Diffstat (limited to 'src/spdk/dpdk/drivers/raw')
-rw-r--r--  src/spdk/dpdk/drivers/raw/Makefile  18
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_cmdif/Makefile  35
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c  300
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h  46
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_cmdif/meson.build  9
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h  35
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_rawdev_dpaa2_cmdif_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_qdma/Makefile  36
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c  1504
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h  178
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h  46
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_qdma/meson.build  9
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h  355
-rw-r--r--  src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_rawdev_dpaa2_qdma_version.map  20
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/Makefile  39
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/Makefile  32
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/README  46
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_api.c  410
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_api.h  29
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_compat.h  58
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_defines.h  1739
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_enumerate.c  724
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_enumerate.h  11
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.c  392
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.h  224
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme.c  1406
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_dperf.c  301
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_error.c  411
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_iperf.c  715
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_pr.c  357
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_hw.h  151
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_port.c  429
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_port_error.c  163
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/meson.build  29
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_at24_eeprom.c  87
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_at24_eeprom.h  14
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_debug.c  102
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_debug.h  19
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_eth_group.c  316
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_eth_group.h  102
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_hw_api.c  714
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_hw_api.h  332
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_i2c.c  512
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_i2c.h  131
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_ifpga_hw_api.c  145
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_ifpga_hw_api.h  281
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c  650
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_intel_max10.h  211
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_osdep.h  93
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi.c  299
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi.h  168
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c  498
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/osdep_raw/osdep_generic.h  76
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/base/osdep_rte/osdep_generic.h  61
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/ifpga_rawdev.c  1731
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/ifpga_rawdev.h  79
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/meson.build  25
-rw-r--r--  src/spdk/dpdk/drivers/raw/ifpga/rte_rawdev_ifpga_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/Makefile  28
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev.c  395
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev_test.c  245
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/meson.build  11
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/rte_ioat_rawdev.h  233
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/rte_ioat_spec.h  301
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/rte_rawdev_ioat_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/raw/meson.build  11
-rw-r--r--  src/spdk/dpdk/drivers/raw/ntb/Makefile  28
-rw-r--r--  src/spdk/dpdk/drivers/raw/ntb/meson.build  8
-rw-r--r--  src/spdk/dpdk/drivers/raw/ntb/ntb.c  1513
-rw-r--r--  src/spdk/dpdk/drivers/raw/ntb/ntb.h  237
-rw-r--r--  src/spdk/dpdk/drivers/raw/ntb/ntb_hw_intel.c  391
-rw-r--r--  src/spdk/dpdk/drivers/raw/ntb/ntb_hw_intel.h  86
-rw-r--r--  src/spdk/dpdk/drivers/raw/ntb/rte_pmd_ntb.h  43
-rw-r--r--  src/spdk/dpdk/drivers/raw/ntb/rte_rawdev_ntb_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_dma/Makefile  34
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_dma/meson.build  18
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_msg.c  105
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c  437
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.h  196
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_test.c  217
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_dma/rte_rawdev_octeontx2_dma_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/Makefile  42
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/meson.build  9
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c  846
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h  52
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c  361
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h  499
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_test.c  172
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.c  475
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.h  10
-rw-r--r--  src/spdk/dpdk/drivers/raw/octeontx2_ep/rte_rawdev_octeontx2_ep_version.map  4
-rw-r--r--  src/spdk/dpdk/drivers/raw/skeleton/Makefile  26
-rw-r--r--  src/spdk/dpdk/drivers/raw/skeleton/meson.build  6
-rw-r--r--  src/spdk/dpdk/drivers/raw/skeleton/rte_rawdev_skeleton_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev.c  768
-rw-r--r--  src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev.h  136
-rw-r--r--  src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c  460
97 files changed, 25324 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/drivers/raw/Makefile b/src/spdk/dpdk/drivers/raw/Makefile
new file mode 100644
index 000000000..80b043eb1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# DIRS-$(<configuration>) += <directory>
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV) += ioat
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV) += ntb
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += octeontx2_dma
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += octeontx2_ep
+
+include $(RTE_SDK)/mk/rte.subdir.mk
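Each subdirectory above is only built when its matching CONFIG_RTE_LIBRTE_PMD_*_RAWDEV option is enabled in the make-based configuration. A minimal sketch, assuming the stock config/common_base option names, of turning two of these drivers on:

	# config/common_base (or a target-specific override)
	CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=y
	CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV=y

The dpaa2_* entries additionally require CONFIG_RTE_EAL_VFIO and CONFIG_RTE_LIBRTE_FSLMC_BUS, per the ifeq guard above.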
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/Makefile b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/Makefile
new file mode 100644
index 000000000..227ec4896
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_rawdev_dpaa2_cmdif.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_common_dpaax
+
+EXPORT_MAP := rte_rawdev_dpaa2_cmdif_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV)-include += rte_pmd_dpaa2_cmdif.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
new file mode 100644
index 000000000..7c21c6a52
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_atomic.h>
+#include <rte_interrupts.h>
+#include <rte_branch_prediction.h>
+#include <rte_lcore.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_cmdif_logs.h"
+#include "rte_pmd_dpaa2_cmdif.h"
+
+/* Dynamic log type identifier */
+int dpaa2_cmdif_logtype;
+
+/* CMDIF driver name */
+#define DPAA2_CMDIF_PMD_NAME dpaa2_dpci
+
+/*
+ * This API provides the DPCI device ID in 'attr_value'.
+ * The device ID shall be passed by GPP to the AIOP using CMDIF commands.
+ */
+static int
+dpaa2_cmdif_get_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ uint64_t *attr_value)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(attr_name);
+
+ if (!attr_value) {
+ DPAA2_CMDIF_ERR("Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+ *attr_value = cidev->dpci_id;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_send_cnxt;
+ struct dpaa2_queue *txq;
+ struct qbman_fd fd;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ uint32_t retry_count = 0;
+ int ret;
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_send_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ txq = &(cidev->tx_queue[cmdif_send_cnxt->priority]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	/* Set only the required FD fields to 0.
+	 * For performance reasons do not memset the whole FD.
+	 */
+ fd.simple.bpid_offset = 0;
+ fd.simple.ctrl = 0;
+
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));
+ DPAA2_SET_FD_LEN(&fd, cmdif_send_cnxt->size);
+ DPAA2_SET_FD_FRC(&fd, cmdif_send_cnxt->frc);
+ DPAA2_SET_FD_FLC(&fd, cmdif_send_cnxt->flc);
+
+ /* Enqueue a packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret);
+ retry_count++;
+ } while ((ret == -EBUSY) && (retry_count < DPAA2_MAX_TX_RETRY_COUNT));
+
+ if (ret < 0)
+ return ret;
+
+ DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");
+
+ return 1;
+}
+
+static int
+dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_rcv_cnxt;
+ struct dpaa2_queue *rxq;
+ struct qbman_swp *swp;
+ struct qbman_result *dq_storage;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+ uint8_t status;
+ int ret;
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+	/* Check if the previously issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until the dq_storage is updated with new token by QBMAN */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
+ return 0;
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd) + DPAA2_GET_FD_OFFSET(fd));
+ cmdif_rcv_cnxt->size = DPAA2_GET_FD_LEN(fd);
+ cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
+ cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);
+
+ DPAA2_CMDIF_DP_DEBUG("packet received\n");
+
+ return 1;
+}
+
+static const struct rte_rawdev_ops dpaa2_cmdif_ops = {
+ .attr_get = dpaa2_cmdif_get_attr,
+ .enqueue_bufs = dpaa2_cmdif_enqueue_bufs,
+ .dequeue_bufs = dpaa2_cmdif_dequeue_bufs,
+};
+
+static int
+dpaa2_cmdif_create(const char *name,
+ struct rte_vdev_device *vdev,
+ int socket_id)
+{
+ struct rte_rawdev *rawdev;
+ struct dpaa2_dpci_dev *cidev;
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpaa2_dpci_dev),
+ socket_id);
+ if (!rawdev) {
+ DPAA2_CMDIF_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ rawdev->dev_ops = &dpaa2_cmdif_ops;
+ rawdev->device = &vdev->device;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ cidev = rte_dpaa2_alloc_dpci_dev();
+ if (!cidev) {
+ DPAA2_CMDIF_ERR("Unable to allocate CI device");
+ rte_rawdev_pmd_release(rawdev);
+ return -ENODEV;
+ }
+
+ rawdev->dev_private = cidev;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ DPAA2_CMDIF_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ /* The primary process will only free the DPCI device */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_dpaa2_free_dpci_dev(rdev->dev_private);
+
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ DPAA2_CMDIF_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret = 0;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_create(name, vdev, rte_socket_id());
+
+ return ret;
+}
+
+static int
+dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -1;
+
+ DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_destroy(name);
+
+ return ret;
+}
+
+static struct rte_vdev_driver dpaa2_cmdif_drv = {
+ .probe = dpaa2_cmdif_probe,
+ .remove = dpaa2_cmdif_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
+
+RTE_INIT(dpaa2_cmdif_init_log)
+{
+ dpaa2_cmdif_logtype = rte_log_register("pmd.raw.dpaa2.cmdif");
+ if (dpaa2_cmdif_logtype >= 0)
+ rte_log_set_level(dpaa2_cmdif_logtype, RTE_LOG_INFO);
+}
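The enqueue/dequeue ops above are reached through the generic rawdev API, with a struct rte_dpaa2_cmdif_context passed as the opaque context. A minimal application-side sketch (not part of the patch), assuming a device id already resolved via rte_rawdev_get_dev_id() and a DMA-able command buffer:

	#include <rte_rawdev.h>
	#include <rte_pmd_dpaa2_cmdif.h>

	static int
	send_cmdif_command(uint16_t dev_id, void *cmd_buf, uint32_t cmd_len)
	{
		struct rte_dpaa2_cmdif_context ctx = {
			.size = cmd_len,	/* length written into the FD */
			.frc = 0,		/* command-specific FRC value */
			.flc = 0,		/* command-specific FLC value */
			.priority = 0,		/* selects the DPCI Tx queue */
		};
		struct rte_rawdev_buf buf = { .buf_addr = cmd_buf };
		struct rte_rawdev_buf *bufs[1] = { &buf };

		/* dpaa2_cmdif_enqueue_bufs() returns 1 on success */
		return rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &ctx);
	}

Note that dpaa2_cmdif_enqueue_bufs() only looks at buffers[0] and ignores count, so one command is sent per call.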
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
new file mode 100644
index 000000000..8991e8327
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_CMDIF_LOGS_H__
+#define __DPAA2_CMDIF_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_cmdif_logtype;
+
+#define DPAA2_CMDIF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_cmdif_logtype, "dpaa2_cmdif: " \
+ fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_cmdif_logtype, "dpaa2_cmdif: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_CMDIF_FUNC_TRACE() DPAA2_CMDIF_DEBUG(">>")
+
+#define DPAA2_CMDIF_INFO(fmt, args...) \
+ DPAA2_CMDIF_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_ERR(fmt, args...) \
+ DPAA2_CMDIF_LOG(ERR, fmt, ## args)
+#define DPAA2_CMDIF_WARN(fmt, args...) \
+ DPAA2_CMDIF_LOG(WARNING, fmt, ## args)
+
+/* DP logs, compiled out at build time if the level is below the configured level */
+#define DPAA2_CMDIF_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_cmdif: " fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DP_DEBUG(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_CMDIF_DP_INFO(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_DP_WARN(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_CMDIF_LOGS_H__ */
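The dynamic log type is registered as "pmd.raw.dpaa2.cmdif" (see the RTE_INIT constructor in dpaa2_cmdif.c above), so its verbosity can be raised at runtime through the standard EAL option; an illustrative invocation:

	# numeric level 8 == RTE_LOG_DEBUG
	./app --log-level=pmd.raw.dpaa2.cmdif,8 ...

The DPAA2_CMDIF_DP_* data-path macros, by contrast, are governed at build time by RTE_LOG_DP_LEVEL and cannot be enabled at runtime.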
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/meson.build b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/meson.build
new file mode 100644
index 000000000..e7656a3cd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
+reason = 'missing dependency, DPDK DPAA2 mempool driver'
+deps += ['rawdev', 'mempool_dpaa2', 'bus_vdev']
+sources = files('dpaa2_cmdif.c')
+
+install_headers('rte_pmd_dpaa2_cmdif.h')
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
new file mode 100644
index 000000000..483b66eaa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_CMDIF_H__
+#define __RTE_PMD_DPAA2_CMDIF_H__
+
+/**
+ * @file
+ *
+ * NXP DPAA2 AIOP CMDIF PMD-specific structures.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** The context required in the I/O path for DPAA2 AIOP Command Interface */
+struct rte_dpaa2_cmdif_context {
+ /** Size to populate in QBMAN FD */
+ uint32_t size;
+ /** FRC to populate in QBMAN FD */
+ uint32_t frc;
+ /** FLC to populate in QBMAN FD */
+ uint64_t flc;
+	/** Priority of the command. This priority determines the DPCI queue. */
+ uint8_t priority;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_PMD_DPAA2_CMDIF_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_rawdev_dpaa2_cmdif_version.map b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_rawdev_dpaa2_cmdif_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/rte_rawdev_dpaa2_cmdif_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/Makefile b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/Makefile
new file mode 100644
index 000000000..7b2abdddb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_rawdev_dpaa2_qdma.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_mempool
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_ring
+LDLIBS += -lrte_common_dpaax
+
+EXPORT_MAP := rte_rawdev_dpaa2_qdma_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV)-include += rte_pmd_dpaa2_qdma.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
new file mode 100644
index 000000000..1be1b6ddf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -0,0 +1,1504 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_atomic.h>
+#include <rte_lcore.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+#include <rte_kvargs.h>
+
+#include <mc/fsl_dpdmai.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+
+#include "rte_pmd_dpaa2_qdma.h"
+#include "dpaa2_qdma.h"
+#include "dpaa2_qdma_logs.h"
+
+#define DPAA2_QDMA_NO_PREFETCH "no_prefetch"
+
+/* Dynamic log type identifier */
+int dpaa2_qdma_logtype;
+
+uint32_t dpaa2_coherent_no_alloc_cache;
+uint32_t dpaa2_coherent_alloc_cache;
+
+/* QDMA device */
+static struct qdma_device qdma_dev;
+
+/* QDMA H/W queues list */
+TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
+static struct qdma_hw_queue_list qdma_queue_list
+ = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
+
+/* QDMA Virtual Queues */
+static struct qdma_virt_queue *qdma_vqs;
+
+/* QDMA per core data */
+static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
+
+typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t rxq_id,
+ uint16_t *vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;
+
+typedef uint16_t (dpdmai_dev_get_job_t)(const struct qbman_fd *fd,
+ struct rte_qdma_job **job);
+typedef int (dpdmai_dev_set_fd_t)(struct qbman_fd *fd,
+ struct rte_qdma_job *job,
+ struct rte_qdma_rbp *rbp,
+ uint16_t vq_id);
+dpdmai_dev_get_job_t *dpdmai_dev_get_job;
+dpdmai_dev_set_fd_t *dpdmai_dev_set_fd;
+
+static inline int
+qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
+ uint32_t len, struct qbman_fd *fd,
+ struct rte_qdma_rbp *rbp)
+{
+ fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
+ fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+
+ fd->simple_pci.len_sl = len;
+
+ fd->simple_pci.bmt = 1;
+ fd->simple_pci.fmt = 3;
+ fd->simple_pci.sl = 1;
+ fd->simple_pci.ser = 1;
+
+	fd->simple_pci.sportid = rbp->sportid;	/* PCIe 3 */
+ fd->simple_pci.srbp = rbp->srbp;
+ if (rbp->srbp)
+ fd->simple_pci.rdttype = 0;
+ else
+ fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/* dest is PCIe memory */
+	fd->simple_pci.dportid = rbp->dportid;	/* PCIe 3 */
+ fd->simple_pci.drbp = rbp->drbp;
+ if (rbp->drbp)
+ fd->simple_pci.wrttype = 0;
+ else
+ fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+ fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
+ fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+
+ return 0;
+}
+
+static inline int
+qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
+ uint32_t len, struct qbman_fd *fd)
+{
+ fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
+ fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
+
+ fd->simple_ddr.len = len;
+
+ fd->simple_ddr.bmt = 1;
+ fd->simple_ddr.fmt = 3;
+ fd->simple_ddr.sl = 1;
+ fd->simple_ddr.ser = 1;
+ /**
+ * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+ * Coherent copy of cacheable memory,
+ * lookup in downstream cache, no allocate
+ * on miss
+ */
+ fd->simple_ddr.rns = 0;
+ fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+ /**
+ * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+ * Coherent write of cacheable memory,
+ * lookup in downstream cache, no allocate on miss
+ */
+ fd->simple_ddr.wns = 0;
+ fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+ fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
+ fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+
+ return 0;
+}
+
+static void
+dpaa2_qdma_populate_fle(struct qbman_fle *fle,
+ struct rte_qdma_rbp *rbp,
+ uint64_t src, uint64_t dest,
+ size_t len, uint32_t flags)
+{
+ struct qdma_sdd *sdd;
+
+ sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
+ (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
+
+ /* first frame list to source descriptor */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
+ DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+
+ /* source and destination descriptor */
+ if (rbp && rbp->enable) {
+ /* source */
+ sdd->read_cmd.portid = rbp->sportid;
+ sdd->rbpcmd_simple.pfid = rbp->spfid;
+ sdd->rbpcmd_simple.vfid = rbp->svfid;
+
+ if (rbp->srbp) {
+ sdd->read_cmd.rbp = rbp->srbp;
+ sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+ } else {
+ sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+ }
+ sdd++;
+ /* destination */
+ sdd->write_cmd.portid = rbp->dportid;
+ sdd->rbpcmd_simple.pfid = rbp->dpfid;
+ sdd->rbpcmd_simple.vfid = rbp->dvfid;
+
+ if (rbp->drbp) {
+ sdd->write_cmd.rbp = rbp->drbp;
+ sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+ } else {
+ sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+ }
+
+ } else {
+ sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+ sdd++;
+ sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+ }
+ fle++;
+ /* source frame list to source buffer */
+ if (flags & RTE_QDMA_JOB_SRC_PHY) {
+ DPAA2_SET_FLE_ADDR(fle, src);
+ DPAA2_SET_FLE_BMT(fle);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ fle++;
+ /* destination frame list to destination buffer */
+ if (flags & RTE_QDMA_JOB_DEST_PHY) {
+ DPAA2_SET_FLE_BMT(fle);
+ DPAA2_SET_FLE_ADDR(fle, dest);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ /* Final bit: 1, for last frame list */
+ DPAA2_SET_FLE_FIN(fle);
+}
+
+static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,
+ struct rte_qdma_job *job,
+ struct rte_qdma_rbp *rbp,
+ uint16_t vq_id)
+{
+ struct rte_qdma_job **ppjob;
+ size_t iova;
+ int ret = 0;
+
+ if (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)
+ iova = (size_t)job->dest;
+ else
+ iova = (size_t)job->src;
+
+ /* Set the metadata */
+ job->vq_id = vq_id;
+ ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
+ *ppjob = job;
+
+ if ((rbp->drbp == 1) || (rbp->srbp == 1))
+ ret = qdma_populate_fd_pci((phys_addr_t) job->src,
+ (phys_addr_t) job->dest,
+ job->len, fd, rbp);
+ else
+ ret = qdma_populate_fd_ddr((phys_addr_t) job->src,
+ (phys_addr_t) job->dest,
+ job->len, fd);
+ return ret;
+}
+
+static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,
+ struct rte_qdma_job *job,
+ struct rte_qdma_rbp *rbp,
+ uint16_t vq_id)
+{
+ struct rte_qdma_job **ppjob;
+ struct qbman_fle *fle;
+ int ret = 0;
+ /*
+ * Get an FLE/SDD from FLE pool.
+ * Note: IO metadata is before the FLE and SDD memory.
+ */
+ ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&ppjob));
+ if (ret) {
+ DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+ return ret;
+ }
+
+ /* Set the metadata */
+ job->vq_id = vq_id;
+ *ppjob = job;
+
+ fle = (struct qbman_fle *)(ppjob + 1);
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+
+ /* Populate FLE */
+ memset(fle, 0, QDMA_FLE_POOL_SIZE);
+ dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
+ job->len, job->flags);
+
+ return 0;
+}
+
+static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,
+ struct rte_qdma_job **job)
+{
+ uint16_t vqid;
+ size_t iova;
+ struct rte_qdma_job **ppjob;
+
+ if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
+ iova = (size_t) (((uint64_t)fd->simple_pci.daddr_hi) << 32
+ | (uint64_t)fd->simple_pci.daddr_lo);
+ else
+ iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
+ | (uint64_t)fd->simple_pci.saddr_lo);
+
+ ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
+ *job = (struct rte_qdma_job *)*ppjob;
+ (*job)->status = (fd->simple_pci.acc_err << 8) | (fd->simple_pci.error);
+ vqid = (*job)->vq_id;
+
+ return vqid;
+}
+
+static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,
+ struct rte_qdma_job **job)
+{
+ struct rte_qdma_job **ppjob;
+ uint16_t vqid;
+ /*
+ * Fetch metadata from FLE. job and vq_id were set
+ * in metadata in the enqueue operation.
+ */
+ ppjob = (struct rte_qdma_job **)
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ ppjob -= 1;
+
+ *job = (struct rte_qdma_job *)*ppjob;
+ (*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
+ (DPAA2_GET_FD_FRC(fd) & 0xFF);
+ vqid = (*job)->vq_id;
+
+ /* Free FLE to the pool */
+ rte_mempool_put(qdma_dev.fle_pool, (void *)ppjob);
+
+ return vqid;
+}
+
+static struct qdma_hw_queue *
+alloc_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_hw_queue *queue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Get a free queue from the list */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next) {
+ if (queue->num_users == 0) {
+ queue->lcore_id = lcore_id;
+ queue->num_users++;
+ break;
+ }
+ }
+
+ return queue;
+}
+
+static void
+free_hw_queue(struct qdma_hw_queue *queue)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ queue->num_users--;
+}
+
+static struct qdma_hw_queue *
+get_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_per_core_info *core_info;
+ struct qdma_hw_queue *queue, *temp;
+ uint32_t least_num_users;
+ int num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+
+ /*
+ * Allocate a HW queue if there are less queues
+ * than maximum per core queues configured
+ */
+ if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
+ queue = alloc_hw_queue(lcore_id);
+ if (queue) {
+ core_info->hw_queues[num_hw_queues] = queue;
+ core_info->num_hw_queues++;
+ return queue;
+ }
+ }
+
+ queue = core_info->hw_queues[0];
+ /* In case there is no queue associated with the core return NULL */
+ if (!queue)
+ return NULL;
+
+ /* Fetch the least loaded H/W queue */
+ least_num_users = core_info->hw_queues[0]->num_users;
+	for (i = 0; i < num_hw_queues; i++) {
+		temp = core_info->hw_queues[i];
+		if (temp->num_users < least_num_users) {
+			least_num_users = temp->num_users;
+			queue = temp;
+		}
+	}
+
+ if (queue)
+ queue->num_users++;
+
+ return queue;
+}
+
+static void
+put_hw_queue(struct qdma_hw_queue *queue)
+{
+ struct qdma_per_core_info *core_info;
+ int lcore_id, num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /*
+ * If this is the last user of the queue free it.
+ * Also remove it from QDMA core info.
+ */
+ if (queue->num_users == 1) {
+ free_hw_queue(queue);
+
+ /* Remove the physical queue from core info */
+ lcore_id = queue->lcore_id;
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+ for (i = 0; i < num_hw_queues; i++) {
+ if (queue == core_info->hw_queues[i])
+ break;
+ }
+ for (; i < num_hw_queues - 1; i++)
+ core_info->hw_queues[i] = core_info->hw_queues[i + 1];
+ core_info->hw_queues[i] = NULL;
+ } else {
+ queue->num_users--;
+ }
+}
+
+int
+rte_qdma_init(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_init(&qdma_dev.lock);
+
+ return 0;
+}
+
+void
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
+}
+
+int
+rte_qdma_reset(void)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before reset.");
+ return -EBUSY;
+ }
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+	for (i = 0; i < qdma_dev.max_vqs; i++) {
+		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
+		    qdma_vqs[i].num_dequeues)) {
+			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+			return -EBUSY;
+		}
+	}
+
+ /* Reset HW queues */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next)
+ queue->num_users = 0;
+
+ /* Reset and free virtual queues */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ }
+ if (qdma_vqs)
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+
+ /* Reset per core info */
+ memset(&qdma_core_info, 0,
+ sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
+
+ /* Free the FLE pool */
+ if (qdma_dev.fle_pool)
+ rte_mempool_free(qdma_dev.fle_pool);
+
+ /* Reset QDMA device structure */
+ qdma_dev.mode = RTE_QDMA_MODE_HW;
+ qdma_dev.max_hw_queues_per_core = 0;
+ qdma_dev.fle_pool = NULL;
+ qdma_dev.fle_pool_count = 0;
+ qdma_dev.max_vqs = 0;
+
+ return 0;
+}
+
+int
+rte_qdma_configure(struct rte_qdma_config *qdma_config)
+{
+ int ret;
+ char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before config.");
+ return -1;
+ }
+
+ /* Reset the QDMA device */
+ ret = rte_qdma_reset();
+ if (ret) {
+ DPAA2_QDMA_ERR("Resetting QDMA failed");
+ return ret;
+ }
+
+ /* Set mode */
+ qdma_dev.mode = qdma_config->mode;
+
+ /* Set max HW queue per core */
+ if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
+ DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
+ MAX_HW_QUEUE_PER_CORE);
+ return -EINVAL;
+ }
+ qdma_dev.max_hw_queues_per_core =
+ qdma_config->max_hw_queues_per_core;
+
+ /* Allocate Virtual Queues */
+ qdma_vqs = rte_malloc("qdma_virtual_queues",
+ (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
+ RTE_CACHE_LINE_SIZE);
+ if (!qdma_vqs) {
+ DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+ return -ENOMEM;
+ }
+ qdma_dev.max_vqs = qdma_config->max_vqs;
+
+	/* Allocate FLE pool; just append PID so that in case of
+	 * multiprocess, the pools don't collide.
+	 */
+ snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
+ getpid());
+ qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
+ qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
+ QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (!qdma_dev.fle_pool) {
+ DPAA2_QDMA_ERR("qdma_fle_pool create failed");
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+ return -ENOMEM;
+ }
+ qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
+
+ if (qdma_config->format == RTE_QDMA_ULTRASHORT_FORMAT) {
+ dpdmai_dev_get_job = dpdmai_dev_get_job_us;
+ dpdmai_dev_set_fd = dpdmai_dev_set_fd_us;
+ } else {
+ dpdmai_dev_get_job = dpdmai_dev_get_job_lf;
+ dpdmai_dev_set_fd = dpdmai_dev_set_fd_lf;
+ }
+ return 0;
+}
+
+int
+rte_qdma_start(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 1;
+
+ return 0;
+}
+
+int
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
+{
+ char ring_name[32];
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ /* Get a free Virtual Queue */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].in_use == 0)
+ break;
+ }
+
+ /* Return in case no VQ is free */
+ if (i == qdma_dev.max_vqs) {
+ rte_spinlock_unlock(&qdma_dev.lock);
+		DPAA2_QDMA_ERR("No free virtual queue available");
+ return -ENODEV;
+ }
+
+ if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
+ (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
+ /* Allocate HW queue for a VQ */
+ qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 1;
+ } else {
+		/* Allocate a Ring for Virtual Queue in VQ mode */
+ snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
+ qdma_vqs[i].status_ring = rte_ring_create(ring_name,
+ qdma_dev.fle_pool_count, rte_socket_id(), 0);
+ if (!qdma_vqs[i].status_ring) {
+ DPAA2_QDMA_ERR("Status ring creation failed for vq");
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return rte_errno;
+ }
+
+ /* Get a HW queue (shared) for a VQ */
+ qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 0;
+ }
+
+ if (qdma_vqs[i].hw_queue == NULL) {
+ DPAA2_QDMA_ERR("No H/W queue available for VQ");
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ qdma_vqs[i].status_ring = NULL;
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ qdma_vqs[i].in_use = 1;
+ qdma_vqs[i].lcore_id = lcore_id;
+ memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return i;
+}
+
+/* Create a VQ for route-by-port */
+int
+rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
+ struct rte_qdma_rbp *rbp)
+{
+ int i;
+
+	i = rte_qdma_vq_create(lcore_id, flags);
+	if (i < 0)
+		return i;
+
+	memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));
+
+ return i;
+}
+
+static int
+dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t txq_id,
+ uint16_t vq_id,
+ struct rte_qdma_rbp *rbp,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
+ struct dpaa2_queue *txq;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+ uint32_t num_to_send = 0;
+ uint16_t num_tx = 0;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ txq = &(dpdmai_dev->tx_queue[txq_id]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ memset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));
+
+ while (nb_jobs > 0) {
+ uint32_t loop;
+
+ num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_jobs;
+
+ for (loop = 0; loop < num_to_send; loop++) {
+ ret = dpdmai_dev_set_fd(&fd[loop],
+ job[num_tx], rbp, vq_id);
+ if (ret < 0) {
+ /* Set nb_jobs to loop, so outer while loop
+ * breaks out.
+ */
+ nb_jobs = loop;
+ break;
+ }
+
+ num_tx++;
+ }
+
+ /* Enqueue the packet to the QBMAN */
+ uint32_t enqueue_loop = 0, retry_count = 0;
+ while (enqueue_loop < loop) {
+ ret = qbman_swp_enqueue_multiple(swp,
+ &eqdesc,
+ &fd[enqueue_loop],
+ NULL,
+ loop - enqueue_loop);
+ if (unlikely(ret < 0)) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+ return num_tx - (loop - enqueue_loop);
+ } else {
+ enqueue_loop += ret;
+ retry_count = 0;
+ }
+ }
+ nb_jobs -= loop;
+ }
+ return num_tx;
+}
+
+int
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ int ret;
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != qdma_vq->lcore_id) {
+ DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
+ vq_id);
+ return -EINVAL;
+ }
+
+ ret = dpdmai_dev_enqueue_multi(dpdmai_dev,
+ qdma_pq->queue_id,
+ vq_id,
+ &qdma_vq->rbp,
+ job,
+ nb_jobs);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
+ return ret;
+ }
+
+ qdma_vq->num_enqueues += ret;
+
+ return ret;
+}
+
+int
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
+}
+
+/* Function to receive a QDMA job for a given device and queue */
+static int
+dpdmai_dev_dequeue_multijob_prefetch(
+ struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t rxq_id,
+ uint16_t *vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ struct dpaa2_queue *rxq;
+ struct qbman_result *dq_storage, *dq_storage1 = NULL;
+ struct qbman_pull_desc pulldesc;
+ struct qbman_swp *swp;
+ struct queue_storage_info_t *q_storage;
+ uint32_t fqid;
+ uint8_t status, pending;
+ uint8_t num_rx = 0;
+ const struct qbman_fd *fd;
+ uint16_t vqid;
+ int ret, pull_size;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+ rxq = &(dpdmai_dev->rx_queue[rxq_id]);
+ fqid = rxq->fqid;
+ q_storage = rxq->q_storage;
+
+ if (unlikely(!q_storage->active_dqs)) {
+ q_storage->toggle = 0;
+ dq_storage = q_storage->dq_storage[q_storage->toggle];
+ q_storage->last_num_pkts = pull_size;
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ q_storage->last_num_pkts);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ while (!qbman_check_command_complete(
+ get_swp_active_dqs(
+ DPAA2_PER_LCORE_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+ }
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_QDMA_DP_WARN(
+				"VDQ command not issued. QBMAN busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
+ dq_storage);
+ }
+
+ dq_storage = q_storage->active_dqs;
+ rte_prefetch0((void *)(size_t)(dq_storage));
+ rte_prefetch0((void *)(size_t)(dq_storage + 1));
+
+ /* Prepare next pull descriptor. This will give space for the
+	 * prefetching done on DQRR entries
+ */
+ q_storage->toggle ^= 1;
+ dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc, pull_size);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
+ (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+
+	/* Check if the previously issued command is completed.
+ * Also seems like the SWP is shared between the Ethernet Driver
+ * and the SEC driver.
+ */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
+ clear_swp_active_dqs(q_storage->active_dpio_id);
+
+ pending = 1;
+
+ do {
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+ /* Check whether Last Pull command is Expired and
+ * setting Condition for Loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
+ if (vq_id)
+ vq_id[num_rx] = vqid;
+
+ dq_storage++;
+ num_rx++;
+ } while (pending);
+
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ while (!qbman_check_command_complete(
+ get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+ }
+ /* issue a volatile dequeue command for next pull */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_QDMA_DP_WARN("VDQ command is not issued. "
+					"QBMAN is busy (2)\n");
+ continue;
+ }
+ break;
+ }
+
+ q_storage->active_dqs = dq_storage1;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+
+ return num_rx;
+}
+
+static int
+dpdmai_dev_dequeue_multijob_no_prefetch(
+ struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t rxq_id,
+ uint16_t *vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ struct dpaa2_queue *rxq;
+ struct qbman_result *dq_storage;
+ struct qbman_pull_desc pulldesc;
+ struct qbman_swp *swp;
+ uint32_t fqid;
+ uint8_t status, pending;
+ uint8_t num_rx = 0;
+ const struct qbman_fd *fd;
+ uint16_t vqid;
+ int ret, next_pull = nb_jobs, num_pulled = 0;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ rxq = &(dpdmai_dev->rx_queue[rxq_id]);
+ fqid = rxq->fqid;
+
+ do {
+ dq_storage = rxq->q_storage->dq_storage[0];
+ /* Prepare dequeue descriptor */
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ if (next_pull > dpaa2_dqrr_size) {
+ qbman_pull_desc_set_numframes(&pulldesc,
+ dpaa2_dqrr_size);
+ next_pull -= dpaa2_dqrr_size;
+ } else {
+ qbman_pull_desc_set_numframes(&pulldesc, next_pull);
+ next_pull = 0;
+ }
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ rte_prefetch0((void *)((size_t)(dq_storage + 1)));
+		/* Check if the previously issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+
+ num_pulled = 0;
+ pending = 1;
+
+ do {
+ /* Loop until dq_storage is updated
+ * with new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status &
+ QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
+ if (vq_id)
+ vq_id[num_rx] = vqid;
+
+ dq_storage++;
+ num_rx++;
+ num_pulled++;
+
+ } while (pending);
+ /* Last VDQ provided all packets and more packets are requested */
+ } while (next_pull && num_pulled == dpaa2_dqrr_size);
+
+ return num_rx;
+}
+
+int
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct qdma_virt_queue *temp_qdma_vq;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ int ring_count, ret = 0, i;
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
+ DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
+ vq_id);
+ return -1;
+ }
+
+ /* Only dequeue when there are pending jobs on VQ */
+ if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
+ return 0;
+
+ if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
+ nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);
+
+ if (qdma_vq->exclusive_hw_queue) {
+ /* In case of exclusive queue directly fetch from HW queue */
+ ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
+ NULL, job, nb_jobs);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR(
+ "Dequeue from DPDMAI device failed: %d", ret);
+ return ret;
+ }
+ qdma_vq->num_dequeues += ret;
+ } else {
+ uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
+ /*
+ * Get the QDMA completed jobs from the software ring.
+ * In case they are not available on the ring poke the HW
+ * to fetch completed jobs from corresponding HW queues
+ */
+ ring_count = rte_ring_count(qdma_vq->status_ring);
+ if (ring_count < nb_jobs) {
+ /* TODO - How to have right budget */
+ ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
+ qdma_pq->queue_id,
+ temp_vq_id, job, nb_jobs);
+ for (i = 0; i < ret; i++) {
+ temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
+ rte_ring_enqueue(temp_qdma_vq->status_ring,
+ (void *)(job[i]));
+ }
+ ring_count = rte_ring_count(
+ qdma_vq->status_ring);
+ }
+
+ if (ring_count) {
+ /* Dequeue job from the software ring
+ * to provide to the user
+ */
+ ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
+ (void **)job, ring_count, NULL);
+ if (ret)
+ qdma_vq->num_dequeues += ret;
+ }
+ }
+
+ return ret;
+}
+
+struct rte_qdma_job *
+rte_qdma_vq_dequeue(uint16_t vq_id)
+{
+ int ret;
+ struct rte_qdma_job *job = NULL;
+
+ ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
+ if (ret < 0)
+ DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);
+
+ return job;
+}
+
+void
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_status)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ if (qdma_vq->in_use) {
+ vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
+ vq_status->lcore_id = qdma_vq->lcore_id;
+ vq_status->num_enqueues = qdma_vq->num_enqueues;
+ vq_status->num_dequeues = qdma_vq->num_dequeues;
+ vq_status->num_pending_jobs = vq_status->num_enqueues -
+ vq_status->num_dequeues;
+ }
+}
+
+int
+rte_qdma_vq_destroy(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
+ return -EBUSY;
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ if (qdma_vq->exclusive_hw_queue)
+ free_hw_queue(qdma_vq->hw_queue);
+ else {
+		if (qdma_vq->status_ring)
+			rte_ring_free(qdma_vq->status_ring);
+
+ put_hw_queue(qdma_vq->hw_queue);
+ }
+
+ memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
+
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return 0;
+}
+
+int
+rte_qdma_vq_destroy_rbp(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
+ return -EBUSY;
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ if (qdma_vq->exclusive_hw_queue) {
+ free_hw_queue(qdma_vq->hw_queue);
+ } else {
+		if (qdma_vq->status_ring)
+			rte_ring_free(qdma_vq->status_ring);
+
+ put_hw_queue(qdma_vq->hw_queue);
+ }
+
+ memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
+
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return 0;
+}
+
+void
+rte_qdma_stop(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 0;
+}
+
+void
+rte_qdma_destroy(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_qdma_reset();
+}
+
+static const struct rte_rawdev_ops dpaa2_qdma_ops;
+
+static int
+add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
+ queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
+ if (!queue) {
+ DPAA2_QDMA_ERR(
+ "Memory allocation failed for QDMA queue");
+ return -ENOMEM;
+ }
+
+ queue->dpdmai_dev = dpdmai_dev;
+ queue->queue_id = i;
+
+ TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
+ qdma_dev.num_hw_queues++;
+ }
+
+ return 0;
+}
+
+static void
+remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue = NULL;
+ struct qdma_hw_queue *tqueue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
+ if (queue->dpdmai_dev == dpdmai_dev) {
+ TAILQ_REMOVE(&qdma_queue_list, queue, next);
+ rte_free(queue);
+ queue = NULL;
+ }
+ }
+}
+
+static int
+dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Remove HW queues from global list */
+ remove_hw_queues_from_list(dpdmai_dev);
+
+ ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret)
+		DPAA2_QDMA_ERR("dpdmai disable failed");
+
+	/* Free the DQRR storage of the Rx queues */
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
+ struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+
+ /* Close the device at underlying layer*/
+ ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("Failure closing dpdmai device");
+
+ return 0;
+}
+
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static int
+dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpdmai_attr attr;
+ struct dpdmai_rx_queue_attr rx_attr;
+ struct dpdmai_tx_queue_attr tx_attr;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Open DPDMAI device */
+ dpdmai_dev->dpdmai_id = dpdmai_id;
+ dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+ ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
+ return ret;
+ }
+
+ /* Get DPDMAI attributes */
+ ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, &attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->num_queues = attr.num_of_queues;
+
+ /* Set up Rx Queues */
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+ ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
+ CMD_PRI_LOW,
+ dpdmai_dev->token,
+ i, 0, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
+ ret);
+ goto init_err;
+ }
+
+ /* Allocate DQ storage for the DPDMAI Rx queues */
+ rxq = &(dpdmai_dev->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_QDMA_ERR("q_storage allocation failed");
+ ret = -ENOMEM;
+ goto init_err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
+ goto init_err;
+ }
+ }
+
+ /* Get Rx and Tx queues FQID's */
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
+ ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, 0, &rx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+ ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, 0, &tx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+ }
+
+ /* Enable the device */
+ ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+ goto init_err;
+ }
+
+ /* Add the HW queue to the global list */
+ ret = add_hw_queues_to_list(dpdmai_dev);
+ if (ret) {
+ DPAA2_QDMA_ERR("Adding H/W queue to list failed");
+ goto init_err;
+ }
+
+ if (dpaa2_get_devargs(rawdev->device->devargs,
+ DPAA2_QDMA_NO_PREFETCH)) {
+ /* If no prefetch is configured. */
+ dpdmai_dev_dequeue_multijob =
+ dpdmai_dev_dequeue_multijob_no_prefetch;
+ DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
+ } else {
+ dpdmai_dev_dequeue_multijob =
+ dpdmai_dev_dequeue_multijob_prefetch;
+ }
+
+ if (!dpaa2_coherent_no_alloc_cache) {
+ if (dpaa2_svr_family == SVR_LX2160A) {
+ dpaa2_coherent_no_alloc_cache =
+ DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
+ dpaa2_coherent_alloc_cache =
+ DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
+ } else {
+ dpaa2_coherent_no_alloc_cache =
+ DPAA2_COHERENT_NO_ALLOCATE_CACHE;
+ dpaa2_coherent_alloc_cache =
+ DPAA2_COHERENT_ALLOCATE_CACHE;
+ }
+ }
+
+ DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+
+ return 0;
+init_err:
+ dpaa2_dpdmai_dev_uninit(rawdev);
+ return ret;
+}
+
+static int
+rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
+ sizeof(struct dpaa2_dpdmai_dev),
+ rte_socket_id());
+ if (!rawdev) {
+ DPAA2_QDMA_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ dpaa2_dev->rawdev = rawdev;
+ rawdev->dev_ops = &dpaa2_qdma_ops;
+ rawdev->device = &dpaa2_dev->device;
+ rawdev->driver_name = dpaa2_drv->driver.name;
+
+ /* Invoke PMD device initialization function */
+ ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
+ if (ret) {
+ rte_rawdev_pmd_release(rawdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ dpaa2_dpdmai_dev_uninit(rawdev);
+
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ DPAA2_QDMA_ERR("Device cleanup failed");
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
+ .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
+ .drv_type = DPAA2_QDMA,
+ .probe = rte_dpaa2_qdma_probe,
+ .remove = rte_dpaa2_qdma_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
+ "no_prefetch=<int> ");
+
+RTE_INIT(dpaa2_qdma_init_log)
+{
+ dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
+ if (dpaa2_qdma_logtype >= 0)
+ rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
new file mode 100644
index 000000000..017638075
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef __DPAA2_QDMA_H__
+#define __DPAA2_QDMA_H__
+
+struct qdma_sdd;
+struct rte_qdma_job;
+
+#define DPAA2_QDMA_MAX_FLE 3
+#define DPAA2_QDMA_MAX_SDD 2
+
+#define DPAA2_DPDMAI_MAX_QUEUES 8
+
+/** FLE pool element size: 3 frame list entries + 2 src/dest descriptors */
+#define QDMA_FLE_POOL_SIZE (sizeof(struct rte_qdma_job *) + \
+ sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
+ sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
+/** FLE pool cache size */
+#define QDMA_FLE_CACHE_SIZE(_num) ((_num) / (RTE_MAX_LCORE * 2))
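+
+/*
+ * A minimal sketch of how these sizing macros can be combined: the FLE
+ * pool may be created as a plain rte_mempool, with 'fle_pool_count' (a
+ * hypothetical element count taken from the device configuration) as the
+ * number of elements:
+ *
+ *	struct rte_mempool *mp;
+ *
+ *	mp = rte_mempool_create("qdma_fle_pool", fle_pool_count,
+ *				QDMA_FLE_POOL_SIZE,
+ *				QDMA_FLE_CACHE_SIZE(fle_pool_count),
+ *				0, NULL, NULL, NULL, NULL,
+ *				SOCKET_ID_ANY, 0);
+ */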
+
+/** Notification by FQD_CTX[fqid] */
+#define QDMA_SER_CTX (1 << 8)
+#define DPAA2_RBP_MEM_RW 0x0
+/**
+ * Source descriptor command read transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_COHERENT_NO_ALLOCATE_CACHE 0xb
+#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE 0x7
+/**
+ * Destination descriptor command write transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_COHERENT_ALLOCATE_CACHE 0x6
+#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE 0xb
+
+/** Maximum possible H/W Queues on each core */
+#define MAX_HW_QUEUE_PER_CORE 64
+
+#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
+/**
+ * Represents a QDMA device.
+ * A single QDMA device exists, which is a combination of multiple
+ * DPDMAI rawdevs.
+ */
+struct qdma_device {
+ /** total number of hw queues. */
+ uint16_t num_hw_queues;
+ /**
+	 * Maximum number of hw queues to be allocated per core.
+	 * This is limited by MAX_HW_QUEUE_PER_CORE.
+ */
+ uint16_t max_hw_queues_per_core;
+	/** Maximum number of VQs */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /** Device state - started or stopped */
+ uint8_t state;
+ /** FLE pool for the device */
+ struct rte_mempool *fle_pool;
+ /** FLE pool size */
+ int fle_pool_count;
+ /** A lock to QDMA device whenever required */
+ rte_spinlock_t lock;
+};
+
+/** Represents a QDMA H/W queue */
+struct qdma_hw_queue {
+ /** Pointer to Next instance */
+ TAILQ_ENTRY(qdma_hw_queue) next;
+ /** DPDMAI device to communicate with HW */
+ struct dpaa2_dpdmai_dev *dpdmai_dev;
+ /** queue ID to communicate with HW */
+ uint16_t queue_id;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** Number of users of this hw queue */
+ uint32_t num_users;
+};
+
+/** Represents a QDMA virtual queue */
+struct qdma_virt_queue {
+ /** Status ring of the virtual queue */
+ struct rte_ring *status_ring;
+ /** Associated hw queue */
+ struct qdma_hw_queue *hw_queue;
+ /** Route by port */
+ struct rte_qdma_rbp rbp;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** States if this vq is in use or not */
+ uint8_t in_use;
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /* Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /* Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+};
+
+/** Represents a QDMA per core hw queues allocation in virtual mode */
+struct qdma_per_core_info {
+ /** list for allocated hw queues */
+ struct qdma_hw_queue *hw_queues[MAX_HW_QUEUE_PER_CORE];
+ /* Number of hw queues allocated for this core */
+ uint16_t num_hw_queues;
+};
+
+/** Source/Destination Descriptor */
+struct qdma_sdd {
+ uint32_t rsv;
+ /** Stride configuration */
+ uint32_t stride;
+ /** Route-by-port command */
+ union {
+ uint32_t rbpcmd;
+ struct rbpcmd_st {
+ uint32_t vfid:6;
+ uint32_t rsv4:2;
+ uint32_t pfid:1;
+ uint32_t rsv3:7;
+ uint32_t attr:3;
+ uint32_t rsv2:1;
+ uint32_t at:2;
+ uint32_t vfa:1;
+ uint32_t ca:1;
+ uint32_t tc:3;
+ uint32_t rsv1:5;
+ } rbpcmd_simple;
+ };
+ union {
+ uint32_t cmd;
+ struct rcmd_simple {
+ uint32_t portid:4;
+ uint32_t rsv1:14;
+ uint32_t rbp:1;
+ uint32_t ssen:1;
+ uint32_t rthrotl:4;
+ uint32_t sqos:3;
+ uint32_t ns:1;
+ uint32_t rdtype:4;
+ } read_cmd;
+ struct wcmd_simple {
+ uint32_t portid:4;
+ uint32_t rsv3:10;
+ uint32_t rsv2:2;
+ uint32_t lwc:2;
+ uint32_t rbp:1;
+ uint32_t dsen:1;
+ uint32_t rsv1:4;
+ uint32_t dqos:3;
+ uint32_t ns:1;
+ uint32_t wrttype:4;
+ } write_cmd;
+ };
+} __rte_packed;
+
+/** Represents a DPDMAI raw device */
+struct dpaa2_dpdmai_dev {
+ /** Pointer to Next device instance */
+ TAILQ_ENTRY(dpaa2_qdma_device) next;
+ /** handle to DPDMAI object */
+ struct fsl_mc_io dpdmai;
+ /** HW ID for DPDMAI object */
+ uint32_t dpdmai_id;
+	/** Token of this device */
+ uint16_t token;
+	/** Number of queues in this DPDMAI device */
+ uint8_t num_queues;
+ /** RX queues */
+ struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
+ /** TX queues */
+ struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
+};
+
+#endif /* __DPAA2_QDMA_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
new file mode 100644
index 000000000..4779e4cec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_LOGS_H__
+#define __DPAA2_QDMA_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_qdma_logtype;
+
+#define DPAA2_QDMA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_qdma_logtype, "dpaa2_qdma: " \
+ fmt "\n", ## args)
+
+#define DPAA2_QDMA_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_qdma_logtype, "dpaa2_qdma: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_QDMA_FUNC_TRACE() DPAA2_QDMA_DEBUG(">>")
+
+#define DPAA2_QDMA_INFO(fmt, args...) \
+ DPAA2_QDMA_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_ERR(fmt, args...) \
+ DPAA2_QDMA_LOG(ERR, fmt, ## args)
+#define DPAA2_QDMA_WARN(fmt, args...) \
+ DPAA2_QDMA_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_QDMA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_qdma: " fmt "\n", ## args)
+
+#define DPAA2_QDMA_DP_DEBUG(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_QDMA_DP_INFO(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_DP_WARN(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_QDMA_LOGS_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/meson.build b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/meson.build
new file mode 100644
index 000000000..b9b08153a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
+reason = 'missing dependency, DPDK DPAA2 mempool driver'
+deps += ['rawdev', 'mempool_dpaa2', 'ring', 'kvargs']
+sources = files('dpaa2_qdma.c')
+
+install_headers('rte_pmd_dpaa2_qdma.h')
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
new file mode 100644
index 000000000..4e1268cc5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_QDMA_H__
+#define __RTE_PMD_DPAA2_QDMA_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 QDMA specific structures.
+ *
+ */
+
+/** Maximum QDMA burst size */
+#define RTE_QDMA_BURST_NB_MAX 256
+
+/** Determines the mode of operation */
+enum {
+ /**
+	 * Allocate a H/W queue per VQ, i.e. an exclusive hardware queue
+	 * for each VQ. This mode gives the best performance.
+ */
+ RTE_QDMA_MODE_HW,
+ /**
+	 * A VQ does not have an exclusive associated H/W queue;
+	 * rather, a H/W queue is shared by multiple virtual queues.
+	 * This mode uses intermediate data structures to support
+	 * multiple VQ to PQ mappings, and thus has some performance cost.
+	 * Note: even in this mode there is an option to allocate a H/W
+	 * queue for a VQ. Please see the 'RTE_QDMA_VQ_EXCLUSIVE_PQ' flag.
+ */
+ RTE_QDMA_MODE_VIRTUAL
+};
+
+/** Determines the format of FD */
+enum {
+ RTE_QDMA_LONG_FORMAT,
+ RTE_QDMA_ULTRASHORT_FORMAT,
+};
+
+/**
+ * If the user has configured virtual queue mode, but a particular VQ
+ * needs an exclusive H/W queue (for better performance on that VQ),
+ * then the user can pass this flag while creating that virtual queue.
+ * A H/W queue will be allocated corresponding to the VQ which uses
+ * this flag.
+ */
+#define RTE_QDMA_VQ_EXCLUSIVE_PQ (1ULL)
+
+/** States if the source address is physical. */
+#define RTE_QDMA_JOB_SRC_PHY (1ULL)
+
+/** States if the destination address is physical. */
+#define RTE_QDMA_JOB_DEST_PHY (1ULL << 1)
+
+/** Provides QDMA device attributes */
+struct rte_qdma_attr {
+ /** total number of hw QDMA queues present */
+ uint16_t num_hw_queues;
+};
+
+/** QDMA device configuration structure */
+struct rte_qdma_config {
+	/** Maximum number of hw queues to allocate per core. */
+ uint16_t max_hw_queues_per_core;
+	/** Maximum number of VQs to be used. */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /** FD format */
+ uint8_t format;
+ /**
+	 * User-provided size of the FLE pool.
+	 * FLEs (and the corresponding source/destination descriptors) are
+	 * allocated by the driver at enqueue time to store src/dest and
+	 * other data, and are freed at dequeue time. This determines the
+	 * maximum number of in-flight jobs on the QDMA device. It should
+	 * be a power of 2.
+ int fle_pool_count;
+};
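+
+/*
+ * A minimal configuration sketch (the values are illustrative only):
+ *
+ *	struct rte_qdma_config cfg = {
+ *		.max_hw_queues_per_core = 1,
+ *		.max_vqs = 16,
+ *		.mode = RTE_QDMA_MODE_VIRTUAL,
+ *		.format = RTE_QDMA_LONG_FORMAT,
+ *		.fle_pool_count = 4096,	// power of 2
+ *	};
+ *
+ *	if (rte_qdma_configure(&cfg) < 0)
+ *		// handle the error
+ */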
+
+struct rte_qdma_rbp {
+ uint32_t use_ultrashort:1;
+ uint32_t enable:1;
+ /**
+ * dportid:
+ * 0000 PCI-Express 1
+ * 0001 PCI-Express 2
+ * 0010 PCI-Express 3
+ * 0011 PCI-Express 4
+ * 0100 PCI-Express 5
+ * 0101 PCI-Express 6
+ */
+ uint32_t dportid:4;
+ uint32_t dpfid:2;
+ uint32_t dvfid:6;
+	/* Using route-by-port for the destination */
+ uint32_t drbp:1;
+ /**
+ * sportid:
+ * 0000 PCI-Express 1
+ * 0001 PCI-Express 2
+ * 0010 PCI-Express 3
+ * 0011 PCI-Express 4
+ * 0100 PCI-Express 5
+ * 0101 PCI-Express 6
+ */
+ uint32_t sportid:4;
+ uint32_t spfid:2;
+ uint32_t svfid:6;
+	/* Using route-by-port for the source */
+ uint32_t srbp:1;
+ uint32_t rsv:4;
+};
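+
+/*
+ * A sketch of a route-by-port setup targeting PCI-Express 1 as the
+ * destination. Field values follow the encodings documented above; the
+ * VQ is created with rte_qdma_vq_create_rbp(), declared later in this
+ * header:
+ *
+ *	struct rte_qdma_rbp rbp = {0};
+ *	int vq_id;
+ *
+ *	rbp.enable = 1;
+ *	rbp.drbp = 1;		// route-by-port on the destination side
+ *	rbp.dportid = 0;	// 0000 = PCI-Express 1
+ *	rbp.dpfid = 0;
+ *	rbp.dvfid = 0;
+ *	vq_id = rte_qdma_vq_create_rbp(rte_lcore_id(), 0, &rbp);
+ */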
+
+/** Provides QDMA virtual queue statistics */
+struct rte_qdma_vq_stats {
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /* Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /* Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+ /* total number of pending jobs in this VQ */
+ uint64_t num_pending_jobs;
+};
+
+/** Determines a QDMA job */
+struct rte_qdma_job {
+ /** Source Address from where DMA is (to be) performed */
+ uint64_t src;
+ /** Destination Address where DMA is (to be) done */
+ uint64_t dest;
+ /** Length of the DMA operation in bytes. */
+ uint32_t len;
+ /** See RTE_QDMA_JOB_ flags */
+ uint32_t flags;
+ /**
+	 * User-specified context, which the driver maintains and returns
+	 * unchanged on the dequeue operation.
+ */
+ uint64_t cnxt;
+ /**
+ * Status of the transaction.
+	 * This is filled in by the driver during the dequeue operation:
+	 * the upper 8 bits carry acc_err for route-by-port,
+	 * the lower 8 bits carry the FD error.
+ */
+ uint16_t status;
+ uint16_t vq_id;
+};
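+
+/*
+ * A sketch of preparing a single copy job; src_iova, dst_iova and
+ * app_cookie are hypothetical application-side values:
+ *
+ *	struct rte_qdma_job job = {0};
+ *
+ *	job.src = src_iova;	// IOVA of the source buffer
+ *	job.dest = dst_iova;	// IOVA of the destination buffer
+ *	job.len = 4096;
+ *	job.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY;
+ *	job.cnxt = app_cookie;	// returned untouched on dequeue
+ */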
+
+/**
+ * Initialize the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int
+rte_qdma_init(void);
+
+/**
+ * Get the QDMA attributes.
+ *
+ * @param qdma_attr
+ * QDMA attributes providing total number of hw queues etc.
+ */
+void
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr);
+
+/**
+ * Reset the QDMA device. This API completely resets the QDMA
+ * device, bringing it back to its original state, as if only the
+ * rte_qdma_init() API had been called.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int
+rte_qdma_reset(void);
+
+/**
+ * Configure the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int
+rte_qdma_configure(struct rte_qdma_config *qdma_config);
+
+/**
+ * Start the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int
+rte_qdma_start(void);
+
+/**
+ * Create a Virtual Queue on a particular lcore id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param lcore_id
+ *   lcore ID with which this particular queue will be associated.
+ * @param flags
+ * RTE_QDMA_VQ_ flags. See macro definitions.
+ *
+ * @returns
+ * - >= 0: Virtual queue ID.
+ * - <0: Error code.
+ */
+int
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);
+
+/* Create a VQ for route-by-port */
+int
+rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
+ struct rte_qdma_rbp *rbp);
+
+/**
+ * Enqueue multiple jobs to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA jobs provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs containing relevant information related to DMA.
+ * @param nb_jobs
+ * Number of QDMA jobs provided by the user.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Enqueue a single job to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA job provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * A QDMA Job containing relevant information related to DMA.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job);
+
+/**
+ * Dequeue multiple completed jobs from a Virtual Queue.
+ * Provides the list of completed jobs capped by nb_jobs.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs returned from the API.
+ * @param nb_jobs
+ * Number of QDMA jobs requested for dequeue by the user.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully received
+ * - <0: Error code.
+ */
+int
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
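+
+/*
+ * A minimal end-to-end flow using the APIs above ('cfg' and 'job' as in
+ * the earlier sketches, error handling omitted):
+ *
+ *	struct rte_qdma_job *req[1] = { &job };
+ *	struct rte_qdma_job *done[1];
+ *	int vq_id, sent, rcvd;
+ *
+ *	rte_qdma_init();
+ *	rte_qdma_configure(&cfg);
+ *	rte_qdma_start();
+ *	vq_id = rte_qdma_vq_create(rte_lcore_id(), 0);
+ *	sent = rte_qdma_vq_enqueue_multi(vq_id, req, 1);
+ *	do {
+ *		rcvd = rte_qdma_vq_dequeue_multi(vq_id, done, 1);
+ *	} while (rcvd == 0);	// poll until the copy completes
+ */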
+
+/**
+ * Dequeue a single completed job from a Virtual Queue.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ *
+ * @returns
+ * - A completed job, or NULL if no completed job is available.
+ */
+struct rte_qdma_job *
+rte_qdma_vq_dequeue(uint16_t vq_id);
+
+/**
+ * Get statistics of a Virtual Queue.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param vq_stats
+ * VQ statistics structure which will be filled in by the driver.
+ */
+void
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_stats);
+
+/**
+ * Destroy the Virtual Queue specified by vq_id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param vq_id
+ * Virtual Queue ID which needs to be uninitialized.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int
+rte_qdma_vq_destroy(uint16_t vq_id);
+
+/**
+ * Destroy the RBP specific Virtual Queue specified by vq_id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param vq_id
+ * RBP based Virtual Queue ID which needs to be uninitialized.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int
+rte_qdma_vq_destroy_rbp(uint16_t vq_id);
+
+/**
+ * Stop QDMA device.
+ */
+void
+rte_qdma_stop(void);
+
+/**
+ * Destroy the QDMA device.
+ */
+void
+rte_qdma_destroy(void);
+
+#endif /* __RTE_PMD_DPAA2_QDMA_H__*/
diff --git a/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_rawdev_dpaa2_qdma_version.map b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_rawdev_dpaa2_qdma_version.map
new file mode 100644
index 000000000..ca6a0d762
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/dpaa2_qdma/rte_rawdev_dpaa2_qdma_version.map
@@ -0,0 +1,20 @@
+DPDK_20.0 {
+ global:
+
+ rte_qdma_attr_get;
+ rte_qdma_configure;
+ rte_qdma_destroy;
+ rte_qdma_init;
+ rte_qdma_reset;
+ rte_qdma_start;
+ rte_qdma_stop;
+ rte_qdma_vq_create;
+ rte_qdma_vq_dequeue;
+ rte_qdma_vq_dequeue_multi;
+ rte_qdma_vq_destroy;
+ rte_qdma_vq_enqueue;
+ rte_qdma_vq_enqueue_multi;
+ rte_qdma_vq_stats;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/Makefile b/src/spdk/dpdk/drivers/raw/ifpga/Makefile
new file mode 100644
index 000000000..371e127f1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_rawdev_ifpga.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/ifpga
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev
+CFLAGS += -I$(RTE_SDK)/drivers/net/i40e
+CFLAGS += -I$(RTE_SDK)/drivers/net/ipn3ke
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_ifpga
+LDLIBS += -lpthread
+LDLIBS += -lfdt
+LDLIBS += -lrte_pmd_i40e
+LDLIBS += -lrte_pmd_ipn3ke
+
+EXPORT_MAP := rte_rawdev_ifpga_version.map
+
+VPATH += $(SRCDIR)/base
+
+include $(RTE_SDK)/drivers/raw/ifpga/base/Makefile
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/Makefile b/src/spdk/dpdk/drivers/raw/ifpga/base/Makefile
new file mode 100644
index 000000000..c5bbcbd5f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/Makefile
@@ -0,0 +1,32 @@
+#SPDX-License-Identifier: BSD-3-Clause
+#Copyright(c) 2010-2018 Intel Corporation
+
+ifneq ($(CONFIG_RTE_LIBRTE_EAL),)
+OSDEP := osdep_rte
+else
+OSDEP := osdep_raw
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev/base/$(OSDEP)
+
+SRCS-y += ifpga_api.c
+SRCS-y += ifpga_enumerate.c
+SRCS-y += ifpga_feature_dev.c
+SRCS-y += ifpga_fme.c
+SRCS-y += ifpga_fme_iperf.c
+SRCS-y += ifpga_fme_dperf.c
+SRCS-y += ifpga_fme_error.c
+SRCS-y += ifpga_port.c
+SRCS-y += ifpga_port_error.c
+SRCS-y += opae_hw_api.c
+SRCS-y += opae_ifpga_hw_api.c
+SRCS-y += opae_debug.c
+SRCS-y += ifpga_fme_pr.c
+SRCS-y += opae_spi.c
+SRCS-y += opae_spi_transaction.c
+SRCS-y += opae_intel_max10.c
+SRCS-y += opae_i2c.c
+SRCS-y += opae_at24_eeprom.c
+SRCS-y += opae_eth_group.c
+
+SRCS-y += $(wildcard $(SRCDIR)/base/$(OSDEP)/*.c)
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/README b/src/spdk/dpdk/drivers/raw/ifpga/base/README
new file mode 100644
index 000000000..6b2b171b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/README
@@ -0,0 +1,46 @@
+..
+
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+Intel iFPGA driver
+==================
+
+This directory contains the source code of the Intel FPGA driver released by
+the team which develops the Intel FPGA Open Programmable Acceleration Engine
+(OPAE). The base/ directory contains the original source package. The base
+code currently supports Intel FPGA solutions, including the integrated
+solution (Intel(R) Xeon(R) CPU with FPGAs) and the discrete solution
+(Intel(R) Programmable Acceleration Card with Intel(R) Arria(R) 10 FPGA),
+and it could be extended to support more FPGA devices in the future.
+
+Please refer to [1][2] for more information on OPAE and Intel FPGAs.
+
+[1] https://01.org/OPAE
+[2] https://www.altera.com/solutions/acceleration-hub/overview.html
+
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ osdep_raw/osdep_generic.h
+ osdep_rte/osdep_generic.h
+
+
+New Features
+============
+
+2019-03:
+Support for the Intel FPGA PAC N3000 card.
+Some features added in this version:
+1. Store private features in FME and Port lists.
+2. Add eth group device driver.
+3. Add Altera SPI master driver and Intel MAX10 device driver.
+4. Add Altera I2C master driver and AT24 EEPROM driver.
+5. Add Device Tree support to get the configuration from the card.
+6. Introduce and expose APIs to the DPDK PMD to access networking
+   functionality.
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_api.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_api.c
new file mode 100644
index 000000000..6dbd7159e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_api.c
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_api.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+#include "opae_hw_api.h"
+
+/* Accelerator APIs */
+static int ifpga_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return fpga_get_afu_uuid(port, uuid);
+}
+
+static int ifpga_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+ struct fpga_uafu_irq_set irq_set;
+
+	if (!br || !br->data)
+		return -EINVAL;
+
+	/* guard against a missing AFU info, as the other ops do */
+	if (!afu_info)
+		return -EINVAL;
+
+	if (start >= afu_info->num_irqs || start + count > afu_info->num_irqs)
+		return -EINVAL;
+
+ port = br->data;
+
+ irq_set.start = start;
+ irq_set.count = count;
+ irq_set.evtfds = evtfds;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_UINT, &irq_set);
+}
+
+static int ifpga_acc_get_info(struct opae_accelerator *acc,
+ struct opae_acc_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -ENODEV;
+
+ info->num_regions = afu_info->num_regions;
+ info->num_irqs = afu_info->num_irqs;
+
+ return 0;
+}
+
+static int ifpga_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (info->index >= afu_info->num_regions)
+ return -EINVAL;
+
+ /* always one RW region only for AFU now */
+ info->flags = ACC_REGION_READ | ACC_REGION_WRITE | ACC_REGION_MMIO;
+ info->len = afu_info->region[info->index].len;
+ info->addr = afu_info->region[info->index].addr;
+ info->phys_addr = afu_info->region[info->index].phys_addr;
+
+ return 0;
+}
+
+static int ifpga_acc_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ switch (byte) {
+ case 8:
+ *(u64 *)data = opae_readq(region->addr + offset);
+ break;
+ case 4:
+ *(u32 *)data = opae_readl(region->addr + offset);
+ break;
+ case 2:
+ *(u16 *)data = opae_readw(region->addr + offset);
+ break;
+ case 1:
+ *(u8 *)data = opae_readb(region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
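+
+/*
+ * For reference, a caller holding an accelerator wired to ifpga_acc_ops
+ * could read a 64-bit CSR at the start of region 0 roughly as follows
+ * ('acc' stands for a hypothetical struct opae_accelerator pointer):
+ *
+ *	u64 val;
+ *	int ret = ifpga_acc_read(acc, 0, 0x0, 8, &val);
+ */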
+
+static int ifpga_acc_write(struct opae_accelerator *acc,
+ unsigned int region_idx, u64 offset,
+ unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ /* normal mmio case */
+ switch (byte) {
+ case 8:
+ opae_writeq(*(u64 *)data, region->addr + offset);
+ break;
+ case 4:
+ opae_writel(*(u32 *)data, region->addr + offset);
+ break;
+ case 2:
+ opae_writew(*(u16 *)data, region->addr + offset);
+ break;
+ case 1:
+ opae_writeb(*(u8 *)data, region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct opae_accelerator_ops ifpga_acc_ops = {
+ .read = ifpga_acc_read,
+ .write = ifpga_acc_write,
+ .set_irq = ifpga_acc_set_irq,
+ .get_info = ifpga_acc_get_info,
+ .get_region_info = ifpga_acc_get_region_info,
+ .get_uuid = ifpga_acc_get_uuid,
+};
+
+/* Bridge APIs */
+static int ifpga_br_reset(struct opae_bridge *br)
+{
+ struct ifpga_port_hw *port = br->data;
+
+ return fpga_port_reset(port);
+}
+
+struct opae_bridge_ops ifpga_br_ops = {
+ .reset = ifpga_br_reset,
+};
+
+/* Manager APIs */
+static int ifpga_mgr_flash(struct opae_manager *mgr, int id, const char *buf,
+ u32 size, u64 *status)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+ struct ifpga_hw *hw = fme->parent;
+
+ return ifpga_pr(hw, id, buf, size, status);
+}
+
+static int ifpga_mgr_get_eth_group_region_info(struct opae_manager *mgr,
+ struct opae_eth_group_region_info *info)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ if (info->group_id >= MAX_ETH_GROUP_DEVICES)
+ return -EINVAL;
+
+ info->phys_addr = fme->eth_group_region[info->group_id].phys_addr;
+ info->addr = fme->eth_group_region[info->group_id].addr;
+ info->len = fme->eth_group_region[info->group_id].len;
+
+ info->mem_idx = fme->nums_acc_region + info->group_id;
+
+ return 0;
+}
+
+static int ifpga_mgr_get_sensor_value(struct opae_manager *mgr,
+ struct opae_sensor_info *sensor,
+ unsigned int *value)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_get_sensor_value(fme, sensor, value);
+}
+
+static int ifpga_mgr_get_board_info(struct opae_manager *mgr,
+ struct opae_board_info **info)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ *info = &fme->board_info;
+
+ return 0;
+}
+
+struct opae_manager_ops ifpga_mgr_ops = {
+ .flash = ifpga_mgr_flash,
+ .get_eth_group_region_info = ifpga_mgr_get_eth_group_region_info,
+ .get_sensor_value = ifpga_mgr_get_sensor_value,
+ .get_board_info = ifpga_mgr_get_board_info,
+};
+
+static int ifpga_mgr_read_mac_rom(struct opae_manager *mgr, int offset,
+ void *buf, int size)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_read_mac_rom(fme, offset, buf, size);
+}
+
+static int ifpga_mgr_write_mac_rom(struct opae_manager *mgr, int offset,
+ void *buf, int size)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_write_mac_rom(fme, offset, buf, size);
+}
+
+static int ifpga_mgr_get_eth_group_nums(struct opae_manager *mgr)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_get_eth_group_nums(fme);
+}
+
+static int ifpga_mgr_get_eth_group_info(struct opae_manager *mgr,
+ u8 group_id, struct opae_eth_group_info *info)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_get_eth_group_info(fme, group_id, info);
+}
+
+static int ifpga_mgr_eth_group_reg_read(struct opae_manager *mgr, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 *data)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_eth_group_read_reg(fme, group_id,
+ type, index, addr, data);
+}
+
+static int ifpga_mgr_eth_group_reg_write(struct opae_manager *mgr, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 data)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_eth_group_write_reg(fme, group_id,
+ type, index, addr, data);
+}
+
+static int ifpga_mgr_get_retimer_info(struct opae_manager *mgr,
+ struct opae_retimer_info *info)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_get_retimer_info(fme, info);
+}
+
+static int ifpga_mgr_get_retimer_status(struct opae_manager *mgr,
+ struct opae_retimer_status *status)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+
+ return fme_mgr_get_retimer_status(fme, status);
+}
+
+/* Network APIs in FME */
+struct opae_manager_networking_ops ifpga_mgr_network_ops = {
+ .read_mac_rom = ifpga_mgr_read_mac_rom,
+ .write_mac_rom = ifpga_mgr_write_mac_rom,
+ .get_eth_group_nums = ifpga_mgr_get_eth_group_nums,
+ .get_eth_group_info = ifpga_mgr_get_eth_group_info,
+ .eth_group_reg_read = ifpga_mgr_eth_group_reg_read,
+ .eth_group_reg_write = ifpga_mgr_eth_group_reg_write,
+ .get_retimer_info = ifpga_mgr_get_retimer_info,
+ .get_retimer_status = ifpga_mgr_get_retimer_status,
+};
+
+/* Adapter APIs */
+static int ifpga_adapter_enumerate(struct opae_adapter *adapter)
+{
+	struct ifpga_hw *hw = malloc(sizeof(*hw));
+
+	if (!hw)
+		return -ENOMEM;
+
+	opae_memset(hw, 0, sizeof(*hw));
+	hw->pci_data = adapter->data;
+	hw->adapter = adapter;
+	if (ifpga_bus_enumerate(hw)) {
+		/* drop the partially initialized hw to avoid leaking it */
+		opae_free(hw);
+		return -ENOMEM;
+	}
+
+	return ifpga_bus_init(hw);
+}
+
+struct opae_adapter_ops ifpga_adapter_ops = {
+ .enumerate = ifpga_adapter_enumerate,
+};
+
+/**
+ * ifpga_pr - do the partial reconfiguration for a given port device
+ * @hw: pointer to the HW structure
+ * @port_id: the port device id
+ * @buffer: the buffer of the bitstream
+ * @size: the size of the bitstream
+ * @status: hardware status, including the PR error code when -EIO is returned.
+ *
+ * @return
+ * - 0: Success, partial reconfiguration finished.
+ * - <0: Error code returned in partial reconfiguration.
+ **/
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer, u32 size,
+ u64 *status)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+
+ return do_pr(hw, port_id, buffer, size, status);
+}
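+
+/*
+ * A minimal call sketch; gbs_buf and gbs_len stand for a caller-provided
+ * bitstream buffer and its size:
+ *
+ *	u64 status = 0;
+ *	int ret = ifpga_pr(hw, 0, gbs_buf, gbs_len, &status);
+ *	if (ret == -EIO)
+ *		// status now carries the PR error code
+ */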
+
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_get_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_get_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set)
+{
+ if (!hw || !irq_set)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_irq(&hw->fme, feature_id, irq_set);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_irq(&hw->port[port_id], feature_id, irq_set);
+ }
+
+ return -ENOENT;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_api.h b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_api.h
new file mode 100644
index 000000000..051ab8276
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_api.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_API_H_
+#define _IFPGA_API_H_
+
+#include "opae_hw_api.h"
+#include "ifpga_hw.h"
+
+extern struct opae_adapter_ops ifpga_adapter_ops;
+extern struct opae_manager_ops ifpga_mgr_ops;
+extern struct opae_bridge_ops ifpga_br_ops;
+extern struct opae_accelerator_ops ifpga_acc_ops;
+extern struct opae_manager_networking_ops ifpga_mgr_network_ops;
+
+/* common APIs */
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set);
+
+/* FME APIs */
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer, u32 size,
+ u64 *status);
+
+#endif /* _IFPGA_API_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_compat.h b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_compat.h
new file mode 100644
index 000000000..cdeca70ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_compat.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMPAT_H_
+#define _IFPGA_COMPAT_H_
+
+#include "opae_osdep.h"
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
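+
+/*
+ * Usage sketch: recover the enclosing structure from a pointer to one of
+ * its members (the types here are illustrative):
+ *
+ *	struct ifpga_example {
+ *		int flags;
+ *		struct feature feat;
+ *	};
+ *
+ *	// given a pointer to the embedded member ...
+ *	struct feature *fp;
+ *	// ... recover the enclosing object:
+ *	struct ifpga_example *ex =
+ *		container_of(fp, struct ifpga_example, feat);
+ */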
+
+#define IFPGA_PAGE_SHIFT 12
+#define IFPGA_PAGE_SIZE (1 << IFPGA_PAGE_SHIFT)
+#define IFPGA_PAGE_MASK (~(IFPGA_PAGE_SIZE - 1))
+#define IFPGA_PAGE_ALIGN(addr) (((addr) + IFPGA_PAGE_SIZE - 1)\
+ & IFPGA_PAGE_MASK)
+#define IFPGA_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), IFPGA_PAGE_SIZE)
+
+#define readl(addr) opae_readl(addr)
+#define readq(addr) opae_readq(addr)
+#define writel(value, addr) opae_writel(value, addr)
+#define writeq(value, addr) opae_writeq(value, addr)
+
+#define malloc(size) opae_malloc(size)
+#define zmalloc(size) opae_zmalloc(size)
+#define free(size) opae_free(size)
+
+/*
+ * Wait register's _field to be changed to the given value (_expect's _field)
+ * by polling with given interval and timeout.
+ */
+#define fpga_wait_register_field(_field, _expect, _reg_addr, _timeout, _invl)\
+({ \
+ int wait = 0; \
+ int ret = -ETIMEDOUT; \
+ typeof(_expect) value; \
+ for (; wait <= _timeout; wait += _invl) { \
+ value.csr = readq(_reg_addr); \
+ if (_expect._field == value._field) { \
+ ret = 0; \
+ break; \
+ } \
+ udelay(_invl); \
+ } \
+ ret; \
+})
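+
+/*
+ * Usage sketch: poll for a port soft-reset acknowledgment (the CSR
+ * pointer 'port_hdr' and the timeout/interval values are illustrative):
+ *
+ *	struct feature_port_control expect;
+ *	int ret;
+ *
+ *	expect.csr = 0;
+ *	expect.port_sftrst_ack = 1;
+ *	ret = fpga_wait_register_field(port_sftrst_ack, expect,
+ *				       &port_hdr->control, 1000, 10);
+ */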
+
+#define __maybe_unused __rte_unused
+
+#define UNUSED(x) (void)(x)
+
+#endif /* _IFPGA_COMPAT_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_defines.h b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_defines.h
new file mode 100644
index 000000000..9f0147d1e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_defines.h
@@ -0,0 +1,1739 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_DEFINES_H_
+#define _IFPGA_DEFINES_H_
+
+#include "ifpga_compat.h"
+
+#define MAX_FPGA_PORT_NUM 4
+
+#define FME_FEATURE_HEADER "fme_hdr"
+#define FME_FEATURE_THERMAL_MGMT "fme_thermal"
+#define FME_FEATURE_POWER_MGMT "fme_power"
+#define FME_FEATURE_GLOBAL_IPERF "fme_iperf"
+#define FME_FEATURE_GLOBAL_ERR "fme_error"
+#define FME_FEATURE_PR_MGMT "fme_pr"
+#define FME_FEATURE_EMIF_MGMT "fme_emif"
+#define FME_FEATURE_HSSI_ETH "fme_hssi"
+#define FME_FEATURE_GLOBAL_DPERF "fme_dperf"
+#define FME_FEATURE_QSPI_FLASH "fme_qspi_flash"
+#define FME_FEATURE_MAX10_SPI "fme_max10_spi"
+#define FME_FEATURE_NIOS_SPI "fme_nios_spi"
+#define FME_FEATURE_I2C_MASTER "fme_i2c_master"
+#define FME_FEATURE_ETH_GROUP "fme_eth_group"
+
+#define PORT_FEATURE_HEADER "port_hdr"
+#define PORT_FEATURE_UAFU "port_uafu"
+#define PORT_FEATURE_ERR "port_err"
+#define PORT_FEATURE_UMSG "port_umsg"
+#define PORT_FEATURE_PR "port_pr"
+#define PORT_FEATURE_UINT "port_uint"
+#define PORT_FEATURE_STP "port_stp"
+
+/*
+ * Do not check the revision id, as the id may be dynamic in
+ * some cases, e.g., UAFU.
+ */
+#define SKIP_REVISION_CHECK 0xff
+
+#define FME_HEADER_REVISION 1
+#define FME_THERMAL_MGMT_REVISION 0
+#define FME_POWER_MGMT_REVISION 1
+#define FME_GLOBAL_IPERF_REVISION 1
+#define FME_GLOBAL_ERR_REVISION 1
+#define FME_PR_MGMT_REVISION 2
+#define FME_HSSI_ETH_REVISION 0
+#define FME_GLOBAL_DPERF_REVISION 0
+#define FME_QSPI_REVISION 0
+#define FME_MAX10_SPI 0
+#define FME_I2C_MASTER 0
+
+#define PORT_HEADER_REVISION 0
+/* UAFU's header info depends on the downloaded GBS */
+#define PORT_UAFU_REVISION SKIP_REVISION_CHECK
+#define PORT_ERR_REVISION 1
+#define PORT_UMSG_REVISION 0
+#define PORT_UINT_REVISION 0
+#define PORT_STP_REVISION 1
+
+#define FEATURE_TYPE_AFU 0x1
+#define FEATURE_TYPE_BBB 0x2
+#define FEATURE_TYPE_PRIVATE 0x3
+#define FEATURE_TYPE_FIU 0x4
+
+#define FEATURE_FIU_ID_FME 0x0
+#define FEATURE_FIU_ID_PORT 0x1
+
+/* Reserved: 0xfe for Header, 0xff for AFU */
+#define FEATURE_ID_FIU_HEADER 0xfe
+#define FEATURE_ID_AFU 0xff
+
+enum fpga_id_type {
+ FME_ID,
+ PORT_ID,
+ FPGA_ID_MAX,
+};
+
+#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define FME_FEATURE_ID_POWER_MGMT 0x2
+#define FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define FME_FEATURE_ID_PR_MGMT 0x5
+#define FME_FEATURE_ID_HSSI_ETH 0x6
+#define FME_FEATURE_ID_GLOBAL_DPERF 0x7
+#define FME_FEATURE_ID_QSPI_FLASH 0x8
+#define FME_FEATURE_ID_EMIF_MGMT 0x9
+#define FME_FEATURE_ID_MAX10_SPI 0xe
+#define FME_FEATURE_ID_NIOS_SPI 0xd
+#define FME_FEATURE_ID_I2C_MASTER 0xf
+#define FME_FEATURE_ID_ETH_GROUP 0x10
+
+#define PORT_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define PORT_FEATURE_ID_ERROR 0x10
+#define PORT_FEATURE_ID_UMSG 0x12
+#define PORT_FEATURE_ID_UINT 0x13
+#define PORT_FEATURE_ID_STP 0x14
+#define PORT_FEATURE_ID_UAFU FEATURE_ID_AFU
+
+/*
+ * All headers and structures must be byte-packed to match the spec.
+ */
+#pragma pack(push, 1)
+
+struct feature_header {
+ union {
+ u64 csr;
+ struct {
+ u16 id:12;
+ u8 revision:4;
+ u32 next_header_offset:24;
+ u8 end_of_list:1;
+ u32 reserved:19;
+ u8 type:4;
+ };
+ };
+};
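+
+/*
+ * Feature headers form a linked list in MMIO space: next_header_offset
+ * is the byte offset from the current header to the next one, and
+ * end_of_list marks the last entry. A walk over the list may look
+ * roughly like this ('base' being the mapped start of the first header):
+ *
+ *	struct feature_header hdr;
+ *	u8 *p = (u8 *)base;
+ *
+ *	for (;;) {
+ *		hdr.csr = readq(p);
+ *		// ... dispatch on hdr.type / hdr.id ...
+ *		if (hdr.end_of_list || !hdr.next_header_offset)
+ *			break;
+ *		p += hdr.next_header_offset;
+ *	}
+ */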
+
+struct feature_bbb_header {
+ struct uuid guid;
+};
+
+struct feature_afu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fiu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fme_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_verid; /* Fabric version ID */
+ u8 socket_id:1; /* Socket id */
+ u8 rsvd1:3; /* Reserved */
+			/* pci0 link available yes/no */
+			u8 pci0_link_avile:1;
+			/* pci1 link available yes/no */
+			u8 pci1_link_avile:1;
+			/* Coherent (QPI/UPI) link available yes/no */
+			u8 qpi_link_avile:1;
+ u8 rsvd2:1; /* Reserved */
+ /* IOMMU or VT-d supported yes/no */
+ u8 iommu_support:1;
+ u8 num_ports:3; /* Number of ports */
+ u8 sf_fab_ctl:1; /* Internal validation bit */
+ u8 rsvd3:3; /* Reserved */
+ /*
+ * Address width supported in bits
+			 * BXT - 0x26, SKX - 0x30
+ */
+ u8 address_width_bits:6;
+ u8 rsvd4:2; /* Reserved */
+ /* Size of cache supported in kb */
+ u16 cache_size:12;
+ u8 cache_assoc:4; /* Cache Associativity */
+ u16 rsvd5:15; /* Reserved */
+ u8 lock_bit:1; /* Lock bit */
+ };
+ };
+};
+
+#define FME_AFU_ACCESS_PF 0
+#define FME_AFU_ACCESS_VF 1
+
+struct feature_fme_port {
+ union {
+ u64 csr;
+ struct {
+ u32 port_offset:24;
+ u8 reserved1;
+ u8 port_bar:3;
+ u32 reserved2:20;
+ u8 afu_access_control:1;
+ u8 reserved3:4;
+ u8 port_implemented:1;
+ u8 reserved4:3;
+ };
+ };
+};
+
+struct feature_fme_fab_status {
+ union {
+ u64 csr;
+ struct {
+ u8 upilink_status:4; /* UPI Link Status */
+ u8 rsvd1:4; /* Reserved */
+ u8 pci0link_status:1; /* pci0 link status */
+ u8 rsvd2:3; /* Reserved */
+ u8 pci1link_status:1; /* pci1 link status */
+ u64 rsvd3:51; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_base {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Base Address of memory range */
+ u8 protected_base_addrss:4;
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_limit {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Limit Address of memory range */
+ u8 protected_limit_addrss:4;
+ u16 rsvd2:11; /* Reserved */
+ u8 enable:1; /* Enable GENPROTRANGE check */
+ u32 rsvd3; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_dxe_lock {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Determines write access to the DXE region CSRs
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_early_lock:1;
+ /*
+ * Determines write access to the HSSI CSR
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_late_lock:1;
+ u64 rsvd:62;
+ };
+ };
+};
+
+#define HSSI_ID_NO_HASSI 0
+#define HSSI_ID_PCIE_RP 1
+#define HSSI_ID_ETHERNET 2
+
+struct feature_fme_bitstream_id {
+ union {
+ u64 csr;
+ struct {
+ u32 gitrepo_hash:32; /* GIT repository hash */
+ /*
+ * HSSI configuration identifier:
+ * 0 - No HSSI
+ * 1 - PCIe-RP
+ * 2 - Ethernet
+ */
+ u8 hssi_id:4;
+ u16 rsvd1:12; /* Reserved */
+ /* Bitstream version patch number */
+ u8 bs_verpatch:4;
+ /* Bitstream version minor number */
+ u8 bs_verminor:4;
+ /* Bitstream version major number */
+ u8 bs_vermajor:4;
+ /* Bitstream version debug number */
+ u8 bs_verdebug:4;
+ };
+ };
+};
+
+struct feature_fme_bitstream_md {
+ union {
+ u64 csr;
+ struct {
+			/* Seed number used for the synthesis flow */
+ u8 synth_seed:4;
+ /* Synthesis date(day number - 2 digits) */
+ u8 synth_day:8;
+ /* Synthesis date(month number - 2 digits) */
+ u8 synth_month:8;
+ /* Synthesis date(year number - 2 digits) */
+ u8 synth_year:8;
+ u64 rsvd:36; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_iommu_ctrl {
+ union {
+ u64 csr;
+ struct {
+ /* Disables IOMMU prefetcher for C0 channel */
+ u8 prefetch_disableC0:1;
+ /* Disables IOMMU prefetcher for C1 channel */
+ u8 prefetch_disableC1:1;
+ /* Disables IOMMU partial cache line writes */
+ u8 prefetch_wrdisable:1;
+ u8 rsvd1:1; /* Reserved */
+ /*
+ * Select counter and read value from register
+ * iommu_stat.dbg_counters
+ * 0 - Number of 4K page translation response
+ * 1 - Number of 2M page translation response
+ * 2 - Number of 1G page translation response
+ */
+ u8 counter_sel:2;
+ u32 rsvd2:26; /* Reserved */
+ /* Connected to IOMMU SIP Capabilities */
+ u32 capecap_defeature;
+ };
+ };
+};
+
+struct feature_fme_iommu_stat {
+ union {
+ u64 csr;
+ struct {
+ /* Translation Enable bit from IOMMU SIP */
+ u8 translation_enable:1;
+ /* Drain request in progress */
+ u8 drain_req_inprog:1;
+ /* Invalidation current state */
+ u8 inv_state:3;
+ /* C0 Response Buffer current state */
+ u8 respbuffer_stateC0:3;
+ /* C1 Response Buffer current state */
+ u8 respbuffer_stateC1:3;
+ /* Last request ID to IOMMU SIP */
+ u8 last_reqID:4;
+ /* Last IOMMU SIP response ID value */
+ u8 last_respID:4;
+ /* Last IOMMU SIP response status value */
+ u8 last_respstatus:3;
+ /* C0 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC0:1;
+ /* C1 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC1:1;
+ /* C0 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC0:1;
+ /* C1 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC1:1;
+ /* C0 Response FIFO is not empty */
+ u8 respFIFO_notemptyC0:1;
+ /* C1 Response FIFO is not empty */
+ u8 respFIFO_notemptyC1:1;
+ /* C0 Response FIFO overflow detected */
+ u8 respFIFO_overflowC0:1;
+ /* C1 Response FIFO overflow detected */
+ u8 respFIFO_overflowC1:1;
+ /* C0 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC0:1;
+ /* C1 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC1:1;
+ /* Request FIFO overflow detected */
+ u8 reqFIFO_overflow:1;
+ /* IOMMU memory read in progress */
+ u8 memrd_inprog:1;
+ /* IOMMU memory write in progress */
+ u8 memwr_inprog:1;
+ u8 rsvd1:1; /* Reserved */
+ /* Value of counter selected by iommu_ctl.counter_sel */
+ u16 dbg_counters:16;
+ u16 rsvd2:12; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_pcie0_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_bar_lock:1; /* Lock VT-D BAR register */
+ u64 rsvd1:3;
+ u64 rciep:1; /* Configure PCIE0 as RCiEP */
+ u64 rsvd2:59;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR2 memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR rule is valid or not */
+ /*
+ * SMRR memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR2 memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR2 rule is valid or not */
+ /*
+ * SMRR2 memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_base {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of base address memory range */
+ u64 me_base:27;
+ u64 rsvd:37;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_limit {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of limit address memory range */
+ u64 me_limit:27;
+ u64 rsvd1:4;
+ u64 enable:1; /* Enable LLPR MESEG rule */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 reserved;
+ u64 scratchpad;
+ struct feature_fme_capability capability;
+ struct feature_fme_port port[MAX_FPGA_PORT_NUM];
+ struct feature_fme_fab_status fab_status;
+ struct feature_fme_bitstream_id bitstream_id;
+ struct feature_fme_bitstream_md bitstream_md;
+ struct feature_fme_genprotrange2_base genprotrange2_base;
+ struct feature_fme_genprotrange2_limit genprotrange2_limit;
+ struct feature_fme_dxe_lock dxe_lock;
+ struct feature_fme_iommu_ctrl iommu_ctrl;
+ struct feature_fme_iommu_stat iommu_stat;
+ struct feature_fme_pcie0_ctrl pcie0_control;
+ struct feature_fme_llpr_smrr_base smrr_base;
+ struct feature_fme_llpr_smrr_mask smrr_mask;
+ struct feature_fme_llpr_smrr2_base smrr2_base;
+ struct feature_fme_llpr_smrr2_mask smrr2_mask;
+ struct feature_fme_llpr_meseg_base meseg_base;
+ struct feature_fme_llpr_meseg_limit meseg_limit;
+};
+
+struct feature_port_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 port_number:2; /* Port Number 0-3 */
+ u8 rsvd1:6; /* Reserved */
+ u16 mmio_size; /* User MMIO size in KB */
+ u8 rsvd2; /* Reserved */
+ u8 sp_intr_num:4; /* Supported interrupts num */
+ u32 rsvd3:28; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_control {
+ union {
+ u64 csr;
+ struct {
+ u8 port_sftrst:1; /* Port Soft Reset */
+ u8 rsvd1:1; /* Reserved */
+ u8 latency_tolerance:1;/* '1' >= 40us, '0' < 40us */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_sftrst_ack:1; /* HW ACK for Soft Reset */
+ u64 rsvd3:59; /* Reserved */
+ };
+ };
+};
+
+#define PORT_POWER_STATE_NORMAL 0
+#define PORT_POWER_STATE_AP1 1
+#define PORT_POWER_STATE_AP2 2
+#define PORT_POWER_STATE_AP6 6
+
+struct feature_port_status {
+ union {
+ u64 csr;
+ struct {
+			u8 port_freeze:1; /* '1' - frozen, '0' - normal */
+ u8 rsvd1:7; /* Reserved */
+ u8 power_state:4; /* Power State */
+ u8 ap1_event:1; /* AP1 event was detected */
+ u8 ap2_event:1; /* AP2 event was detected */
+ u64 rsvd2:50; /* Reserved */
+ };
+ };
+};
+
+/* Port Header Register Set */
+struct feature_port_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 port_mailbox;
+ u64 scratchpad;
+ struct feature_port_capability capability;
+ struct feature_port_control control;
+ struct feature_port_status status;
+ u64 rsvd2;
+ u64 user_clk_freq_cmd0;
+ u64 user_clk_freq_cmd1;
+ u64 user_clk_freq_sts0;
+ u64 user_clk_freq_sts1;
+};
+
+struct feature_fme_tmp_threshold {
+ union {
+ u64 csr;
+ struct {
+ u8 tmp_thshold1:7; /* temperature Threshold 1 */
+ /* temperature Threshold 1 enable/disable */
+ u8 tmp_thshold1_enable:1;
+ u8 tmp_thshold2:7; /* temperature Threshold 2 */
+ /* temperature Threshold 2 enable /disable */
+ u8 tmp_thshold2_enable:1;
+ u8 pro_hot_setpoint:7; /* Proc Hot set point */
+ u8 rsvd4:1; /* Reserved */
+			u8 therm_trip_thshold:7; /* Thermal Trip Threshold */
+ u8 rsvd3:1; /* Reserved */
+ u8 thshold1_status:1; /* Threshold 1 Status */
+ u8 thshold2_status:1; /* Threshold 2 Status */
+ u8 rsvd5:1; /* Reserved */
+			/* Thermal Trip Threshold status */
+ u8 therm_trip_thshold_status:1;
+ u8 rsvd6:4; /* Reserved */
+ /* Validation mode- Force Proc Hot */
+ u8 valmodeforce:1;
+ /* Validation mode - Therm trip Hot */
+ u8 valmodetherm:1;
+ u8 rsvd2:2; /* Reserved */
+ u8 thshold_policy:1; /* threshold policy */
+ u32 rsvd:19; /* Reserved */
+ };
+ };
+};
+
+/* Temperature Sensor Read values format 1 */
+struct feature_fme_temp_rdsensor_fmt1 {
+ union {
+ u64 csr;
+ struct {
+			/* Reads out the FPGA temperature in Celsius */
+ u8 fpga_temp:7;
+ u8 rsvd0:1; /* Reserved */
+ /* Temperature reading sequence number */
+ u16 tmp_reading_seq_num;
+ /* Temperature reading is valid */
+ u8 tmp_reading_valid:1;
+ u8 rsvd1:7; /* Reserved */
+ u16 dbg_mode:10; /* Debug mode */
+ u32 rsvd2:22; /* Reserved */
+ };
+ };
+};
+
+/* Temperature sensor read values format 2 */
+struct feature_fme_temp_rdsensor_fmt2 {
+ u64 rsvd; /* Reserved */
+};
+
+/* Temperature Threshold Capability Register */
+struct feature_fme_tmp_threshold_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Temperature Threshold Unsupported */
+ u8 tmp_thshold_disabled:1;
+ u64 rsvd:63; /* Reserved */
+ };
+ };
+};
+
+/* FME THERMAL FEATURE */
+struct feature_fme_thermal {
+ struct feature_header header;
+ struct feature_fme_tmp_threshold threshold;
+ struct feature_fme_temp_rdsensor_fmt1 rdsensor_fm1;
+ struct feature_fme_temp_rdsensor_fmt2 rdsensor_fm2;
+ struct feature_fme_tmp_threshold_cap threshold_cap;
+};
+
+/* Power Status register */
+struct feature_fme_pm_status {
+ union {
+ u64 csr;
+ struct {
+			/* FPGA power consumed. The format is to be defined */
+ u32 pwr_consumed:18;
+ /* FPGA Latency Tolerance Reporting */
+ u8 fpga_latency_report:1;
+ u64 rsvd:45; /* Reserved */
+ };
+ };
+};
+
+/* AP Thresholds */
+struct feature_fme_pm_ap_threshold {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Number of clocks (5ns period) for assertion
+ * of FME_data
+ */
+ u8 threshold1:7;
+ u8 rsvd1:1;
+ u8 threshold2:7;
+ u8 rsvd2:1;
+ u8 threshold1_status:1;
+ u8 threshold2_status:1;
+ u64 rsvd3:46; /* Reserved */
+ };
+ };
+};
+
+/* Xeon Power Limit */
+struct feature_fme_pm_xeon_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+			/* 0 - Turbo range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FPGA Power Limit */
+struct feature_fme_pm_fpga_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+			/* 0 - Turbo range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FME POWER FEATURE */
+struct feature_fme_power {
+ struct feature_header header;
+ struct feature_fme_pm_status status;
+ struct feature_fme_pm_ap_threshold threshold;
+ struct feature_fme_pm_xeon_limit xeon_limit;
+ struct feature_fme_pm_fpga_limit fpga_limit;
+};
+
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+
+enum iperf_cache_events {
+ IPERF_CACHE_RD_HIT,
+ IPERF_CACHE_WR_HIT,
+ IPERF_CACHE_RD_MISS,
+ IPERF_CACHE_WR_MISS,
+ IPERF_CACHE_RSVD, /* reserved */
+ IPERF_CACHE_HOLD_REQ,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN,
+ IPERF_CACHE_TX_REQ_STALL,
+ IPERF_CACHE_RX_REQ_STALL,
+ IPERF_CACHE_EVICTIONS,
+};
+
+/* FPMON Cache Control */
+struct feature_fme_ifpmon_ch_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd1:7; /* Reserved */
+ u8 freeze:1; /* Freeze if set to 1 */
+ u8 rsvd2:7; /* Reserved */
+ u8 cache_event:4; /* Select the cache event */
+ u8 cci_chsel:1; /* Select the channel */
+ u64 rsvd3:43; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Cache Counter */
+struct feature_fme_ifpmon_ch_ctr {
+ union {
+ u64 csr;
+ struct {
+			/* Cache Counter for even addresses */
+ u64 cache_counter:48;
+ u16 rsvd:12; /* Reserved */
+ /* Cache Event being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+enum iperf_fab_events {
+ IPERF_FAB_PCIE0_RD,
+ IPERF_FAB_PCIE0_WR,
+ IPERF_FAB_PCIE1_RD,
+ IPERF_FAB_PCIE1_WR,
+ IPERF_FAB_UPI_RD,
+ IPERF_FAB_UPI_WR,
+ IPERF_FAB_MMIO_RD,
+ IPERF_FAB_MMIO_WR,
+};
+
+#define FAB_DISABLE_FILTER 0
+#define FAB_ENABLE_FILTER 1
+
+/* FPMON FAB Control */
+struct feature_fme_ifpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1; /* Freeze counter if set to 1 */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_ifpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_ifpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+enum iperf_vtd_events {
+ IPERF_VTD_AFU_MEM_RD_TRANS,
+ IPERF_VTD_AFU_MEM_WR_TRANS,
+ IPERF_VTD_AFU_DEVTLB_RD_HIT,
+ IPERF_VTD_AFU_DEVTLB_WR_HIT,
+ IPERF_VTD_DEVTLB_4K_FILL,
+ IPERF_VTD_DEVTLB_2M_FILL,
+ IPERF_VTD_DEVTLB_1G_FILL,
+};
+
+/* VT-d control register */
+struct feature_fme_ifpmon_vtd_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1; /* Freeze counter if set to 1 */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d event counter */
+struct feature_fme_ifpmon_vtd_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+enum iperf_vtd_sip_events {
+ IPERF_VTD_SIP_IOTLB_4K_HIT,
+ IPERF_VTD_SIP_IOTLB_2M_HIT,
+ IPERF_VTD_SIP_IOTLB_1G_HIT,
+ IPERF_VTD_SIP_SLPWC_L3_HIT,
+ IPERF_VTD_SIP_SLPWC_L4_HIT,
+ IPERF_VTD_SIP_RCC_HIT,
+ IPERF_VTD_SIP_IOTLB_4K_MISS,
+ IPERF_VTD_SIP_IOTLB_2M_MISS,
+ IPERF_VTD_SIP_IOTLB_1G_MISS,
+ IPERF_VTD_SIP_SLPWC_L3_MISS,
+ IPERF_VTD_SIP_SLPWC_L4_MISS,
+ IPERF_VTD_SIP_RCC_MISS,
+};
+
+/* VT-d SIP control register */
+struct feature_fme_ifpmon_vtd_sip_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1; /* Freeze counter if set to 1 */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d SIP event counter */
+struct feature_fme_ifpmon_vtd_sip_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+/* FME IPERF FEATURE */
+struct feature_fme_iperf {
+ struct feature_header header;
+ struct feature_fme_ifpmon_ch_ctl ch_ctl;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr0;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr1;
+ struct feature_fme_ifpmon_fab_ctl fab_ctl;
+ struct feature_fme_ifpmon_fab_ctr fab_ctr;
+ struct feature_fme_ifpmon_clk_ctr clk;
+ struct feature_fme_ifpmon_vtd_ctl vtd_ctl;
+ struct feature_fme_ifpmon_vtd_ctr vtd_ctr;
+ struct feature_fme_ifpmon_vtd_sip_ctl vtd_sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr vtd_sip_ctr;
+};
+
+enum dperf_fab_events {
+ DPERF_FAB_PCIE0_RD,
+ DPERF_FAB_PCIE0_WR,
+ DPERF_FAB_MMIO_RD = 6,
+ DPERF_FAB_MMIO_WR,
+};
+
+/* FPMON FAB Control */
+struct feature_fme_dfpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1; /* Freeze counter if set to 1 */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_dfpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_dfpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+/* FME DPERF FEATURE */
+struct feature_fme_dperf {
+ struct feature_header header;
+ u64 rsvd[3];
+ struct feature_fme_dfpmon_fab_ctl fab_ctl;
+ struct feature_fme_dfpmon_fab_ctr fab_ctr;
+ struct feature_fme_dfpmon_clk_ctr clk;
+};
+
+struct feature_fme_error0 {
+#define FME_ERROR0_MASK_DEFAULT 0x40UL /* pcode workaround */
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_err:1; /* Fabric error */
+ u8 fabfifo_overflow:1; /* Fabric fifo overflow */
+ u8 reserved2:3;
+ /* AFU PF/VF access mismatch detected */
+ u8 afu_acc_mode_err:1;
+ u8 reserved6:1;
+ /* PCIE0 CDC Parity Error */
+ u8 pcie0cdc_parity_err:5;
+ /* PCIE1 CDC Parity Error */
+ u8 pcie1cdc_parity_err:5;
+ /* CVL CDC Parity Error */
+ u8 cvlcdc_parity_err:3;
+ u8 fpgaseuerr:1;
+ u64 rsvd:43; /* Reserved */
+ };
+ };
+};
+
+/* PCIe0 Error Status register */
+struct feature_fme_pcie0_error {
+#define FME_PCIE0_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:52; /* Reserved */
+ u8 vfnumb_err:1; /* Number of error VF */
+ u8 funct_type_err:1; /* Virtual (1) or Physical */
+ };
+ };
+};
+
+/* PCIe1 Error Status register */
+struct feature_fme_pcie1_error {
+#define FME_PCIE1_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:54; /* Reserved */
+ };
+ };
+};
+
+/* FME First Error register */
+struct feature_fme_first_error {
+#define FME_FIRST_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Holds 60 LSBs from the Error register that was
+ * triggered first
+ */
+ u64 err_reg_status:60;
+ /*
+ * Indicates the Error Register that was
+ * triggered first
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* FME Next Error register */
+struct feature_fme_next_error {
+#define FME_NEXT_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Holds 60 LSBs from the Error register that was
+ * triggered second
+ */
+ u64 err_reg_status:60;
+ /*
+ * Indicates the Error Register that was
+ * triggered second
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* RAS Non Fatal Error Status register */
+struct feature_fme_ras_nonfaterror {
+ union {
+ u64 csr;
+ struct {
+ /* thermal threshold AP1 */
+ u8 temp_thresh_ap1:1;
+ /* thermal threshold AP2 */
+ u8 temp_thresh_ap2:1;
+ u8 pcie_error:1; /* pcie Error */
+ u8 portfatal_error:1; /* port fatal error */
+ u8 proc_hot:1; /* Indicates a ProcHot event */
+ /* Indicates an AFU PF/VF access mismatch */
+ u8 afu_acc_mode_err:1;
+ /* Injected nonfatal error */
+ u8 injected_nonfata_err:1;
+ u8 rsvd1:2;
+ /* Temperature threshold triggered AP6*/
+ u8 temp_thresh_AP6:1;
+ /* Power threshold triggered AP1 */
+ u8 power_thresh_AP1:1;
+ /* Power threshold triggered AP2 */
+ u8 power_thresh_AP2:1;
+ /* Indicates a MBP event */
+ u8 mbp_err:1;
+ u64 rsvd2:51; /* Reserved */
+ };
+ };
+};
+
+/* RAS Catastrophic Fatal Error Status register */
+struct feature_fme_ras_catfaterror {
+ union {
+ u64 csr;
+ struct {
+ /* KTI Link layer error detected */
+ u8 ktilink_fatal_err:1;
+ /* tag-n-cache error detected */
+ u8 tagcch_fatal_err:1;
+ /* CCI error detected */
+ u8 cci_fatal_err:1;
+ /* KTI Protocol error detected */
+ u8 ktiprpto_fatal_err:1;
+ /* Fatal DRAM error detected */
+ u8 dram_fatal_err:1;
+ /* IOMMU fatal error detected */
+ u8 iommu_fatal_err:1;
+ /* Fabric Fatal Error */
+ u8 fabric_fatal_err:1;
+ /* PCIe poison error */
+ u8 pcie_poison_err:1;
+ /* Injected fatal Error */
+ u8 inject_fata_err:1;
+ /* Catastrophic CRC Error */
+ u8 crc_catast_err:1;
+ /* Catastrophic Thermal Error */
+ u8 therm_catast_err:1;
+ /* Injected Catastrophic Error */
+ u8 injected_catast_err:1;
+ /* SEU error on BMC */
+ u8 bmc_seu_catast_err:1;
+ u64 rsvd:51;
+ };
+ };
+};
+
+/* RAS Error injection register */
+struct feature_fme_ras_error_inj {
+#define FME_RAS_ERROR_INJ_MASK 0x7UL
+ union {
+ u64 csr;
+ struct {
+ u8 catast_error:1; /* Catastrophic error flag */
+ u8 fatal_error:1; /* Fatal error flag */
+ u8 nonfatal_error:1; /* NonFatal error flag */
+ u64 rsvd:61; /* Reserved */
+ };
+ };
+};
+
+/* FME error capabilities */
+struct feature_fme_error_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:50; /* Reserved */
+ u64 seu_support:1;
+ };
+ };
+};
+
+/* FME ERR FEATURE */
+struct feature_fme_err {
+ struct feature_header header;
+ struct feature_fme_error0 fme_err_mask;
+ struct feature_fme_error0 fme_err;
+ struct feature_fme_pcie0_error pcie0_err_mask;
+ struct feature_fme_pcie0_error pcie0_err;
+ struct feature_fme_pcie1_error pcie1_err_mask;
+ struct feature_fme_pcie1_error pcie1_err;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ struct feature_fme_ras_nonfaterror ras_nonfat_mask;
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+ struct feature_fme_ras_catfaterror ras_catfat_mask;
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+ struct feature_fme_ras_error_inj ras_error_inj;
+ struct feature_fme_error_capability fme_err_capability;
+ u64 seu_emr_l;
+ u64 seu_emr_h;
+};
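+
+/*
+ * A minimal sketch of how the mask registers pair with the status
+ * registers: the mask layout mirrors the status layout, so masking the
+ * default set of FME errors is a single CSR write ("fme_err" is a
+ * hypothetical pointer to the mapped feature):
+ *
+ *	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ */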
+
+/* FME Partial Reconfiguration Control */
+struct feature_fme_pr_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 pr_reset:1; /* Reset PR Engine */
+ u8 rsvd3:3; /* Reserved */
+ u8 pr_reset_ack:1; /* Reset PR Engine Ack */
+ u8 rsvd4:3; /* Reserved */
+ u8 pr_regionid:2; /* PR Region ID */
+ u8 rsvd1:2; /* Reserved */
+ u8 pr_start_req:1; /* PR Start Request */
+ u8 pr_push_complete:1; /* PR Data push complete */
+ u8 pr_kind:1; /* PR Kind */
+ u32 rsvd:17; /* Reserved */
+ u32 config_data; /* Config data TBD */
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Status */
+struct feature_fme_pr_status {
+ union {
+ u64 csr;
+ struct {
+ u16 pr_credit:9; /* PR Credits */
+ u8 rsvd2:7; /* Reserved */
+ u8 pr_status:1; /* PR status */
+ u8 rsvd:3; /* Reserved */
+ /* Altera PR Controller Block status */
+ u8 pr_controller_status:3;
+ u8 rsvd1:1; /* Reserved */
+ u8 pr_host_status:4; /* PR Host status */
+ u8 rsvd3:4; /* Reserved */
+ /* Security Block Status fields (TBD) */
+ u32 security_bstatus;
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Data */
+struct feature_fme_pr_data {
+ union {
+ u64 csr; /* PR data from the raw-binary file */
+ struct {
+ /* PR data from the raw-binary file */
+ u32 pr_data_raw;
+ u32 rsvd;
+ };
+ };
+};
+
+/* FME PR Public Key */
+struct feature_fme_pr_key {
+ u64 key; /* FME PR Public Hash */
+};
+
+/* FME PR FEATURE */
+struct feature_fme_pr {
+ struct feature_header header;
+ /*Partial Reconfiguration control */
+ struct feature_fme_pr_ctl ccip_fme_pr_control;
+
+ /* Partial Reconfiguration Status */
+ struct feature_fme_pr_status ccip_fme_pr_status;
+
+ /* Partial Reconfiguration data */
+ struct feature_fme_pr_data ccip_fme_pr_data;
+
+ /* Partial Reconfiguration data */
+ u64 ccip_fme_pr_err;
+
+ u64 rsvd1[3];
+
+ /* Partial Reconfiguration data registers */
+ u64 fme_pr_data1;
+ u64 fme_pr_data2;
+ u64 fme_pr_data3;
+ u64 fme_pr_data4;
+ u64 fme_pr_data5;
+ u64 fme_pr_data6;
+ u64 fme_pr_data7;
+ u64 fme_pr_data8;
+
+ u64 rsvd2[5];
+
+ /* PR Interface ID */
+ u64 fme_pr_intfc_id_l;
+ u64 fme_pr_intfc_id_h;
+
+ /* MSI-X field to be added */
+};
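+
+/*
+ * PR flow sketch (an assumption for illustration, not the driver's
+ * authoritative sequence): the host polls pr_credit before pushing each
+ * 32-bit chunk of the green bitstream through the data register
+ * ("pr" and "chunk" are hypothetical locals):
+ *
+ *	struct feature_fme_pr_status st;
+ *
+ *	do {
+ *		st.csr = readq(&pr->ccip_fme_pr_status);
+ *	} while (!st.pr_credit);
+ *	writeq(chunk, &pr->ccip_fme_pr_data);
+ */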
+
+/* FME HSSI Control */
+struct feature_fme_hssi_eth_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u16 address:16; /* HSSI address */
+ /*
+ * HSSI command
+ * 0x0 - No request
+ * 0x08 - SW register RD request
+ * 0x10 - SW register WR request
+ * 0x40 - Auxiliary bus RD request
+ * 0x80 - Auxiliary bus WR request
+ */
+ u16 cmd:16;
+ };
+ };
+};
+
+/* FME HSSI Status */
+struct feature_fme_hssi_eth_stat {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u8 acknowledge:1; /* HSSI acknowledge */
+ u8 spare:1; /* HSSI spare */
+ u32 rsvd:30; /* Reserved */
+ };
+ };
+};
+
+/* FME HSSI FEATURE */
+struct feature_fme_hssi {
+ struct feature_header header;
+ struct feature_fme_hssi_eth_ctrl hssi_control;
+ struct feature_fme_hssi_eth_stat hssi_status;
+};
+
+#define PORT_ERR_MASK 0xfff0703ff001f
+struct feature_port_err_key {
+ union {
+ u64 csr;
+ struct {
+ /* Tx Channel0: Overflow */
+ u8 tx_ch0_overflow:1;
+ /* Tx Channel0: Invalid request encoding */
+ u8 tx_ch0_invaldreq :1;
+ /* Tx Channel0: Request with cl_len=3 not supported */
+ u8 tx_ch0_cl_len3:1;
+ /* Tx Channel0: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch0_cl_len2:1;
+ /* Tx Channel0: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch0_cl_len4:1;
+
+ u16 rsvd1:4; /* Reserved */
+
+ /* AFU MMIO RD received while PORT is in reset */
+ u8 mmio_rd_whilerst:1;
+ /* AFU MMIO WR received while PORT is in reset */
+ u8 mmio_wr_whilerst:1;
+
+ u16 rsvd2:5; /* Reserved */
+
+ /* Tx Channel1: Overflow */
+ u8 tx_ch1_overflow:1;
+ /* Tx Channel1: Invalid request encoding */
+ u8 tx_ch1_invaldreq:1;
+ /* Tx Channel1: Request with cl_len=3 not supported */
+ u8 tx_ch1_cl_len3:1;
+ /* Tx Channel1: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch1_cl_len2:1;
+ /* Tx Channel1: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch1_cl_len4:1;
+
+ /* Tx Channel1: Insufficient data payload */
+ u8 tx_ch1_insuff_data:1;
+ /* Tx Channel1: Data payload overrun */
+ u8 tx_ch1_data_overrun:1;
+ /* Tx Channel1 : Incorrect address */
+ u8 tx_ch1_incorr_addr:1;
+ /* Tx Channel1 : NON-Zero SOP Detected */
+ u8 tx_ch1_nzsop:1;
+ /* Tx Channel1 : Illegal VC_SEL, atomic request VLO */
+ u8 tx_ch1_illegal_vcsel:1;
+
+ u8 rsvd3:6; /* Reserved */
+
+ /* MMIO Read Timeout in AFU */
+ u8 mmioread_timeout:1;
+
+ /* Tx Channel2: FIFO Overflow */
+ u8 tx_ch2_fifo_overflow:1;
+
+ /* MMIO read is not matching pending request */
+ u8 unexp_mmio_resp:1;
+
+ u8 rsvd4:5; /* Reserved */
+
+ /* Number of pending Requests: counter overflow */
+ u8 tx_req_counter_overflow:1;
+ /* Req with Address violating SMM Range */
+ u8 llpr_smrr_err:1;
+ /* Req with Address violating second SMM Range */
+ u8 llpr_smrr2_err:1;
+ /* Req with Address violating ME Stolen message */
+ u8 llpr_mesg_err:1;
+ /* Req with Address violating Generic Protected Range */
+ u8 genprot_range_err:1;
+ /* Req with Address violating Legacy Range low */
+ u8 legrange_low_err:1;
+ /* Req with Address violating Legacy Range High */
+ u8 legrange_high_err:1;
+ /* Req with Address violating VGA memory range */
+ u8 vgmem_range_err:1;
+ u8 page_fault_err:1; /* Page fault */
+ u8 pmr_err:1; /* PMR Error */
+ u8 ap6_event:1; /* AP6 event */
+ /* VF FLR detected on Port with PF access control */
+ u8 vfflr_access_err:1;
+ u16 rsvd5:12; /* Reserved */
+ };
+ };
+};
+
+/* Port first error register; does not contain all bits of the error register. */
+struct feature_port_first_err_key {
+ union {
+ u64 csr;
+ struct {
+ u8 tx_ch0_overflow:1;
+ u8 tx_ch0_invaldreq :1;
+ u8 tx_ch0_cl_len3:1;
+ u8 tx_ch0_cl_len2:1;
+ u8 tx_ch0_cl_len4:1;
+ u8 rsvd1:4; /* Reserved */
+ u8 mmio_rd_whilerst:1;
+ u8 mmio_wr_whilerst:1;
+ u8 rsvd2:5; /* Reserved */
+ u8 tx_ch1_overflow:1;
+ u8 tx_ch1_invaldreq:1;
+ u8 tx_ch1_cl_len3:1;
+ u8 tx_ch1_cl_len2:1;
+ u8 tx_ch1_cl_len4:1;
+ u8 tx_ch1_insuff_data:1;
+ u8 tx_ch1_data_overrun:1;
+ u8 tx_ch1_incorr_addr:1;
+ u8 tx_ch1_nzsop:1;
+ u8 tx_ch1_illegal_vcsel:1;
+ u8 rsvd3:6; /* Reserved */
+ u8 mmioread_timeout:1;
+ u8 tx_ch2_fifo_overflow:1;
+ u8 rsvd4:6; /* Reserved */
+ u8 tx_req_counter_overflow:1;
+ u32 rsvd5:23; /* Reserved */
+ };
+ };
+};
+
+/* Port malformed Req0 */
+struct feature_port_malformed_req0 {
+ u64 header_lsb;
+};
+
+/* Port malformed Req1 */
+struct feature_port_malformed_req1 {
+ u64 header_msb;
+};
+
+/* Port debug register */
+struct feature_port_debug {
+ u64 port_debug;
+};
+
+/* Port error capabilities */
+struct feature_port_err_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE ERROR */
+struct feature_port_error {
+ struct feature_header header;
+ struct feature_port_err_key error_mask;
+ struct feature_port_err_key port_error;
+ struct feature_port_first_err_key port_first_error;
+ struct feature_port_malformed_req0 malreq0;
+ struct feature_port_malformed_req1 malreq1;
+ struct feature_port_debug port_debug;
+ struct feature_port_err_capability error_capability;
+};
+
+/* Port UMSG Capability */
+struct feature_port_umsg_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Number of umsg allocated to this port */
+ u8 umsg_allocated;
+ /* Enable / Disable UMsg engine for this port */
+ u8 umsg_enable:1;
+ /* UMsg initialization status */
+ u8 umsg_init_complete:1;
+ /* IOMMU cannot translate the umsg base address */
+ u8 umsg_trans_error:1;
+ u64 rsvd:53; /* Reserved */
+ };
+ };
+};
+
+/* Port UMSG base address */
+struct feature_port_umsg_baseaddr {
+ union {
+ u64 csr;
+ struct {
+ u64 base_addr:48; /* 48 bit physical address */
+ u16 rsvd; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_umsg_mode {
+ union {
+ u64 csr;
+ struct {
+ u32 umsg_hint_enable; /* UMSG hint enable/disable */
+ u32 rsvd; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE UMSG */
+struct feature_port_umsg {
+ struct feature_header header;
+ struct feature_port_umsg_cap capability;
+ struct feature_port_umsg_baseaddr baseaddr;
+ struct feature_port_umsg_mode mode;
+};
+
+#define UMSG_EN_POLL_INVL 10 /* us */
+#define UMSG_EN_POLL_TIMEOUT 1000 /* us */
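+
+/*
+ * The two constants above bound a typical UMsg enable sequence: set
+ * umsg_enable in the capability register, then poll umsg_init_complete
+ * every UMSG_EN_POLL_INVL microseconds until UMSG_EN_POLL_TIMEOUT
+ * expires (sketch only; "umsg" is a hypothetical pointer to the mapped
+ * feature):
+ *
+ *	struct feature_port_umsg_cap cap;
+ *
+ *	cap.csr = readq(&umsg->capability);
+ *	cap.umsg_enable = 1;
+ *	writeq(cap.csr, &umsg->capability);
+ */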
+
+/* Port UINT Capability */
+struct feature_port_uint_cap {
+ union {
+ u64 csr;
+ struct {
+ u16 intr_num:12; /* Supported interrupts num */
+ /* First MSI-X vector table entry number */
+ u16 first_vec_num:12;
+ u64 rsvd:40;
+ };
+ };
+};
+
+/* PORT FEATURE UINT */
+struct feature_port_uint {
+ struct feature_header header;
+ struct feature_port_uint_cap capability;
+};
+
+/* STP region supports mmap operation, so use page aligned size. */
+#define PORT_FEATURE_STP_REGION_SIZE \
+ IFPGA_PAGE_ALIGN(sizeof(struct feature_port_stp))
+
+/* Port STP status register (for debug only) */
+struct feature_port_stp_status {
+ union {
+ u64 csr;
+ struct {
+ /* SLD Hub end-point read/write timeout */
+ u8 sld_ep_timeout:1;
+ /* Remote STP in reset/disable */
+ u8 rstp_disabled:1;
+ u8 unsupported_read:1;
+ /* MMIO timeout detected and faked with a response */
+ u8 mmio_timeout:1;
+ u8 txfifo_count:4;
+ u8 rxfifo_count:4;
+ u8 txfifo_overflow:1;
+ u8 txfifo_underflow:1;
+ u8 rxfifo_overflow:1;
+ u8 rxfifo_underflow:1;
+ /* Number of MMIO write requests */
+ u16 write_requests;
+ /* Number of MMIO read requests */
+ u16 read_requests;
+ /* Number of MMIO read responses */
+ u16 read_responses;
+ };
+ };
+};
+
+/*
+ * PORT FEATURE STP
+ * Most registers in the STP region are not touched by the driver; they are
+ * mmapped to user space instead. So they are not defined in the data
+ * structure below, while the region's actual size is 0x18c per the spec.
+ */
+struct feature_port_stp {
+ struct feature_header header;
+ struct feature_port_stp_status stp_status;
+};
+
+/**
+ * enum fpga_pr_states - fpga PR states
+ * @FPGA_PR_STATE_UNKNOWN: can't determine state
+ * @FPGA_PR_STATE_WRITE_INIT: preparing FPGA for programming
+ * @FPGA_PR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
+ * @FPGA_PR_STATE_WRITE: writing image to FPGA
+ * @FPGA_PR_STATE_WRITE_ERR: Error while writing FPGA
+ * @FPGA_PR_STATE_WRITE_COMPLETE: Doing post programming steps
+ * @FPGA_PR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE
+ * @FPGA_PR_STATE_DONE: FPGA PR done
+ */
+enum fpga_pr_states {
+ /* cannot determine state */
+ FPGA_PR_STATE_UNKNOWN,
+
+ /* write sequence: init, write, complete */
+ FPGA_PR_STATE_WRITE_INIT,
+ FPGA_PR_STATE_WRITE_INIT_ERR,
+ FPGA_PR_STATE_WRITE,
+ FPGA_PR_STATE_WRITE_ERR,
+ FPGA_PR_STATE_WRITE_COMPLETE,
+ FPGA_PR_STATE_WRITE_COMPLETE_ERR,
+
+ /* FPGA PR done */
+ FPGA_PR_STATE_DONE,
+};
+
+/*
+ * FPGA Manager flags
+ * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ */
+#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
+
+/**
+ * struct fpga_pr_info - information specific to an FPGA PR
+ * @flags: boolean flags as defined above
+ * @pr_err: PR error code
+ * @state: fpga manager state
+ * @port_id: port id
+ */
+struct fpga_pr_info {
+ u32 flags;
+ u64 pr_err;
+ enum fpga_pr_states state;
+ int port_id;
+};
+
+#define DEFINE_FPGA_PR_ERR_MSG(_name_) \
+static const char * const _name_[] = { \
+ "PR operation error detected", \
+ "PR CRC error detected", \
+ "PR incompatiable bitstream error detected", \
+ "PR IP protocol error detected", \
+ "PR FIFO overflow error detected", \
+ "PR timeout error detected", \
+ "PR secure load error detected", \
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+#define PR_WAIT_TIMEOUT 15000000
+
+#define PR_HOST_STATUS_IDLE 0
+#define PR_MAX_ERR_NUM 7
+
+DEFINE_FPGA_PR_ERR_MSG(pr_err_msg);
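+
+/*
+ * Decoding sketch: each low-order bit of the PR error CSR selects one
+ * string from the table above, in table order (a hypothetical helper,
+ * assuming bit i maps to pr_err_msg[i]):
+ *
+ *	static void log_pr_errors(u64 err)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < PR_MAX_ERR_NUM; i++)
+ *			if (err & (1ULL << i))
+ *				printf("%s\n", pr_err_msg[i]);
+ *	}
+ */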
+
+/*
+ * The green bitstream header must be byte-packed to match the real
+ * file format.
+ */
+struct bts_header {
+ u64 guid_h;
+ u64 guid_l;
+ u32 metadata_len;
+};
+
+#define GBS_GUID_H 0x414750466e6f6558
+#define GBS_GUID_L 0x31303076534247b7
+#define is_valid_bts(bts_hdr) \
+ (((bts_hdr)->guid_h == GBS_GUID_H) && \
+ ((bts_hdr)->guid_l == GBS_GUID_L))
+
+#define check_support(n) (n == 1 ? "support" : "no")
+
+/* bitstream id definition */
+struct fme_bitstream_id {
+ union {
+ u64 id;
+ struct {
+ u8 build_patch:8;
+ u8 build_minor:8;
+ u8 build_major:8;
+ u8 fvl_bypass:1;
+ u8 mac_lightweight:1;
+ u8 disagregate:1;
+ u8 lightweiht:1;
+ u8 seu:1;
+ u8 ptp:1;
+ u8 reserve:2;
+ u8 interface:4;
+ u32 afu_revision:12;
+ u8 patch:4;
+ u8 minor:4;
+ u8 major:4;
+ u8 reserved:4;
+ };
+ };
+};
+
+enum board_interface {
+ VC_8_10G = 0,
+ VC_4_25G = 1,
+ VC_2_1_25 = 2,
+ VC_4_25G_2_25G = 3,
+ VC_2_2_25G = 4,
+};
+
+enum pac_major {
+ VISTA_CREEK = 0,
+ RUSH_CREEK = 1,
+ DARBY_CREEK = 2,
+};
+
+enum pac_minor {
+ DCP_1_0 = 0,
+ DCP_1_1 = 1,
+ DCP_1_2 = 2,
+};
+
+struct opae_board_info {
+ enum pac_major major;
+ enum pac_minor minor;
+ enum board_interface type;
+
+ /* PAC features */
+ u8 fvl_bypass;
+ u8 mac_lightweight;
+ u8 disaggregate;
+ u8 lightweight;
+ u8 seu;
+ u8 ptp;
+
+ u32 max10_version;
+ u32 nios_fw_version;
+ u32 nums_of_retimer;
+ u32 ports_per_retimer;
+ u32 nums_of_fvl;
+ u32 ports_per_fvl;
+};
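+
+/*
+ * Decoding sketch: the PAC feature bits in struct fme_bitstream_id map
+ * field-for-field onto struct opae_board_info (an assumption for
+ * illustration; "fme_hdr" and "info" are hypothetical locals):
+ *
+ *	struct fme_bitstream_id id;
+ *
+ *	id.id = readq(&fme_hdr->bitstream_id);
+ *	info->major = id.major;
+ *	info->minor = id.minor;
+ *	info->type = id.interface;
+ *	info->fvl_bypass = id.fvl_bypass;
+ */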
+
+#pragma pack(pop)
+#endif /* _BASE_IFPGA_DEFINES_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_enumerate.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_enumerate.c
new file mode 100644
index 000000000..b8846e373
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_enumerate.c
@@ -0,0 +1,724 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "ifpga_api.h"
+
+#include "ifpga_hw.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+struct build_feature_devs_info {
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_afu_info *acc_info;
+
+ void *fiu;
+ enum fpga_id_type current_type;
+ int current_port_id;
+
+ void *ioaddr;
+ void *ioend;
+ uint64_t phys_addr;
+ int current_bar;
+
+ void *pfme_hdr;
+
+ struct ifpga_hw *hw;
+};
+
+static int feature_revision(void __iomem *start)
+{
+ struct feature_header header;
+
+ header.csr = readq(start);
+
+ return header.revision;
+}
+
+static u32 feature_size(void __iomem *start)
+{
+ struct feature_header header;
+
+ header.csr = readq(start);
+
+ /* the size of a private feature is 4KB aligned */
+ return header.next_header_offset ? header.next_header_offset : 4096;
+}
+
+static u64 feature_id(void __iomem *start)
+{
+ struct feature_header header;
+
+ header.csr = readq(start);
+
+ switch (header.type) {
+ case FEATURE_TYPE_FIU:
+ return FEATURE_ID_FIU_HEADER;
+ case FEATURE_TYPE_PRIVATE:
+ return header.id;
+ case FEATURE_TYPE_AFU:
+ return FEATURE_ID_AFU;
+ }
+
+ WARN_ON(1);
+ return 0;
+}
+
+static int
+build_info_add_sub_feature(struct build_feature_devs_info *binfo,
+ void __iomem *start, u64 fid, unsigned int size,
+ unsigned int vec_start,
+ unsigned int vec_cnt)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_feature *feature = NULL;
+ struct feature_irq_ctx *ctx = NULL;
+ int port_id, ret = 0;
+ unsigned int i;
+
+ fid = fid ? fid : feature_id(start);
+ size = size ? size : feature_size(start);
+
+ feature = opae_malloc(sizeof(struct ifpga_feature));
+ if (!feature)
+ return -ENOMEM;
+
+ feature->state = IFPGA_FEATURE_ATTACHED;
+ feature->addr = start;
+ feature->id = fid;
+ feature->size = size;
+ feature->revision = feature_revision(start);
+ feature->phys_addr = binfo->phys_addr +
+ ((u8 *)start - (u8 *)binfo->ioaddr);
+ feature->vec_start = vec_start;
+ feature->vec_cnt = vec_cnt;
+
+ dev_debug(binfo, "%s: id=0x%llx, phys_addr=0x%llx, size=%u\n",
+ __func__, (unsigned long long)feature->id,
+ (unsigned long long)feature->phys_addr, size);
+
+ if (vec_cnt) {
+ if (vec_start + vec_cnt <= vec_start) {
+ opae_free(feature);
+ return -EINVAL;
+ }
+
+ ctx = zmalloc(sizeof(*ctx) * vec_cnt);
+ if (!ctx) {
+ opae_free(feature);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < vec_cnt; i++) {
+ ctx[i].eventfd = -1;
+ ctx[i].idx = vec_start + i;
+ }
+ }
+
+ feature->ctx = ctx;
+ feature->ctx_num = vec_cnt;
+ feature->vfio_dev_fd = binfo->pci_data->vfio_dev_fd;
+
+ if (binfo->current_type == FME_ID) {
+ feature->parent = &hw->fme;
+ feature->type = FEATURE_FME_TYPE;
+ feature->name = get_fme_feature_name(fid);
+ TAILQ_INSERT_TAIL(&hw->fme.feature_list, feature, next);
+ } else if (binfo->current_type == PORT_ID) {
+ port_id = binfo->current_port_id;
+ feature->parent = &hw->port[port_id];
+ feature->type = FEATURE_PORT_TYPE;
+ feature->name = get_port_feature_name(fid);
+ TAILQ_INSERT_TAIL(&hw->port[port_id].feature_list,
+ feature, next);
+ } else {
+ return -EFAULT;
+ }
+ return ret;
+}
+
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ void __iomem *start, u64 fid,
+ unsigned int size, unsigned int vec_start,
+ unsigned int vec_cnt)
+{
+ return build_info_add_sub_feature(binfo, start, fid, size, vec_start,
+ vec_cnt);
+}
+
+/*
+ * The UAFU GUID is dynamic, as it can change after the FME downloads a
+ * different Green Bitstream to the port, so we treat any unknown GUID
+ * attached to a port's feature list as a UAFU.
+ */
+static bool feature_is_UAFU(struct build_feature_devs_info *binfo)
+{
+ if (binfo->current_type != PORT_ID)
+ return false;
+
+ return true;
+}
+
+static int parse_feature_port_uafu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ u64 id = PORT_FEATURE_ID_UAFU;
+ struct ifpga_afu_info *info;
+ void *start = (void *)hdr;
+ struct feature_port_header *port_hdr = binfo->ioaddr;
+ struct feature_port_capability capability;
+ int ret;
+ int size;
+
+ capability.csr = readq(&port_hdr->capability);
+
+ size = capability.mmio_size << 10;
+
+ ret = create_feature_instance(binfo, hdr, id, size, 0, 0);
+ if (ret)
+ return ret;
+
+ info = opae_malloc(sizeof(*info));
+ if (!info)
+ return -ENOMEM;
+
+ info->region[0].addr = start;
+ info->region[0].phys_addr = binfo->phys_addr +
+ (uint8_t *)start - (uint8_t *)binfo->ioaddr;
+ info->region[0].len = size;
+ info->num_regions = 1;
+
+ binfo->acc_info = info;
+
+ return ret;
+}
+
+static int parse_feature_afus(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ int ret;
+ struct feature_afu_header *afu_hdr, header;
+ u8 __iomem *start;
+ u8 __iomem *end = binfo->ioend;
+
+ start = (u8 __iomem *)hdr;
+ for (; start < end; start += header.next_afu) {
+ if ((unsigned int)(end - start) <
+ (unsigned int)(sizeof(*afu_hdr) + sizeof(*hdr)))
+ return -EINVAL;
+
+ hdr = (struct feature_header *)start;
+ afu_hdr = (struct feature_afu_header *)(hdr + 1);
+ header.csr = readq(&afu_hdr->csr);
+
+ if (feature_is_UAFU(binfo)) {
+ ret = parse_feature_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+ }
+
+ if (!header.next_afu)
+ break;
+ }
+
+ return 0;
+}
+
+/* create and register proper private data */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct ifpga_afu_info *info = binfo->acc_info;
+ struct ifpga_hw *hw = binfo->hw;
+ struct opae_manager *mgr;
+ struct opae_bridge *br;
+ struct opae_accelerator *acc;
+ struct ifpga_port_hw *port;
+ struct ifpga_fme_hw *fme;
+ struct ifpga_feature *feature;
+
+ if (!binfo->fiu)
+ return 0;
+
+ if (binfo->current_type == PORT_ID) {
+ /* return error if no valid acc info data structure */
+ if (!info)
+ return -EFAULT;
+
+ br = opae_bridge_alloc(hw->adapter->name, &ifpga_br_ops,
+ binfo->fiu);
+ if (!br)
+ return -ENOMEM;
+
+ br->id = binfo->current_port_id;
+
+ /* update irq info */
+ port = &hw->port[binfo->current_port_id];
+ feature = get_feature_by_id(&port->feature_list,
+ PORT_FEATURE_ID_UINT);
+ if (feature)
+ info->num_irqs = feature->vec_cnt;
+
+ acc = opae_accelerator_alloc(hw->adapter->name,
+ &ifpga_acc_ops, info);
+ if (!acc) {
+ opae_bridge_free(br);
+ return -ENOMEM;
+ }
+
+ acc->br = br;
+ if (hw->adapter->mgr)
+ acc->mgr = hw->adapter->mgr;
+ acc->index = br->id;
+
+ fme = &hw->fme;
+ fme->nums_acc_region = info->num_regions;
+
+ opae_adapter_add_acc(hw->adapter, acc);
+
+ } else if (binfo->current_type == FME_ID) {
+ mgr = opae_manager_alloc(hw->adapter->name, &ifpga_mgr_ops,
+ &ifpga_mgr_network_ops, binfo->fiu);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->adapter = hw->adapter;
+ hw->adapter->mgr = mgr;
+ }
+
+ binfo->fiu = NULL;
+
+ return 0;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum fpga_id_type type, unsigned int index)
+{
+ int ret;
+
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+ binfo->current_type = type;
+
+ if (type == FME_ID) {
+ binfo->fiu = &binfo->hw->fme;
+ } else if (type == PORT_ID) {
+ binfo->fiu = &binfo->hw->port[index];
+ binfo->current_port_id = index;
+ }
+
+ return 0;
+}
+
+static int parse_feature_fme(struct build_feature_devs_info *binfo,
+ struct feature_header *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ int ret;
+
+ ret = build_info_create_dev(binfo, FME_ID, 0);
+ if (ret)
+ return ret;
+
+ /* Update FME states */
+ fme->state = IFPGA_FME_IMPLEMENTED;
+ fme->parent = hw;
+ TAILQ_INIT(&fme->feature_list);
+ spinlock_init(&fme->lock);
+
+ return create_feature_instance(binfo, start, 0, 0, 0, 0);
+}
+
+static int parse_feature_port(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_port_hw *port;
+ unsigned int port_id;
+ int ret;
+
+ /* Get current port's id */
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
+ port_id = capability.port_number;
+
+ ret = build_info_create_dev(binfo, PORT_ID, port_id);
+ if (ret)
+ return ret;
+
+ /* found a Port device */
+ port = &hw->port[port_id];
+ port->port_id = binfo->current_port_id;
+ port->parent = hw;
+ port->state = IFPGA_PORT_ATTACHED;
+ spinlock_init(&port->lock);
+ TAILQ_INIT(&port->feature_list);
+
+ return create_feature_instance(binfo, start, 0, 0, 0, 0);
+}
+
+static void enable_port_uafu(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ struct ifpga_port_hw *port = &binfo->hw->port[binfo->current_port_id];
+
+ UNUSED(start);
+
+ fpga_port_reset(port);
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ struct feature_fiu_header *fiu_hdr, fiu_header;
+ u8 __iomem *start = (u8 __iomem *)hdr;
+ int ret;
+
+ header.csr = readq(hdr);
+
+ switch (header.id) {
+ case FEATURE_FIU_ID_FME:
+ ret = parse_feature_fme(binfo, hdr);
+ binfo->pfme_hdr = hdr;
+ if (ret)
+ return ret;
+ break;
+ case FEATURE_FIU_ID_PORT:
+ ret = parse_feature_port(binfo, hdr);
+ enable_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+
+ /* Check Port FIU's next_afu pointer to User AFU DFH */
+ fiu_hdr = (struct feature_fiu_header *)(hdr + 1);
+ fiu_header.csr = readq(&fiu_hdr->csr);
+
+ if (fiu_header.next_afu) {
+ start += fiu_header.next_afu;
+ ret = parse_feature_afus(binfo,
+ (struct feature_header *)start);
+ if (ret)
+ return ret;
+ } else {
+ dev_info(binfo, "No AFUs detected on Port\n");
+ }
+
+ break;
+ default:
+ dev_info(binfo, "FIU TYPE %d is not supported yet.\n",
+ header.id);
+ }
+
+ return 0;
+}
+
+static void parse_feature_irqs(struct build_feature_devs_info *binfo,
+ void __iomem *start, unsigned int *vec_start,
+ unsigned int *vec_cnt)
+{
+ u64 id;
+
+ UNUSED(binfo);
+
+ id = feature_id(start);
+
+ if (id == PORT_FEATURE_ID_UINT) {
+ struct feature_port_uint *port_uint = start;
+ struct feature_port_uint_cap uint_cap;
+
+ uint_cap.csr = readq(&port_uint->capability);
+ if (uint_cap.intr_num) {
+ *vec_start = uint_cap.first_vec_num;
+ *vec_cnt = uint_cap.intr_num;
+ } else {
+ dev_debug(binfo, "UAFU doesn't support interrupt\n");
+ }
+ } else if (id == PORT_FEATURE_ID_ERROR) {
+ struct feature_port_error *port_err = start;
+ struct feature_port_err_capability port_err_cap;
+
+ port_err_cap.csr = readq(&port_err->error_capability);
+ if (port_err_cap.support_intr) {
+ *vec_start = port_err_cap.intr_vector_num;
+ *vec_cnt = 1;
+ } else {
+ dev_debug(binfo, "Port error doesn't support interrupt\n");
+ }
+
+ } else if (id == FME_FEATURE_ID_GLOBAL_ERR) {
+ struct feature_fme_err *fme_err = start;
+ struct feature_fme_error_capability fme_err_cap;
+
+ fme_err_cap.csr = readq(&fme_err->fme_err_capability);
+ if (fme_err_cap.support_intr) {
+ *vec_start = fme_err_cap.intr_vector_num;
+ *vec_cnt = 1;
+ } else {
+ dev_debug(binfo, "FME error doesn't support interrupt\n");
+ }
+ }
+}
+
+static int parse_feature_fme_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ unsigned int vec_start = 0;
+ unsigned int vec_cnt = 0;
+
+ parse_feature_irqs(binfo, hdr, &vec_start, &vec_cnt);
+
+ return create_feature_instance(binfo, hdr, 0, 0, vec_start, vec_cnt);
+}
+
+static int parse_feature_port_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ unsigned int vec_start = 0;
+ unsigned int vec_cnt = 0;
+
+ parse_feature_irqs(binfo, hdr, &vec_start, &vec_cnt);
+
+ return create_feature_instance(binfo, hdr, 0, 0, vec_start, vec_cnt);
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ switch (binfo->current_type) {
+ case FME_ID:
+ return parse_feature_fme_private(binfo, hdr);
+ case PORT_ID:
+ return parse_feature_port_private(binfo, hdr);
+ default:
+ dev_err(binfo, "private feature %x belonging to AFU %d (unknown_type) is not supported yet.\n",
+ header.id, binfo->current_type);
+ }
+ return 0;
+}
+
+static int parse_feature(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ int ret = 0;
+
+ header.csr = readq(hdr);
+
+ switch (header.type) {
+ case FEATURE_TYPE_AFU:
+ ret = parse_feature_afus(binfo, hdr);
+ break;
+ case FEATURE_TYPE_PRIVATE:
+ ret = parse_feature_private(binfo, hdr);
+ break;
+ case FEATURE_TYPE_FIU:
+ ret = parse_feature_fiu(binfo, hdr);
+ break;
+ default:
+ dev_err(binfo, "Feature Type %x is not supported.\n",
+ hdr->type);
+ }
+
+ return ret;
+}
+
+static int
+parse_feature_list(struct build_feature_devs_info *binfo, u8 __iomem *start)
+{
+ struct feature_header *hdr, header;
+ u8 __iomem *end = (u8 __iomem *)binfo->ioend;
+ int ret = 0;
+
+ for (; start < end; start += header.next_header_offset) {
+ if ((unsigned int)(end - start) < (unsigned int)sizeof(*hdr)) {
+ dev_err(binfo, "The region is too small to contain a feature.\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ hdr = (struct feature_header *)start;
+ header.csr = readq(hdr);
+
+ dev_debug(binfo, "%s: address=0x%p, val=0x%llx, header.id=0x%x, header.next_offset=0x%x, header.eol=0x%x, header.type=0x%x\n",
+ __func__, hdr, (unsigned long long)header.csr,
+ header.id, header.next_header_offset,
+ header.end_of_list, header.type);
+
+ ret = parse_feature(binfo, hdr);
+ if (ret)
+ return ret;
+
+ if (header.end_of_list || !header.next_header_offset)
+ break;
+ }
+
+ return build_info_commit_dev(binfo);
+}
+
+/* switch the memory mapping to BAR# @bar */
+static int parse_switch_to(struct build_feature_devs_info *binfo, int bar)
+{
+ struct opae_adapter_data_pci *pci_data = binfo->pci_data;
+
+ if (!pci_data->region[bar].addr)
+ return -ENOMEM;
+
+ binfo->ioaddr = pci_data->region[bar].addr;
+ binfo->ioend = (u8 __iomem *)binfo->ioaddr + pci_data->region[bar].len;
+ binfo->phys_addr = pci_data->region[bar].phys_addr;
+ binfo->current_bar = bar;
+
+ return 0;
+}
+
+static int parse_ports_from_fme(struct build_feature_devs_info *binfo)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_port port;
+ int i = 0, ret = 0;
+
+ if (!binfo->pfme_hdr) {
+ dev_info(binfo, "VF is detected.\n");
+ return ret;
+ }
+
+ fme_hdr = binfo->pfme_hdr;
+
+ do {
+ port.csr = readq(&fme_hdr->port[i]);
+ if (!port.port_implemented)
+ break;
+
+ /* skip ports that can only be accessed via VF */
+ if (port.afu_access_control == FME_AFU_ACCESS_VF)
+ continue;
+
+ ret = parse_switch_to(binfo, port.port_bar);
+ if (ret)
+ break;
+
+ ret = parse_feature_list(binfo,
+ (u8 __iomem *)binfo->ioaddr +
+ port.port_offset);
+ if (ret)
+ break;
+ } while (++i < MAX_FPGA_PORT_NUM);
+
+ return ret;
+}
+
+static struct build_feature_devs_info *
+build_info_alloc_and_init(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+
+ binfo = zmalloc(sizeof(*binfo));
+ if (!binfo)
+ return binfo;
+
+ binfo->hw = hw;
+ binfo->pci_data = hw->pci_data;
+
+ /* fpga feature list starts from BAR 0 */
+ if (parse_switch_to(binfo, 0)) {
+ free(binfo);
+ return NULL;
+ }
+
+ return binfo;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ free(binfo);
+}
+
+static void ifpga_print_device_feature_list(struct ifpga_hw *hw)
+{
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct ifpga_port_hw *port;
+ struct ifpga_feature *feature;
+ int i;
+
+ dev_info(hw, "found fme_device, is in PF: %s\n",
+ is_ifpga_hw_pf(hw) ? "yes" : "no");
+
+ ifpga_for_each_fme_feature(fme, feature) {
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: %p - %p - paddr: 0x%lx\n",
+ feature->name, feature->addr,
+ feature->addr + feature->size - 1,
+ (unsigned long)feature->phys_addr);
+
+ }
+
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++) {
+ port = &hw->port[i];
+
+ if (port->state != IFPGA_PORT_ATTACHED)
+ continue;
+
+ dev_info(hw, "port device: %d\n", port->port_id);
+
+ ifpga_for_each_port_feature(port, feature) {
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: %p - %p - paddr:0x%lx\n",
+ feature->name,
+ feature->addr,
+ feature->addr +
+ feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+
+ }
+}
+
+int ifpga_bus_enumerate(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+ int ret;
+
+ binfo = build_info_alloc_and_init(hw);
+ if (!binfo)
+ return -ENOMEM;
+
+ ret = parse_feature_list(binfo, binfo->ioaddr);
+ if (ret)
+ goto exit;
+
+ ret = parse_ports_from_fme(binfo);
+ if (ret)
+ goto exit;
+
+ ifpga_print_device_feature_list(hw);
+
+exit:
+ build_info_free(binfo);
+ return ret;
+}
+
+int ifpga_bus_init(struct ifpga_hw *hw)
+{
+ int i;
+ struct ifpga_port_hw *port;
+
+ fme_hw_init(&hw->fme);
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++) {
+ port = &hw->port[i];
+ port_hw_init(port);
+ }
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_enumerate.h b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_enumerate.h
new file mode 100644
index 000000000..14131e320
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_enumerate.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_ENUMERATE_H_
+#define _IFPGA_ENUMERATE_H_
+
+int ifpga_bus_init(struct ifpga_hw *hw);
+int ifpga_bus_enumerate(struct ifpga_hw *hw);
+
+#endif /* _IFPGA_ENUMERATE_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.c
new file mode 100644
index 000000000..0f852a75a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.c
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <sys/ioctl.h>
+#include <rte_vfio.h>
+
+#include "ifpga_feature_dev.h"
+
+/*
+ * Enable the port by clearing the port soft reset bit, which is set by
+ * default. The AFU is unable to respond to any MMIO access while in reset.
+ * __fpga_port_enable should only be used after __fpga_port_disable.
+ */
+void __fpga_port_enable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ WARN_ON(!port->disable_count);
+
+ if (--port->disable_count != 0)
+ return;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x0;
+ writeq(control.csr, &port_hdr->control);
+}
+
+int __fpga_port_disable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ if (port->disable_count++ != 0)
+ return 0;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ /* Set port soft reset */
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x1;
+ writeq(control.csr, &port_hdr->control);
+
+ /*
+ * HW sets the ack bit to 1 when all outstanding requests have been
+ * drained on this port and the minimum soft reset pulse width has
+ * elapsed. The driver polls port_sftrst_ack to determine whether the
+ * reset is done by HW.
+ */
+ control.port_sftrst_ack = 1;
+
+ if (fpga_wait_register_field(port_sftrst_ack, control,
+ &port_hdr->control, RST_POLL_TIMEOUT,
+ RST_POLL_INVL)) {
+ dev_err(port, "timeout, fail to reset device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
+{
+ struct feature_port_header *port_hdr;
+ u64 guidl, guidh;
+
+ if (!uuid)
+ return -EINVAL;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
+
+ spinlock_lock(&port->lock);
+ guidl = readq(&port_hdr->afu_header.guid.b[0]);
+ guidh = readq(&port_hdr->afu_header.guid.b[8]);
+ spinlock_unlock(&port->lock);
+
+ opae_memcpy(uuid->b, &guidl, sizeof(u64));
+ opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
+
+ return 0;
+}
+
+/* Mask / Unmask Port Errors by the Error Mask register. */
+void port_err_mask(struct ifpga_port_hw *port, bool mask)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key err_mask;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ if (mask)
+ err_mask.csr = PORT_ERR_MASK;
+ else
+ err_mask.csr = 0;
+
+ writeq(err_mask.csr, &port_err->error_mask);
+}
+
+/* Clear All Port Errors. */
+int port_err_clear(struct ifpga_port_hw *port, u64 err)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_error *port_err;
+ struct feature_port_err_key mask;
+ struct feature_port_first_err_key first;
+ struct feature_port_status status;
+ int ret = 0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ /*
+ * Clear All Port Errors
+ *
+ * - Check for AP6 State
+ * - Halt Port by keeping Port in reset
+ * - Set PORT Error mask to all 1 to mask errors
+ * - Clear all errors
+ * - Set Port mask to all 0 to enable errors
+ * - All errors start capturing new errors
+ * - Enable Port by pulling the port out of reset
+ */
+
+ /* If the device is still in AP6 state, we cannot clear any error. */
+ status.csr = readq(&port_hdr->status);
+ if (status.power_state == PORT_POWER_STATE_AP6) {
+ dev_err(port, "Could not clear errors, device in AP6 state.\n");
+ return -EBUSY;
+ }
+
+ /* Halt Port by keeping Port in reset */
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ /* Mask all errors */
+ port_err_mask(port, true);
+
+ /* Clear errors if err input matches with current port errors.*/
+ mask.csr = readq(&port_err->port_error);
+
+ if (mask.csr == err) {
+ writeq(mask.csr, &port_err->port_error);
+
+ first.csr = readq(&port_err->port_first_error);
+ writeq(first.csr, &port_err->port_first_error);
+ } else {
+ ret = -EBUSY;
+ }
+
+ /* Clear mask */
+ port_err_mask(port, false);
+
+ /* Enable the port by clearing the reset */
+ __fpga_port_enable(port);
+
+ return ret;
+}
+
+int port_clear_error(struct ifpga_port_hw *port)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+
+ dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
+
+ return port_err_clear(port, error.csr);
+}
+
+static struct feature_driver fme_feature_drvs[] = {
+ {FEATURE_DRV(FME_FEATURE_ID_HEADER, FME_FEATURE_HEADER,
+ &fme_hdr_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_THERMAL_MGMT, FME_FEATURE_THERMAL_MGMT,
+ &fme_thermal_mgmt_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_POWER_MGMT, FME_FEATURE_POWER_MGMT,
+ &fme_power_mgmt_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_ERR, FME_FEATURE_GLOBAL_ERR,
+ &fme_global_err_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_PR_MGMT, FME_FEATURE_PR_MGMT,
+ &fme_pr_mgmt_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_DPERF, FME_FEATURE_GLOBAL_DPERF,
+ &fme_global_dperf_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_HSSI_ETH, FME_FEATURE_HSSI_ETH,
+ &fme_hssi_eth_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_EMIF_MGMT, FME_FEATURE_EMIF_MGMT,
+ &fme_emif_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_MAX10_SPI, FME_FEATURE_MAX10_SPI,
+ &fme_spi_master_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_NIOS_SPI, FME_FEATURE_NIOS_SPI,
+ &fme_nios_spi_master_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_I2C_MASTER, FME_FEATURE_I2C_MASTER,
+ &fme_i2c_master_ops),},
+ {FEATURE_DRV(FME_FEATURE_ID_ETH_GROUP, FME_FEATURE_ETH_GROUP,
+ &fme_eth_group_ops),},
+ {0, NULL, NULL}, /* end of array */
+};
+
+static struct feature_driver port_feature_drvs[] = {
+ {FEATURE_DRV(PORT_FEATURE_ID_HEADER, PORT_FEATURE_HEADER,
+ &ifpga_rawdev_port_hdr_ops)},
+ {FEATURE_DRV(PORT_FEATURE_ID_ERROR, PORT_FEATURE_ERR,
+ &ifpga_rawdev_port_error_ops)},
+ {FEATURE_DRV(PORT_FEATURE_ID_UINT, PORT_FEATURE_UINT,
+ &ifpga_rawdev_port_uint_ops)},
+ {FEATURE_DRV(PORT_FEATURE_ID_STP, PORT_FEATURE_STP,
+ &ifpga_rawdev_port_stp_ops)},
+ {FEATURE_DRV(PORT_FEATURE_ID_UAFU, PORT_FEATURE_UAFU,
+ &ifpga_rawdev_port_afu_ops)},
+ {0, NULL, NULL}, /* end of array */
+};
+
+const char *get_fme_feature_name(unsigned int id)
+{
+ struct feature_driver *drv = fme_feature_drvs;
+
+ while (drv->name) {
+ if (drv->id == id)
+ return drv->name;
+
+ drv++;
+ }
+
+ return NULL;
+}
+
+const char *get_port_feature_name(unsigned int id)
+{
+ struct feature_driver *drv = port_feature_drvs;
+
+ while (drv->name) {
+ if (drv->id == id)
+ return drv->name;
+
+ drv++;
+ }
+
+ return NULL;
+}
+
+static void feature_uinit(struct ifpga_feature_list *list)
+{
+ struct ifpga_feature *feature;
+
+ TAILQ_FOREACH(feature, list, next) {
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+ if (feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+static int feature_init(struct feature_driver *drv,
+ struct ifpga_feature_list *list)
+{
+ struct ifpga_feature *feature;
+ int ret;
+
+ while (drv->ops) {
+ TAILQ_FOREACH(feature, list, next) {
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+ if (feature->id == drv->id) {
+ feature->ops = drv->ops;
+ feature->name = drv->name;
+ if (feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret)
+ goto error;
+ }
+ }
+ }
+ drv++;
+ }
+
+ return 0;
+error:
+ feature_uinit(list);
+ return ret;
+}
+
+int fme_hw_init(struct ifpga_fme_hw *fme)
+{
+ int ret;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -ENODEV;
+
+ ret = feature_init(fme_feature_drvs, &fme->feature_list);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void fme_hw_uinit(struct ifpga_fme_hw *fme)
+{
+ feature_uinit(&fme->feature_list);
+}
+
+void port_hw_uinit(struct ifpga_port_hw *port)
+{
+ feature_uinit(&port->feature_list);
+}
+
+int port_hw_init(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ if (port->state == IFPGA_PORT_UNUSED)
+ return 0;
+
+ ret = feature_init(port_feature_drvs, &port->feature_list);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ port_hw_uinit(port);
+ return ret;
+}
+
+#define FPGA_MAX_MSIX_VEC_COUNT 128
+/* irq set buffer length for interrupt */
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * FPGA_MAX_MSIX_VEC_COUNT)
+
+/* only support MSI-X for now */
+static int vfio_msix_enable_block(s32 vfio_dev_fd, unsigned int vec_start,
+ unsigned int count, s32 *fds)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int len, ret;
+ int *fd_ptr;
+
+ len = sizeof(irq_set_buf);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ /* 0 < irq_set->count < FPGA_MAX_MSIX_VEC_COUNT */
+ irq_set->count = count ?
+ (count > FPGA_MAX_MSIX_VEC_COUNT ?
+ FPGA_MAX_MSIX_VEC_COUNT : count) : 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = vec_start;
+
+ fd_ptr = (int *)&irq_set->data;
+ opae_memcpy(fd_ptr, fds, sizeof(int) * count);
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret)
+ printf("Error enabling MSI-X interrupts\n");
+
+ return ret;
+}
+
+int fpga_msix_set_block(struct ifpga_feature *feature, unsigned int start,
+ unsigned int count, s32 *fds)
+{
+ struct feature_irq_ctx *ctx = feature->ctx;
+ unsigned int i;
+ int ret;
+
+ if (start >= feature->ctx_num || start + count > feature->ctx_num)
+ return -EINVAL;
+
+ /* assume that each feature has a contiguous vector space in MSI-X */
+ ret = vfio_msix_enable_block(feature->vfio_dev_fd,
+ ctx[start].idx, count, fds);
+ if (!ret) {
+ for (i = 0; i < count; i++)
+ ctx[start + i].eventfd = fds[i];
+ }
+
+ return ret;
+}
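+
+/*
+ * Caller-side sketch (hypothetical, requires <sys/eventfd.h>): create one
+ * eventfd per vector and hand the whole block to the feature in a single
+ * VFIO ioctl:
+ *
+ *	s32 fds[2] = { eventfd(0, 0), eventfd(0, 0) };
+ *
+ *	if (fpga_msix_set_block(feature, 0, 2, fds))
+ *		printf("failed to bind MSI-X block\n");
+ */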
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.h b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.h
new file mode 100644
index 000000000..2b1309b44
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_feature_dev.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_FEATURE_DEV_H_
+#define _IFPGA_FEATURE_DEV_H_
+
+#include "ifpga_hw.h"
+
+struct feature_driver {
+ u64 id;
+ const char *name;
+ struct ifpga_feature_ops *ops;
+};
+
+/**
+ * FEATURE_DRV - macro used to describe a specific feature driver
+ */
+#define FEATURE_DRV(n, s, p) \
+ .id = (n), .name = (s), .ops = (p)
+
+static inline struct ifpga_port_hw *
+get_port(struct ifpga_hw *hw, u32 port_id)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return NULL;
+
+ return &hw->port[port_id];
+}
+
+#define ifpga_for_each_fme_feature(hw, feature) \
+ TAILQ_FOREACH(feature, &hw->feature_list, next)
+
+#define ifpga_for_each_port_feature(port, feature) \
+ TAILQ_FOREACH(feature, &port->feature_list, next)
+
+static inline struct ifpga_feature *
+get_fme_feature_by_id(struct ifpga_fme_hw *fme, u64 id)
+{
+ struct ifpga_feature *feature;
+
+ ifpga_for_each_fme_feature(fme, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline struct ifpga_feature *
+get_port_feature_by_id(struct ifpga_port_hw *port, u64 id)
+{
+ struct ifpga_feature *feature;
+
+ ifpga_for_each_port_feature(port, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline struct ifpga_feature *
+get_feature_by_id(struct ifpga_feature_list *list, u64 id)
+{
+ struct ifpga_feature *feature;
+
+ TAILQ_FOREACH(feature, list, next)
+ if (feature->id == id)
+ return feature;
+
+ return NULL;
+}
+
+static inline void *
+get_fme_feature_ioaddr_by_index(struct ifpga_fme_hw *fme, int index)
+{
+ struct ifpga_feature *feature =
+ get_feature_by_id(&fme->feature_list, index);
+
+ return feature ? feature->addr : NULL;
+}
+
+static inline void *
+get_port_feature_ioaddr_by_index(struct ifpga_port_hw *port, int index)
+{
+ struct ifpga_feature *feature =
+ get_feature_by_id(&port->feature_list, index);
+
+ return feature ? feature->addr : NULL;
+}
+
+static inline bool
+is_fme_feature_present(struct ifpga_fme_hw *fme, int index)
+{
+ return !!get_fme_feature_ioaddr_by_index(fme, index);
+}
+
+static inline bool
+is_port_feature_present(struct ifpga_port_hw *port, int index)
+{
+ return !!get_port_feature_ioaddr_by_index(port, index);
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid);
+
+int __fpga_port_disable(struct ifpga_port_hw *port);
+void __fpga_port_enable(struct ifpga_port_hw *port);
+
+static inline int fpga_port_disable(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_disable(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+static inline int fpga_port_enable(struct ifpga_port_hw *port)
+{
+ spinlock_lock(&port->lock);
+ __fpga_port_enable(port);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static inline int __fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ __fpga_port_enable(port);
+
+ return 0;
+}
+
+static inline int fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_reset(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer, u32 size,
+ u64 *status);
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set);
+
+int fme_hw_init(struct ifpga_fme_hw *fme);
+void fme_hw_uinit(struct ifpga_fme_hw *fme);
+void port_hw_uinit(struct ifpga_port_hw *port);
+int port_hw_init(struct ifpga_port_hw *port);
+int port_clear_error(struct ifpga_port_hw *port);
+void port_err_mask(struct ifpga_port_hw *port, bool mask);
+int port_err_clear(struct ifpga_port_hw *port, u64 err);
+
+extern struct ifpga_feature_ops fme_hdr_ops;
+extern struct ifpga_feature_ops fme_thermal_mgmt_ops;
+extern struct ifpga_feature_ops fme_power_mgmt_ops;
+extern struct ifpga_feature_ops fme_global_err_ops;
+extern struct ifpga_feature_ops fme_pr_mgmt_ops;
+extern struct ifpga_feature_ops fme_global_iperf_ops;
+extern struct ifpga_feature_ops fme_global_dperf_ops;
+extern struct ifpga_feature_ops fme_hssi_eth_ops;
+extern struct ifpga_feature_ops fme_emif_ops;
+extern struct ifpga_feature_ops fme_spi_master_ops;
+extern struct ifpga_feature_ops fme_i2c_master_ops;
+extern struct ifpga_feature_ops fme_eth_group_ops;
+extern struct ifpga_feature_ops fme_nios_spi_master_ops;
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+
+/* This struct is used when parsing uafu irq_set */
+struct fpga_uafu_irq_set {
+ u32 start;
+ u32 count;
+ s32 *evtfds;
+};
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
+const char *get_fme_feature_name(unsigned int id);
+const char *get_port_feature_name(unsigned int id);
+
+extern struct ifpga_feature_ops ifpga_rawdev_port_hdr_ops;
+extern struct ifpga_feature_ops ifpga_rawdev_port_error_ops;
+extern struct ifpga_feature_ops ifpga_rawdev_port_stp_ops;
+extern struct ifpga_feature_ops ifpga_rawdev_port_uint_ops;
+extern struct ifpga_feature_ops ifpga_rawdev_port_afu_ops;
+
+/* helper functions for feature ops */
+int fpga_msix_set_block(struct ifpga_feature *feature, unsigned int start,
+ unsigned int count, s32 *fds);
+
+/* FME network function ops */
+int fme_mgr_read_mac_rom(struct ifpga_fme_hw *fme, int offset,
+ void *buf, int size);
+int fme_mgr_write_mac_rom(struct ifpga_fme_hw *fme, int offset,
+ void *buf, int size);
+int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw *fme);
+int fme_mgr_get_eth_group_info(struct ifpga_fme_hw *fme,
+ u8 group_id, struct opae_eth_group_info *info);
+int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw *fme, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 *data);
+int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw *fme, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 data);
+int fme_mgr_get_retimer_info(struct ifpga_fme_hw *fme,
+ struct opae_retimer_info *info);
+int fme_mgr_get_retimer_status(struct ifpga_fme_hw *fme,
+ struct opae_retimer_status *status);
+int fme_mgr_get_sensor_value(struct ifpga_fme_hw *fme,
+ struct opae_sensor_info *sensor,
+ unsigned int *value);
+#endif /* _IFPGA_FEATURE_DEV_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme.c
new file mode 100644
index 000000000..c31a94cf8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme.c
@@ -0,0 +1,1406 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+#include "opae_spi.h"
+#include "opae_intel_max10.h"
+#include "opae_i2c.h"
+#include "opae_at24_eeprom.h"
+
+#define PWR_THRESHOLD_MAX 0x7F
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct ifpga_feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct ifpga_feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
+{
+ struct ifpga_feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+/* FME private feature: header */
+static int fme_hdr_init(struct ifpga_feature *feature)
+{
+ struct feature_fme_header *fme_hdr;
+
+ fme_hdr = (struct feature_fme_header *)feature->addr;
+
+ dev_info(NULL, "FME HDR Init.\n");
+ dev_info(NULL, "FME cap %llx.\n",
+ (unsigned long long)fme_hdr->capability.csr);
+
+ return 0;
+}
+
+static void fme_hdr_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME HDR UInit.\n");
+}
+
+static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&fme_hdr->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *ports_num = fme_capability.num_ports;
+
+ return 0;
+}
+
+static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *cache_size = fme_capability.cache_size;
+
+ return 0;
+}
+
+static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *version = fme_capability.fabric_verid;
+
+ return 0;
+}
+
+static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *socket_id = fme_capability.socket_id;
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
+ u64 *bitstream_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_id = readq(&fme_hdr->bitstream_id);
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
+ u64 *bitstream_metadata)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_metadata = readq(&fme_hdr->bitstream_md);
+
+ return 0;
+}
+
+static int
+fme_hdr_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_HDR_PROP_REVISION:
+ return fme_hdr_get_revision(fme, &prop->data);
+ case FME_HDR_PROP_PORTS_NUM:
+ return fme_hdr_get_ports_num(fme, &prop->data);
+ case FME_HDR_PROP_CACHE_SIZE:
+ return fme_hdr_get_cache_size(fme, &prop->data);
+ case FME_HDR_PROP_VERSION:
+ return fme_hdr_get_version(fme, &prop->data);
+ case FME_HDR_PROP_SOCKET_ID:
+ return fme_hdr_get_socket_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_ID:
+ return fme_hdr_get_bitstream_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_METADATA:
+ return fme_hdr_get_bitstream_metadata(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct ifpga_feature_ops fme_hdr_ops = {
+ .init = fme_hdr_init,
+ .uinit = fme_hdr_uinit,
+ .get_prop = fme_hdr_get_prop,
+};
+
+/* thermal management */
+static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1 = temp_threshold.tmp_thshold1;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
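+ /* a set lock_bit means the thresholds are locked against host writes; a value of 0 disables the threshold, and values above 100 (presumably degrees Celsius) are rejected */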
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres1 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres1 == 0) {
+ tmp_threshold.tmp_thshold1_enable = 0;
+ tmp_threshold.tmp_thshold1 = thres1;
+ } else {
+ tmp_threshold.tmp_thshold1_enable = 1;
+ tmp_threshold.tmp_thshold1 = thres1;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres2 = temp_threshold.tmp_thshold2;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres2 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres2 == 0) {
+ tmp_threshold.tmp_thshold2_enable = 0;
+ tmp_threshold.tmp_thshold2 = thres2;
+ } else {
+ tmp_threshold.tmp_thshold2_enable = 1;
+ tmp_threshold.tmp_thshold2 = thres2;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
+ u64 *thres_trip)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres_trip = temp_threshold.therm_trip_thshold;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
+ u64 *thres1_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_reached = temp_threshold.thshold1_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
+ u64 *thres2_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres2_reached = temp_threshold.thshold2_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 *thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_policy = temp_threshold.thshold_policy;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold tmp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+
+ if (thres1_policy == 0) {
+ tmp_threshold.thshold_policy = 0;
+ } else if (thres1_policy == 1) {
+ tmp_threshold.thshold_policy = 1;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
+ *temp = temp_rdsensor_fmt1.fpga_temp;
+
+ return 0;
+}
+
+static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_thermal *fme_thermal
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_thermal->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
+
+static int fme_thermal_mgmt_init(struct ifpga_feature *feature)
+{
+ struct feature_fme_thermal *fme_thermal;
+ struct feature_fme_tmp_threshold_cap thermal_cap;
+
+ dev_info(NULL, "FME thermal mgmt Init.\n");
+
+ fme_thermal = (struct feature_fme_thermal *)feature->addr;
+ thermal_cap.csr = readq(&fme_thermal->threshold_cap);
+
+ dev_info(NULL, "FME thermal cap %llx.\n",
+ (unsigned long long)fme_thermal->threshold_cap.csr);
+
+ if (thermal_cap.tmp_thshold_disabled)
+ feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
+
+ return 0;
+}
+
+static void fme_thermal_mgmt_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME thermal mgmt UInit.\n");
+}
+
+static int
+fme_thermal_set_prop(struct ifpga_feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_set_threshold1(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_set_threshold2(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_set_threshold1_policy(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int
+fme_thermal_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
+ prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
+ prop->prop_id != FME_THERMAL_PROP_REVISION)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_get_threshold1(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_get_threshold2(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD_TRIP:
+ return fme_thermal_get_threshold_trip(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_REACHED:
+ return fme_thermal_get_threshold1_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2_REACHED:
+ return fme_thermal_get_threshold2_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_get_threshold1_policy(fme, &prop->data);
+ case FME_THERMAL_PROP_TEMPERATURE:
+ return fme_thermal_get_temperature(fme, &prop->data);
+ case FME_THERMAL_PROP_REVISION:
+ return fme_thermal_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct ifpga_feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+ .uinit = fme_thermal_mgmt_uinit,
+ .get_prop = fme_thermal_get_prop,
+ .set_prop = fme_thermal_set_prop,
+};
+
+static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *consumed = pm_status.pwr_consumed;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold1;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold1 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold2;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold2 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold1_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold2_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *rtl = pm_status.fpga_latency_report;
+
+ return 0;
+}
+
+static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_xeon_limit xeon_limit;
+
+ xeon_limit.csr = readq(&fme_power->xeon_limit);
+
+ if (!xeon_limit.enable)
+ xeon_limit.pwr_limit = 0;
+
+ *limit = xeon_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_fpga_limit fpga_limit;
+
+ fpga_limit.csr = readq(&fme_power->fpga_limit);
+
+ if (!fpga_limit.enable)
+ fpga_limit.pwr_limit = 0;
+
+ *limit = fpga_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_power->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_power_mgmt_init(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt Init.\n");
+
+ return 0;
+}
+
+static void fme_power_mgmt_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt UInit.\n");
+}
+
+static int fme_power_mgmt_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_CONSUMED:
+ return fme_pwr_get_consumed(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_get_threshold1(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_get_threshold2(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1_STATUS:
+ return fme_pwr_get_threshold1_status(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2_STATUS:
+ return fme_pwr_get_threshold2_status(fme, &prop->data);
+ case FME_PWR_PROP_RTL:
+ return fme_pwr_get_rtl(fme, &prop->data);
+ case FME_PWR_PROP_XEON_LIMIT:
+ return fme_pwr_get_xeon_limit(fme, &prop->data);
+ case FME_PWR_PROP_FPGA_LIMIT:
+ return fme_pwr_get_fpga_limit(fme, &prop->data);
+ case FME_PWR_PROP_REVISION:
+ return fme_pwr_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_power_mgmt_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_set_threshold1(fme, prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_set_threshold2(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct ifpga_feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+ .uinit = fme_power_mgmt_uinit,
+ .get_prop = fme_power_mgmt_get_prop,
+ .set_prop = fme_power_mgmt_set_prop,
+};
+
+static int fme_hssi_eth_init(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+ return 0;
+}
+
+static void fme_hssi_eth_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+}
+
+struct ifpga_feature_ops fme_hssi_eth_ops = {
+ .init = fme_hssi_eth_init,
+ .uinit = fme_hssi_eth_uinit,
+};
+
+static int fme_emif_init(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+ return 0;
+}
+
+static void fme_emif_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+}
+
+struct ifpga_feature_ops fme_emif_ops = {
+ .init = fme_emif_init,
+ .uinit = fme_emif_uinit,
+};
+
+static const char *board_type_to_string(u32 type)
+{
+ switch (type) {
+ case VC_8_10G:
+ return "VC_8x10G";
+ case VC_4_25G:
+ return "VC_4x25G";
+ case VC_2_1_25:
+ return "VC_2x1x25G";
+ case VC_4_25G_2_25G:
+ return "VC_4x25G+2x25G";
+ case VC_2_2_25G:
+ return "VC_2x2x25G";
+ }
+
+ return "unknown";
+}
+
+static const char *board_major_to_string(u32 major)
+{
+ switch (major) {
+ case VISTA_CREEK:
+ return "VISTA_CREEK";
+ case RUSH_CREEK:
+ return "RUSH_CREEK";
+ case DARBY_CREEK:
+ return "DARBY_CREEK";
+ }
+
+ return "unknown";
+}
+
+static int board_type_to_info(u32 type,
+ struct opae_board_info *info)
+{
+ switch (type) {
+ case VC_8_10G:
+ info->nums_of_retimer = 2;
+ info->ports_per_retimer = 4;
+ info->nums_of_fvl = 2;
+ info->ports_per_fvl = 4;
+ break;
+ case VC_4_25G:
+ info->nums_of_retimer = 1;
+ info->ports_per_retimer = 4;
+ info->nums_of_fvl = 2;
+ info->ports_per_fvl = 2;
+ break;
+ case VC_2_1_25:
+ info->nums_of_retimer = 2;
+ info->ports_per_retimer = 1;
+ info->nums_of_fvl = 1;
+ info->ports_per_fvl = 2;
+ break;
+ case VC_2_2_25G:
+ info->nums_of_retimer = 2;
+ info->ports_per_retimer = 2;
+ info->nums_of_fvl = 2;
+ info->ports_per_fvl = 2;
+ break;
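+ /* note: VC_4_25G_2_25G is named in board_type_to_string() but has no port layout here, so it falls through to -EINVAL */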
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fme_get_board_interface(struct ifpga_fme_hw *fme)
+{
+ struct fme_bitstream_id id;
+ struct ifpga_hw *hw;
+ u32 val;
+
+ hw = fme->parent;
+ if (!hw)
+ return -ENODEV;
+
+ if (fme_hdr_get_bitstream_id(fme, &id.id))
+ return -EINVAL;
+
+ fme->board_info.major = id.major;
+ fme->board_info.minor = id.minor;
+ fme->board_info.type = id.interface;
+ fme->board_info.fvl_bypass = id.fvl_bypass;
+ fme->board_info.mac_lightweight = id.mac_lightweight;
+ fme->board_info.lightweight = id.lightweiht;
+ fme->board_info.disaggregate = id.disagregate;
+ fme->board_info.seu = id.seu;
+ fme->board_info.ptp = id.ptp;
+
+ dev_info(fme, "found: PCI dev: %02x:%02x:%x board: %s type: %s\n",
+ hw->pci_data->bus,
+ hw->pci_data->devid,
+ hw->pci_data->function,
+ board_major_to_string(fme->board_info.major),
+ board_type_to_string(fme->board_info.type));
+
+ dev_info(fme, "support feature:\n"
+ "fvl_bypass:%s\n"
+ "mac_lightweight:%s\n"
+ "lightweight:%s\n"
+ "disaggregate:%s\n"
+ "seu:%s\n"
+ "ptp1588:%s\n",
+ check_support(fme->board_info.fvl_bypass),
+ check_support(fme->board_info.mac_lightweight),
+ check_support(fme->board_info.lightweight),
+ check_support(fme->board_info.disaggregate),
+ check_support(fme->board_info.seu),
+ check_support(fme->board_info.ptp));
+
+ if (board_type_to_info(fme->board_info.type, &fme->board_info))
+ return -EINVAL;
+
+ dev_info(fme, "get board info: nums_retimers %d ports_per_retimer %d nums_fvl %d ports_per_fvl %d\n",
+ fme->board_info.nums_of_retimer,
+ fme->board_info.ports_per_retimer,
+ fme->board_info.nums_of_fvl,
+ fme->board_info.ports_per_fvl);
+
+ if (max10_sys_read(fme->max10_dev, MAX10_BUILD_VER, &val))
+ return -EINVAL;
+ fme->board_info.max10_version = val & 0xffffff;
+
+ if (max10_sys_read(fme->max10_dev, NIOS2_FW_VERSION, &val))
+ return -EINVAL;
+ fme->board_info.nios_fw_version = val & 0xffffff;
+
+ dev_info(fme, "max10 version 0x%x, nios fw version 0x%x\n",
+ fme->board_info.max10_version,
+ fme->board_info.nios_fw_version);
+
+ return 0;
+}
+
+static int spi_self_checking(struct intel_max10_device *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = max10_sys_read(dev, MAX10_TEST_REG, &val);
+ if (ret)
+ return -EIO;
+
+ dev_info(NULL, "Read MAX10 test register 0x%x\n", val);
+
+ return 0;
+}
+
+static int fme_spi_init(struct ifpga_feature *feature)
+{
+ struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
+ struct altera_spi_device *spi_master;
+ struct intel_max10_device *max10;
+ int ret = 0;
+
+ dev_info(fme, "FME SPI Master (Max10) Init.\n");
+ dev_debug(fme, "FME SPI base addr %p.\n",
+ feature->addr);
+ dev_debug(fme, "spi param=0x%llx\n",
+ (unsigned long long)opae_readq(feature->addr + 0x8));
+
+ spi_master = altera_spi_alloc(feature->addr, TYPE_SPI);
+ if (!spi_master)
+ return -ENODEV;
+
+ altera_spi_init(spi_master);
+
+ max10 = intel_max10_device_probe(spi_master, 0);
+ if (!max10) {
+ ret = -ENODEV;
+ dev_err(fme, "max10 init fail\n");
+ goto spi_fail;
+ }
+
+ fme->max10_dev = max10;
+
+ /* SPI self test */
+ if (spi_self_checking(max10)) {
+ ret = -EIO;
+ goto max10_fail;
+ }
+
+ return ret;
+
+max10_fail:
+ intel_max10_device_remove(fme->max10_dev);
+spi_fail:
+ altera_spi_release(spi_master);
+ return ret;
+}
+
+static void fme_spi_uinit(struct ifpga_feature *feature)
+{
+ struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
+
+ if (fme->max10_dev)
+ intel_max10_device_remove(fme->max10_dev);
+}
+
+struct ifpga_feature_ops fme_spi_master_ops = {
+ .init = fme_spi_init,
+ .uinit = fme_spi_uinit,
+};
+
+static int nios_spi_wait_init_done(struct altera_spi_device *dev)
+{
+ u32 val = 0;
+ unsigned long timeout = msecs_to_timer_cycles(10000);
+ unsigned long ticks;
+ int major_version;
+
+ if (spi_reg_read(dev, NIOS_VERSION, &val))
+ return -EIO;
+
+ major_version = (val >> NIOS_VERSION_MAJOR_SHIFT) &
+ NIOS_VERSION_MAJOR;
+ dev_debug(dev, "A10 NIOS FW version %d\n", major_version);
+
+ if (major_version >= 3) {
+ /* read NIOS_INIT to check whether PKVL init is done */
+ if (spi_reg_read(dev, NIOS_INIT, &val))
+ return -EIO;
+
+ /* check if PKVLs are initialized already */
+ if (val & NIOS_INIT_DONE || val & NIOS_INIT_START)
+ goto nios_init_done;
+
+ /* start to config the default FEC mode */
+ val = NIOS_INIT_START;
+
+ if (spi_reg_write(dev, NIOS_INIT, val))
+ return -EIO;
+ }
+
+nios_init_done:
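+ /* poll NIOS_INIT until the firmware reports a nonzero status, giving up after the 10 s timeout armed above */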
+ do {
+ if (spi_reg_read(dev, NIOS_INIT, &val))
+ return -EIO;
+ if (val)
+ break;
+
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, timeout))
+ return -ETIMEDOUT;
+ msleep(100);
+ } while (!val);
+
+ return 0;
+}
+
+static int nios_spi_check_error(struct altera_spi_device *dev)
+{
+ u32 value = 0;
+
+ if (spi_reg_read(dev, PKVL_A_MODE_STS, &value))
+ return -EIO;
+
+ dev_debug(dev, "PKVL A Mode Status 0x%x\n", value);
+
+ if (value >= 0x100)
+ return -EINVAL;
+
+ if (spi_reg_read(dev, PKVL_B_MODE_STS, &value))
+ return -EIO;
+
+ dev_debug(dev, "PKVL B Mode Status 0x%x\n", value);
+
+ if (value >= 0x100)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fme_nios_spi_init(struct ifpga_feature *feature)
+{
+ struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
+ struct altera_spi_device *spi_master;
+ struct intel_max10_device *max10;
+ struct ifpga_hw *hw;
+ struct opae_manager *mgr;
+ int ret = 0;
+
+ hw = fme->parent;
+ if (!hw)
+ return -ENODEV;
+
+ mgr = hw->adapter->mgr;
+ if (!mgr)
+ return -ENODEV;
+
+ dev_info(fme, "FME SPI Master (NIOS) Init.\n");
+ dev_debug(fme, "FME SPI base addr %p.\n",
+ feature->addr);
+ dev_debug(fme, "spi param=0x%llx\n",
+ (unsigned long long)opae_readq(feature->addr + 0x8));
+
+ spi_master = altera_spi_alloc(feature->addr, TYPE_NIOS_SPI);
+ if (!spi_master)
+ return -ENODEV;
+
+ /**
+ * 1. wait for A10 NIOS initialization to finish, then
+ * release the SPI master to the host
+ */
+ ret = nios_spi_wait_init_done(spi_master);
+ if (ret != 0) {
+ dev_err(fme, "FME NIOS_SPI init fail\n");
+ goto release_dev;
+ }
+
+ dev_info(fme, "FME NIOS_SPI initial done\n");
+
+ /* 2. check whether any error occurred */
+ if (nios_spi_check_error(spi_master))
+ dev_info(fme, "NIOS_SPI INIT done, but found some error\n");
+
+ /* 3. init the SPI master */
+ altera_spi_init(spi_master);
+
+ /* init the max10 device */
+ max10 = intel_max10_device_probe(spi_master, 0);
+ if (!max10) {
+ ret = -ENODEV;
+ dev_err(fme, "max10 init fail\n");
+ goto release_dev;
+ }
+
+ max10->bus = hw->pci_data->bus;
+
+ fme_get_board_interface(fme);
+
+ fme->max10_dev = max10;
+ mgr->sensor_list = &max10->opae_sensor_list;
+
+ /* SPI self test */
+ if (spi_self_checking(max10))
+ goto spi_fail;
+
+ return ret;
+
+spi_fail:
+ intel_max10_device_remove(fme->max10_dev);
+release_dev:
+ altera_spi_release(spi_master);
+ return -ENODEV;
+}
+
+static void fme_nios_spi_uinit(struct ifpga_feature *feature)
+{
+ struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
+
+ if (fme->max10_dev)
+ intel_max10_device_remove(fme->max10_dev);
+}
+
+struct ifpga_feature_ops fme_nios_spi_master_ops = {
+ .init = fme_nios_spi_init,
+ .uinit = fme_nios_spi_uinit,
+};
+
+static int i2c_mac_rom_test(struct altera_i2c_dev *dev)
+{
+ char buf[20];
+ int ret;
+ char read_buf[20] = {0,};
+ const char *string = "1a2b3c4d5e";
+
+ opae_memcpy(buf, string, strlen(string));
+
+ ret = at24_eeprom_write(dev, AT24512_SLAVE_ADDR, 0,
+ (u8 *)buf, strlen(string));
+ if (ret < 0) {
+ dev_err(NULL, "write i2c error:%d\n", ret);
+ return ret;
+ }
+
+ ret = at24_eeprom_read(dev, AT24512_SLAVE_ADDR, 0,
+ (u8 *)read_buf, strlen(string));
+ if (ret < 0) {
+ dev_err(NULL, "read i2c error:%d\n", ret);
+ return ret;
+ }
+
+ if (memcmp(buf, read_buf, strlen(string))) {
+ dev_err(NULL, "%s test fail!\n", __func__);
+ return -EFAULT;
+ }
+
+ dev_info(NULL, "%s test successful\n", __func__);
+
+ return 0;
+}
+
+static int fme_i2c_init(struct ifpga_feature *feature)
+{
+ struct feature_fme_i2c *i2c;
+ struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
+
+ i2c = (struct feature_fme_i2c *)feature->addr;
+
+ dev_info(NULL, "FME I2C Master Init.\n");
+
+ fme->i2c_master = altera_i2c_probe(i2c);
+ if (!fme->i2c_master)
+ return -ENODEV;
+
+ /* MAC ROM self test */
+ i2c_mac_rom_test(fme->i2c_master);
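+ /* a test failure is only logged; it does not fail the feature init */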
+
+ return 0;
+}
+
+static void fme_i2c_uninit(struct ifpga_feature *feature)
+{
+ struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
+
+ altera_i2c_remove(fme->i2c_master);
+}
+
+struct ifpga_feature_ops fme_i2c_master_ops = {
+ .init = fme_i2c_init,
+ .uinit = fme_i2c_uninit,
+};
+
+static int fme_eth_group_init(struct ifpga_feature *feature)
+{
+ struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
+ struct eth_group_device *dev;
+
+ dev = (struct eth_group_device *)eth_group_probe(feature->addr);
+ if (!dev)
+ return -ENODEV;
+
+ fme->eth_dev[dev->group_id] = dev;
+
+ fme->eth_group_region[dev->group_id].addr =
+ feature->addr;
+ fme->eth_group_region[dev->group_id].phys_addr =
+ feature->phys_addr;
+ fme->eth_group_region[dev->group_id].len =
+ feature->size;
+
+ fme->nums_eth_dev++;
+
+ dev_info(NULL, "FME PHY Group %d Init.\n", dev->group_id);
+ dev_info(NULL, "found %d eth group, addr %p phys_addr 0x%llx len %u\n",
+ dev->group_id, feature->addr,
+ (unsigned long long)feature->phys_addr,
+ feature->size);
+
+ return 0;
+}
+
+static void fme_eth_group_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+}
+
+struct ifpga_feature_ops fme_eth_group_ops = {
+ .init = fme_eth_group_init,
+ .uinit = fme_eth_group_uinit,
+};
+
+int fme_mgr_read_mac_rom(struct ifpga_fme_hw *fme, int offset,
+ void *buf, int size)
+{
+ struct altera_i2c_dev *dev;
+
+ dev = fme->i2c_master;
+ if (!dev)
+ return -ENODEV;
+
+ return at24_eeprom_read(dev, AT24512_SLAVE_ADDR, offset, buf, size);
+}
+
+int fme_mgr_write_mac_rom(struct ifpga_fme_hw *fme, int offset,
+ void *buf, int size)
+{
+ struct altera_i2c_dev *dev;
+
+ dev = fme->i2c_master;
+ if (!dev)
+ return -ENODEV;
+
+ return at24_eeprom_write(dev, AT24512_SLAVE_ADDR, offset, buf, size);
+}
+
+static struct eth_group_device *get_eth_group_dev(struct ifpga_fme_hw *fme,
+ u8 group_id)
+{
+ struct eth_group_device *dev;
+
+ if (group_id > (MAX_ETH_GROUP_DEVICES - 1))
+ return NULL;
+
+ dev = (struct eth_group_device *)fme->eth_dev[group_id];
+ if (!dev)
+ return NULL;
+
+ if (dev->status != ETH_GROUP_DEV_ATTACHED)
+ return NULL;
+
+ return dev;
+}
+
+int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw *fme)
+{
+ return fme->nums_eth_dev;
+}
+
+int fme_mgr_get_eth_group_info(struct ifpga_fme_hw *fme,
+ u8 group_id, struct opae_eth_group_info *info)
+{
+ struct eth_group_device *dev;
+
+ dev = get_eth_group_dev(fme, group_id);
+ if (!dev)
+ return -ENODEV;
+
+ info->group_id = group_id;
+ info->speed = dev->speed;
+ info->nums_of_mac = dev->mac_num;
+ info->nums_of_phy = dev->phy_num;
+
+ return 0;
+}
+
+int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw *fme, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 *data)
+{
+ struct eth_group_device *dev;
+
+ dev = get_eth_group_dev(fme, group_id);
+ if (!dev)
+ return -ENODEV;
+
+ return eth_group_read_reg(dev, type, index, addr, data);
+}
+
+int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw *fme, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 data)
+{
+ struct eth_group_device *dev;
+
+ dev = get_eth_group_dev(fme, group_id);
+ if (!dev)
+ return -ENODEV;
+
+ return eth_group_write_reg(dev, type, index, addr, data);
+}
+
+static int fme_get_eth_group_speed(struct ifpga_fme_hw *fme,
+ u8 group_id)
+{
+ struct eth_group_device *dev;
+
+ dev = get_eth_group_dev(fme, group_id);
+ if (!dev)
+ return -ENODEV;
+
+ return dev->speed;
+}
+
+int fme_mgr_get_retimer_info(struct ifpga_fme_hw *fme,
+ struct opae_retimer_info *info)
+{
+ struct intel_max10_device *dev;
+
+ dev = (struct intel_max10_device *)fme->max10_dev;
+ if (!dev)
+ return -ENODEV;
+
+ info->nums_retimer = fme->board_info.nums_of_retimer;
+ info->ports_per_retimer = fme->board_info.ports_per_retimer;
+ info->nums_fvl = fme->board_info.nums_of_fvl;
+ info->ports_per_fvl = fme->board_info.ports_per_fvl;
+
+ /* The PKVL speed is identical to the eth group's speed */
+ info->support_speed = fme_get_eth_group_speed(fme,
+ LINE_SIDE_GROUP_ID);
+
+ return 0;
+}
+
+int fme_mgr_get_retimer_status(struct ifpga_fme_hw *fme,
+ struct opae_retimer_status *status)
+{
+ struct intel_max10_device *dev;
+ unsigned int val;
+
+ dev = (struct intel_max10_device *)fme->max10_dev;
+ if (!dev)
+ return -ENODEV;
+
+ if (max10_sys_read(dev, PKVL_LINK_STATUS, &val)) {
+ dev_err(dev, "%s: read pkvl status fail\n", __func__);
+ return -EINVAL;
+ }
+
+ /* The PKVL speed is identical to the eth group's speed */
+ status->speed = fme_get_eth_group_speed(fme,
+ LINE_SIDE_GROUP_ID);
+
+ status->line_link_bitmap = val;
+
+ dev_debug(dev, "get retimer status: speed:%d. line_link_bitmap:0x%x\n",
+ status->speed,
+ status->line_link_bitmap);
+
+ return 0;
+}
+
+int fme_mgr_get_sensor_value(struct ifpga_fme_hw *fme,
+ struct opae_sensor_info *sensor,
+ unsigned int *value)
+{
+ struct intel_max10_device *dev;
+
+ dev = (struct intel_max10_device *)fme->max10_dev;
+ if (!dev)
+ return -ENODEV;
+
+ if (max10_sys_read(dev, sensor->value_reg, value)) {
+ dev_err(dev, "%s: read sensor value register 0x%x fail\n",
+ __func__, sensor->value_reg);
+ return -EINVAL;
+ }
+
+ *value *= sensor->multiplier;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_dperf.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_dperf.c
new file mode 100644
index 000000000..954f8a8c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_dperf.c
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
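+/* 0xff addresses the root (unfiltered, all-ports) perf object rather than a single port */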
+
+static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_clk_ctr clk;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ clk.afu_interf_clock = readq(&dperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_header header;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ header.csr = readq(&dperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define DPERF_TIMEOUT 30
+
+static bool fabric_pobj_is_enabled(int port_id,
+ struct feature_fme_dperf *dperf)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum dperf_fab_events fab_event)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dfpmon_fab_ctr ctr;
+ struct feature_fme_dperf *dperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, dperf))
+ goto exit;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &dperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
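+ /* the hardware echoes the programmed event code back in the counter register; wait until it matches before trusting the count */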
+ if (fpga_wait_register_field(event_code, ctr,
+ &dperf->fab_ctr, DPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&dperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);
+
+static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_dperf *dperf;
+ int status;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ status = fabric_pobj_is_enabled(port_id, dperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * Enabling the event counter for one port (or for all ports) in the fabric
+ * automatically disables any fabric event counter that was enabled before.
+ */
+static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dperf *dperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ /* nothing to do if it is already enabled */
+ if (fabric_pobj_is_enabled(port_id, dperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&dperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+
+static int fme_global_dperf_init(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf Init.\n");
+
+ return 0;
+}
+
+static void fme_global_dperf_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf UInit.\n");
+}
+
+static int fme_dperf_fab_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_dperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_dperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* MMIO_READ */
+ return fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);
+ case 0x5: /* MMIO_WRITE */
+ return fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);
+ case 0x6: /* ENABLE */
+ return fme_dperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_dperf_root_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_dperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_dperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
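+ /* prop_id packs TOP/SUB/ID bit-fields: TOP selects the sub-feature group, SUB the port, and ID the individual property */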
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_dperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_dperf_fab_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE - fab root only prop */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_dperf_set_fab_freeze(fme, prop->data);
+ case 0x6: /* ENABLE - fab both root and sub */
+ return fme_dperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct ifpga_feature_ops fme_global_dperf_ops = {
+ .init = fme_global_dperf_init,
+ .uinit = fme_global_dperf_uinit,
+ .get_prop = fme_global_dperf_get_prop,
+ .set_prop = fme_global_dperf_set_prop,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_error.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_error.c
new file mode 100644
index 000000000..5905eac26
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_error.c
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ *val = fme_error0.csr;
+
+ return 0;
+}
+
+static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_first_error fme_first_err;
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ *val = fme_first_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_next_error fme_next_err;
+
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+ *val = fme_next_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+
+ spinlock_lock(&fme->lock);
+
+ writeq(val, &fme_err->fme_err);
+
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_header header;
+
+ header.csr = readq(&fme_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ *val = pcie0_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
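+ /* mask PCIe0 error reporting while clearing, and only clear when the caller's snapshot still matches the live register */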
+ writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ if (val != pcie0_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
+ &fme_err->pcie0_err);
+
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ *val = pcie1_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ if (val != pcie1_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
+ &fme_err->pcie1_err);
+
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+
+ ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
+ *val = ras_nonfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+
+ ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
+ *val = ras_catfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+ *val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
+
+ return 0;
+}
+
+static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ spinlock_lock(&fme->lock);
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+
+ if (val <= FME_RAS_ERROR_INJ_MASK) {
+ ras_error_inj.csr = val;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static void fme_error_enable(struct ifpga_fme_hw *fme)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ writeq(0UL, &fme_err->ras_nonfat_mask);
+ writeq(0UL, &fme_err->ras_catfat_mask);
+}
+
+static int fme_global_error_init(struct ifpga_feature *feature)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ fme_error_enable(fme);
+
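+ /* a nonzero context count (assumed to be the number of MSI-X vectors) means the error IRQ can be exposed */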
+ if (feature->ctx_num)
+ fme->capability |= FPGA_FME_CAP_ERR_IRQ;
+
+ return 0;
+}
+
+static void fme_global_error_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int fme_err_check_seu(struct feature_fme_err *fme_err)
+{
+ struct feature_fme_error_capability error_cap;
+
+ error_cap.csr = readq(&fme_err->fme_err_capability);
+
+ return error_cap.seu_support ? 1 : 0;
+}
+
+static int fme_err_get_seu_emr(struct ifpga_fme_hw *fme,
+ u64 *val, bool high)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+
+ if (!fme_err_check_seu(fme_err))
+ return -ENODEV;
+
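+ /* the SEU error matrix register is exposed as two 64-bit halves; 'high' selects the upper word */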
+ if (high)
+ *val = readq(&fme_err->seu_emr_h);
+ else
+ *val = readq(&fme_err->seu_emr_l);
+
+ return 0;
+}
+
+static int fme_err_fme_err_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* ERRORS */
+ return fme_err_get_errors(fme, &prop->data);
+ case 0x2: /* FIRST_ERROR */
+ return fme_err_get_first_error(fme, &prop->data);
+ case 0x3: /* NEXT_ERROR */
+ return fme_err_get_next_error(fme, &prop->data);
+ case 0x5: /* SEU EMR LOW */
+ return fme_err_get_seu_emr(fme, &prop->data, 0);
+ case 0x6: /* SEU EMR HIGH */
+ return fme_err_get_seu_emr(fme, &prop->data, 1);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x5: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_get_pcie0_errors(fme, &prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_get_pcie1_errors(fme, &prop->data);
+ case 0x8: /* NONFATAL_ERRORS */
+ return fme_err_get_nonfatal_errors(fme, &prop->data);
+ case 0x9: /* CATFATAL_ERRORS */
+ return fme_err_get_catfatal_errors(fme, &prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_get_inject_errors(fme, &prop->data);
+ case 0xb: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_get_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_fme_err_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x4: /* CLEAR */
+ return fme_err_set_clear(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_set_pcie0_errors(fme, prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_set_pcie1_errors(fme, prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_set_inject_errors(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_set_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_err_set_irq(struct ifpga_feature *feature, void *irq_set)
+{
+ struct fpga_fme_err_irq_set *err_irq_set = irq_set;
+ struct ifpga_fme_hw *fme;
+ int ret;
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+
+ if (!(fme->capability & FPGA_FME_CAP_ERR_IRQ))
+ return -ENODEV;
+
+ spinlock_lock(&fme->lock);
+ ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+struct ifpga_feature_ops fme_global_err_ops = {
+ .init = fme_global_error_init,
+ .uinit = fme_global_error_uinit,
+ .get_prop = fme_global_error_get_prop,
+ .set_prop = fme_global_error_set_prop,
+ .set_irq = fme_global_err_set_irq,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_iperf.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_iperf.c
new file mode 100644
index 000000000..70543b953
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_iperf.c
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_clk_ctr clk;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ clk.afu_interf_clock = readq(&iperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_header header;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ header.csr = readq(&iperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ *freeze = (u64)ctl.freeze;
+ return 0;
+}
+
+static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->ch_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define IPERF_TIMEOUT 30
+
+static u64 read_cache_counter(struct ifpga_fme_hw *fme,
+ u8 channel, enum iperf_cache_events event)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* set channel access type and cache event code. */
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.cci_chsel = channel;
+ ctl.cache_event = event;
+ writeq(ctl.csr, &iperf->ch_ctl);
+
+ /* check the event type in the counter registers */
+ ctr0.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr0,
+ &iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
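+ /* each cache channel exposes two counter registers; their values are summed for the final count */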
+ ctr0.csr = readq(&iperf->ch_ctr0);
+ ctr1.csr = readq(&iperf->ch_ctr1);
+ counter = ctr0.cache_counter + ctr1.cache_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define CACHE_SHOW(name, type, event) \
+static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_cache_counter(fme, type, event); \
+ return 0; \
+}
+
+CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
+CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
+CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
+CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
+CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
+CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
+CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
+CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
+CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN);
+CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN);
+
+static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
+ enum iperf_vtd_sip_events event)
+{
+ struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
+ sip_ctl.vtd_evtcode = event;
+ writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);
+
+ sip_ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, sip_ctr,
+ &iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
+ counter = sip_ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_SIP_SHOW(name, event) \
+static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_iommu_sip_counter(fme, event); \
+ return 0; \
+}
+
+VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
+VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
+VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
+VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
+VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
+VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
+VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
+VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
+VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
+VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
+VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
+VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);
+
+static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_vtd_events base_event)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_ifpmon_vtd_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ enum iperf_vtd_events event = base_event + port_id;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.vtd_evtcode = event;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+
+ ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->vtd_ctr);
+ counter = ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_PORT_SHOW(name, base_event) \
+static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_iommu_counter(fme, port_id, base_event); \
+ return 0; \
+}
+
+VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
+VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
+VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
+VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
+VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
+VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
+VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);
+
+static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_fab_events fab_event)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_ifpmon_fab_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, iperf))
+ goto exit;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &iperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
+FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
+FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
+FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
+FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);
+
+static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_iperf *iperf;
+ int status;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ status = fabric_pobj_is_enabled(port_id, iperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * If the event counter is enabled for one port or for all ports in the
+ * fabric, any fabric event counter that was previously enabled is
+ * disabled automatically.
+ */
+static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* if it is already enabled. */
+ if (fabric_pobj_is_enabled(port_id, iperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&iperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
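+
+/*
+ * Usage sketch for the filter semantics above: a call such as
+ * fme_iperf_set_fab_port_enable(fme, 2, 1) restricts fabric counting to
+ * port 2 only, while passing PERF_OBJ_ROOT_ID disables the port filter so
+ * the counters account for all ports again.
+ */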
+
+static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+#define FME_IPERF_CAP_IOMMU 0x1
+
+static int fme_global_iperf_init(struct ifpga_feature *feature)
+{
+ struct ifpga_fme_hw *fme;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+
+ dev_info(NULL, "FME global_iperf Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ /* check whether iommu is supported on this device. */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ dev_info(NULL, "FME HEAD fme_capability %llx.\n",
+ (unsigned long long)fme_hdr->capability.csr);
+
+ if (fme_capability.iommu_support)
+ feature->cap |= FME_IPERF_CAP_IOMMU;
+
+ return 0;
+}
+
+static void fme_global_iperf_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_iperf UInit.\n");
+}
+
+static int fme_iperf_root_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_iperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_iperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_cache_freeze(fme, &prop->data);
+ case 0x2: /* READ_HIT */
+ return fme_iperf_get_cache_read_hit(fme, &prop->data);
+ case 0x3: /* READ_MISS */
+ return fme_iperf_get_cache_read_miss(fme, &prop->data);
+ case 0x4: /* WRITE_HIT */
+ return fme_iperf_get_cache_write_hit(fme, &prop->data);
+ case 0x5: /* WRITE_MISS */
+ return fme_iperf_get_cache_write_miss(fme, &prop->data);
+ case 0x6: /* HOLD_REQUEST */
+ return fme_iperf_get_cache_hold_request(fme, &prop->data);
+ case 0x7: /* TX_REQ_STALL */
+ return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
+ case 0x8: /* RX_REQ_STALL */
+ return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
+ case 0x9: /* RX_EVICTION */
+ return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
+ case 0xa: /* DATA_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_data_write_port_contention(fme,
+ &prop->data);
+ case 0xb: /* TAG_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_tag_write_port_contention(fme,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_root_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_vtd_freeze(fme, &prop->data);
+ case 0x2: /* IOTLB_4K_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
+ case 0x3: /* IOTLB_2M_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
+ case 0x4: /* IOTLB_1G_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
+ case 0x5: /* SLPWC_L3_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
+ case 0x6: /* SLPWC_L4_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
+ case 0x7: /* RCC_HIT */
+ return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
+ case 0x8: /* IOTLB_4K_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
+ case 0x9: /* IOTLB_2M_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
+ case 0xa: /* IOTLB_1G_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
+ case 0xb: /* SLPWC_L3_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
+ case 0xc: /* SLPWC_L4_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
+ case 0xd: /* RCC_MISS */
+ return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_sub_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub > PERF_MAX_PORT_NUM)
+ return -ENOENT;
+
+ switch (id) {
+ case 0xe: /* READ_TRANSACTION */
+ return fme_iperf_get_vtd_port_read_transaction(fme, sub,
+ &prop->data);
+ case 0xf: /* WRITE_TRANSACTION */
+ return fme_iperf_get_vtd_port_write_transaction(fme, sub,
+ &prop->data);
+ case 0x10: /* DEVTLB_READ_HIT */
+ return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
+ &prop->data);
+ case 0x11: /* DEVTLB_WRITE_HIT */
+ return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
+ &prop->data);
+ case 0x12: /* DEVTLB_4K_FILL */
+ return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
+ &prop->data);
+ case 0x13: /* DEVTLB_2M_FILL */
+ return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
+ &prop->data);
+ case 0x14: /* DEVTLB_1G_FILL */
+ return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED)
+ return fme_iperf_vtd_root_get_prop(feature, prop);
+
+ return fme_iperf_vtd_sub_get_prop(feature, prop);
+}
+
+static int fme_iperf_fab_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ /* FREEZE exists only at top level; other properties exist at both top and sub levels */
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_iperf_get_fab_port_pcie0_read(fme, sub,
+ &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_iperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* PCIE1_READ */
+ return fme_iperf_get_fab_port_pcie1_read(fme, sub,
+ &prop->data);
+ case 0x5: /* PCIE1_WRITE */
+ return fme_iperf_get_fab_port_pcie1_write(fme, sub,
+ &prop->data);
+ case 0x6: /* UPI_READ */
+ return fme_iperf_get_fab_port_upi_read(fme, sub,
+ &prop->data);
+ case 0x7: /* UPI_WRITE */
+ return fme_iperf_get_fab_port_upi_write(fme, sub,
+ &prop->data);
+ case 0x8: /* MMIO_READ */
+ return fme_iperf_get_fab_port_mmio_read(fme, sub,
+ &prop->data);
+ case 0x9: /* MMIO_WRITE */
+ return fme_iperf_get_fab_port_mmio_write(fme, sub,
+ &prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_get_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_get_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_iperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
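+
+/*
+ * Dispatch example: a prop_id whose TOP field is PERF_PROP_TOP_CACHE,
+ * whose SUB field is PERF_PROP_SUB_UNUSED and whose ID field is 0x2 is
+ * routed through fme_iperf_cache_get_prop() above and lands in
+ * fme_iperf_get_cache_read_hit(). The exact bit layout of prop_id is
+ * given by the PROP_TOP/PROP_SUB/PROP_ID fields defined elsewhere.
+ */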
+
+static int fme_iperf_cache_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_cache_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_vtd_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_fab_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_set_fab_freeze(fme, prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_set_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_set_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct ifpga_feature_ops fme_global_iperf_ops = {
+ .init = fme_global_iperf_init,
+ .uinit = fme_global_iperf_uinit,
+ .get_prop = fme_global_iperf_get_prop,
+ .set_prop = fme_global_iperf_set_prop,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_pr.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_pr.c
new file mode 100644
index 000000000..9997942d2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_fme_pr.c
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static u64
+pr_err_handle(struct feature_fme_pr *fme_pr)
+{
+ struct feature_fme_pr_status fme_pr_status;
+ unsigned long err_code;
+ u64 fme_pr_error;
+ int i;
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ if (!fme_pr_status.pr_status)
+ return 0;
+
+ err_code = readq(&fme_pr->ccip_fme_pr_err);
+ fme_pr_error = err_code;
+
+ for (i = 0; i < PR_MAX_ERR_NUM; i++) {
+ if (err_code & (1 << i))
+ dev_info(NULL, "%s\n", pr_err_msg[i]);
+ }
+
+ writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
+ return fme_pr_error;
+}
+
+static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
+ return -EINVAL;
+
+ dev_info(fme_dev, "resetting PR before initiated PR\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ fme_pr_ctl.pr_reset_ack = 1;
+
+ if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 0;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");
+
+ fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;
+
+ if (fpga_wait_register_field(pr_host_status, fme_pr_status,
+ &fme_pr->ccip_fme_pr_status,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "check if have any previous PR error\n");
+ pr_err_handle(fme_pr);
+ return 0;
+}
+
+static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
+ int port_id, const char *buf, size_t count,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+ struct feature_fme_pr_data fme_pr_data;
+ int delay, pr_credit;
+ int ret = 0;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ dev_info(fme_dev, "set PR port ID and start request\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_regionid = port_id;
+ fme_pr_ctl.pr_start_req = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "pushing data from bitstream to HW\n");
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+
+ while (count > 0) {
+ delay = 0;
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(fme_dev, "maximum try\n");
+
+ info->pr_err = pr_err_handle(fme_pr);
+ return info->pr_err ? -EIO : -ETIMEDOUT;
+ }
+ udelay(1);
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+ }
+
+ if (count >= fme_dev->pr_bandwidth) {
+ switch (fme_dev->pr_bandwidth) {
+ case 4:
+ fme_pr_data.rsvd = 0;
+ fme_pr_data.pr_data_raw = *((const u32 *)buf);
+ writeq(fme_pr_data.csr,
+ &fme_pr->ccip_fme_pr_data);
+ break;
+ default:
+ ret = -EFAULT;
+ goto done;
+ }
+
+ buf += fme_dev->pr_bandwidth;
+ count -= fme_dev->pr_bandwidth;
+ pr_credit--;
+ } else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_push_complete = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "green bitstream push complete\n");
+ dev_info(fme_dev, "waiting for HW to release PR resource\n");
+
+ fme_pr_ctl.pr_start_req = 0;
+
+ if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ printf("maximum try.\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "PR operation complete, checking status\n");
+ info->pr_err = pr_err_handle(fme_pr);
+ if (info->pr_err)
+ return -EIO;
+
+ dev_info(fme_dev, "PR done successfully\n");
+ return 0;
+}
+
+static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ info->state = FPGA_PR_STATE_WRITE_INIT;
+ ret = fme_pr_write_init(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error preparing FPGA for writing\n");
+ info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
+ return ret;
+ }
+
+ /*
+ * Write the FPGA image to the FPGA.
+ */
+ info->state = FPGA_PR_STATE_WRITE;
+ ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
+ if (ret) {
+ dev_err(fme_dev, "Error while writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_ERR;
+ return ret;
+ }
+
+ /*
+ * After all the FPGA image has been written, do the device specific
+ * steps to finish and set the FPGA into operating mode.
+ */
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE;
+ ret = fme_pr_write_complete(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error after writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
+ return ret;
+ }
+ info->state = FPGA_PR_STATE_DONE;
+
+ return 0;
+}
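+
+/*
+ * On the happy path the state above progresses WRITE_INIT -> WRITE ->
+ * WRITE_COMPLETE -> DONE; on failure it is left at the corresponding
+ * *_ERR value, so callers of fpga_pr_buf_load() can tell which stage
+ * failed.
+ */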
+
+static int fme_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer,
+ u32 size, u64 *status)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct fpga_pr_info info;
+ struct ifpga_port_hw *port;
+ int ret = 0;
+
+ if (!buffer || size == 0)
+ return -EINVAL;
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ /*
+ * Padding extra zeros to align PR buffer with PR bandwidth, HW will
+ * ignore these zeros automatically.
+ */
+ size = IFPGA_ALIGN(size, fme->pr_bandwidth);
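+ /*
+ * e.g. with 32-bit PR (pr_bandwidth == 4) a 1029-byte bitstream is
+ * padded up to 1032 bytes, assuming IFPGA_ALIGN() rounds its first
+ * argument up to the next multiple of its second.
+ */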
+
+ /* get fme header region */
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_HEADER);
+ if (!fme_hdr)
+ return -EINVAL;
+
+ /* check port id */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ if (port_id >= fme_capability.num_ports) {
+ dev_err(fme, "port number more than maximum\n");
+ return -EINVAL;
+ }
+
+ opae_memset(&info, 0, sizeof(struct fpga_pr_info));
+ info.flags = FPGA_MGR_PARTIAL_RECONFIG;
+ info.port_id = port_id;
+
+ spinlock_lock(&fme->lock);
+
+ /* get port device by port_id */
+ port = &hw->port[port_id];
+
+ /* Disable Port before PR */
+ fpga_port_disable(port);
+
+ ret = fpga_pr_buf_load(fme, &info, buffer, size);
+
+ *status = info.pr_err;
+
+ /* Re-enable Port after PR finished */
+ fpga_port_enable(port);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer,
+ u32 size, u64 *status)
+{
+ const struct bts_header *bts_hdr;
+ const char *buf;
+ struct ifpga_port_hw *port;
+ int ret;
+ u32 header_size;
+
+ if (!buffer || size == 0) {
+ dev_err(hw, "invalid parameter\n");
+ return -EINVAL;
+ }
+
+ bts_hdr = (const struct bts_header *)buffer;
+
+ if (is_valid_bts(bts_hdr)) {
+ dev_info(hw, "this is a valid bitsteam..\n");
+ header_size = sizeof(struct bts_header) +
+ bts_hdr->metadata_len;
+ if (size < header_size)
+ return -EINVAL;
+ size -= header_size;
+ buf = buffer + header_size;
+ } else {
+ dev_err(hw, "this is an invalid bitstream..\n");
+ return -EINVAL;
+ }
+
+ /* clean port error before do PR */
+ port = &hw->port[port_id];
+ ret = port_clear_error(port);
+ if (ret) {
+ dev_err(hw, "port cannot clear error\n");
+ return -EINVAL;
+ }
+
+ return fme_pr(hw, port_id, buf, size, status);
+}
+
+static int fme_pr_mgmt_init(struct ifpga_feature *feature)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_header fme_pr_header;
+ struct ifpga_fme_hw *fme;
+
+ dev_info(NULL, "FME PR MGMT Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+
+ fme_pr = (struct feature_fme_pr *)feature->addr;
+
+ fme_pr_header.csr = readq(&fme_pr->header);
+ if (fme_pr_header.revision == 2) {
+ dev_info(NULL, "using 512-bit PR\n");
+ fme->pr_bandwidth = 64;
+ } else {
+ dev_info(NULL, "using 32-bit PR\n");
+ fme->pr_bandwidth = 4;
+ }
+
+ return 0;
+}
+
+static void fme_pr_mgmt_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME PR MGMT UInit.\n");
+}
+
+struct ifpga_feature_ops fme_pr_mgmt_ops = {
+ .init = fme_pr_mgmt_init,
+ .uinit = fme_pr_mgmt_uinit,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_hw.h b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_hw.h
new file mode 100644
index 000000000..7c3307fe7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_hw.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_HW_H_
+#define _IFPGA_HW_H_
+
+#include "ifpga_defines.h"
+#include "opae_ifpga_hw_api.h"
+#include "opae_eth_group.h"
+
+/** List of private features */
+TAILQ_HEAD(ifpga_feature_list, ifpga_feature);
+
+enum ifpga_feature_state {
+ IFPGA_FEATURE_UNUSED = 0,
+ IFPGA_FEATURE_ATTACHED,
+};
+
+enum feature_type {
+ FEATURE_FME_TYPE = 0,
+ FEATURE_PORT_TYPE,
+};
+
+struct feature_irq_ctx {
+ int eventfd;
+ int idx;
+};
+
+struct ifpga_feature {
+ TAILQ_ENTRY(ifpga_feature) next;
+ enum ifpga_feature_state state;
+ enum feature_type type;
+ const char *name;
+ u64 id;
+ u8 *addr;
+ uint64_t phys_addr;
+ u32 size;
+ int revision;
+ u64 cap;
+ int vfio_dev_fd;
+ struct feature_irq_ctx *ctx;
+ unsigned int ctx_num;
+
+ void *parent; /* to parent hw data structure */
+
+ struct ifpga_feature_ops *ops; /* callbacks for this private feature */
+ unsigned int vec_start;
+ unsigned int vec_cnt;
+};
+
+struct ifpga_feature_ops {
+ int (*init)(struct ifpga_feature *feature);
+ void (*uinit)(struct ifpga_feature *feature);
+ int (*get_prop)(struct ifpga_feature *feature,
+ struct feature_prop *prop);
+ int (*set_prop)(struct ifpga_feature *feature,
+ struct feature_prop *prop);
+ int (*set_irq)(struct ifpga_feature *feature, void *irq_set);
+};
+
+enum ifpga_fme_state {
+ IFPGA_FME_UNUSED = 0,
+ IFPGA_FME_IMPLEMENTED,
+};
+
+struct ifpga_fme_hw {
+ enum ifpga_fme_state state;
+
+ struct ifpga_feature_list feature_list;
+ spinlock_t lock; /* protect hardware access */
+
+ void *parent; /* pointer to ifpga_hw */
+
+ /* provided by HEADER feature */
+ u32 port_num;
+ struct uuid bitstream_id;
+ u64 bitstream_md;
+ size_t pr_bandwidth;
+ u32 socket_id;
+ u32 fabric_version_id;
+ u32 cache_size;
+
+ u32 capability;
+
+ void *max10_dev; /* MAX10 device */
+ void *i2c_master; /* I2C Master device */
+ void *eth_dev[MAX_ETH_GROUP_DEVICES];
+ struct opae_reg_region
+ eth_group_region[MAX_ETH_GROUP_DEVICES];
+ struct opae_board_info board_info;
+ int nums_eth_dev;
+ unsigned int nums_acc_region;
+};
+
+enum ifpga_port_state {
+ IFPGA_PORT_UNUSED = 0,
+ IFPGA_PORT_ATTACHED,
+ IFPGA_PORT_DETACHED,
+};
+
+struct ifpga_port_hw {
+ enum ifpga_port_state state;
+
+ struct ifpga_feature_list feature_list;
+ spinlock_t lock; /* protect access to hw */
+
+ void *parent; /* pointer to ifpga_hw */
+
+ int port_id; /* provided by HEADER feature */
+ struct uuid afu_id; /* provided by User AFU feature */
+
+ unsigned int disable_count;
+
+ u32 capability;
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+ u8 *stp_addr;
+ u32 stp_size;
+};
+
+#define AFU_MAX_REGION 1
+
+struct ifpga_afu_info {
+ struct opae_reg_region region[AFU_MAX_REGION];
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct ifpga_hw {
+ struct opae_adapter *adapter;
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_fme_hw fme;
+ struct ifpga_port_hw port[MAX_FPGA_PORT_NUM];
+};
+
+static inline bool is_ifpga_hw_pf(struct ifpga_hw *hw)
+{
+ return hw->fme.state != IFPGA_FME_UNUSED;
+}
+
+static inline bool is_valid_port_id(struct ifpga_hw *hw, u32 port_id)
+{
+ if (port_id >= MAX_FPGA_PORT_NUM ||
+ hw->port[port_id].state != IFPGA_PORT_ATTACHED)
+ return false;
+
+ return true;
+}
+#endif /* _IFPGA_HW_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_port.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_port.c
new file mode 100644
index 000000000..c0aaf012f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_port.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct ifpga_feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct ifpga_feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set)
+{
+ struct ifpga_feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+static int port_get_revision(struct ifpga_port_hw *port, u64 *revision)
+{
+ struct feature_port_header *port_hdr
+ = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&port_hdr->header);
+
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int port_get_portidx(struct ifpga_port_hw *port, u64 *idx)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ capability.csr = readq(&port_hdr->capability);
+ *idx = capability.port_number;
+
+ return 0;
+}
+
+static int port_get_latency_tolerance(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ control.csr = readq(&port_hdr->control);
+ *val = control.latency_tolerance;
+
+ return 0;
+}
+
+static int port_get_ap1_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap1_event;
+
+ return 0;
+}
+
+static int port_set_ap1_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap1_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_ap2_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap2_event;
+
+ return 0;
+}
+
+static int port_set_ap2_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap2_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_power_state(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.power_state;
+
+ return 0;
+}
+
+static int port_get_userclk_freqcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_hdr_init(struct ifpga_feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port hdr Init.\n");
+
+ fpga_port_reset(port);
+
+ return 0;
+}
+
+static void port_hdr_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port hdr uinit.\n");
+}
+
+static int port_hdr_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_REVISION:
+ return port_get_revision(port, &prop->data);
+ case PORT_HDR_PROP_PORTIDX:
+ return port_get_portidx(port, &prop->data);
+ case PORT_HDR_PROP_LATENCY_TOLERANCE:
+ return port_get_latency_tolerance(port, &prop->data);
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_get_ap1_event(port, &prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_get_ap2_event(port, &prop->data);
+ case PORT_HDR_PROP_POWER_STATE:
+ return port_get_power_state(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_get_userclk_freqcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_get_userclk_freqcntrcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQSTS:
+ return port_get_userclk_freqsts(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_CNTRSTS:
+ return port_get_userclk_freqcntrsts(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_hdr_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_set_ap1_event(port, prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_set_ap2_event(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_set_userclk_freqcmd(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_set_userclk_freqcntrcmd(port, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct ifpga_feature_ops ifpga_rawdev_port_hdr_ops = {
+ .init = port_hdr_init,
+ .uinit = port_hdr_uinit,
+ .get_prop = port_hdr_get_prop,
+ .set_prop = port_hdr_set_prop,
+};
+
+static int port_stp_init(struct ifpga_feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port stp Init.\n");
+
+ spinlock_lock(&port->lock);
+ port->stp_addr = feature->addr;
+ port->stp_size = feature->size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_stp_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port stp uinit.\n");
+}
+
+struct ifpga_feature_ops ifpga_rawdev_port_stp_ops = {
+ .init = port_stp_init,
+ .uinit = port_stp_uinit,
+};
+
+static int port_uint_init(struct ifpga_feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "PORT UINT Init.\n");
+
+ spinlock_lock(&port->lock);
+ if (feature->ctx_num) {
+ port->capability |= FPGA_PORT_CAP_UAFU_IRQ;
+ port->num_uafu_irqs = feature->ctx_num;
+ }
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_uint_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "PORT UINT UInit.\n");
+}
+
+static int port_uint_set_irq(struct ifpga_feature *feature, void *irq_set)
+{
+ struct fpga_uafu_irq_set *uafu_irq_set = irq_set;
+ struct ifpga_port_hw *port = feature->parent;
+ int ret;
+
+ if (!(port->capability & FPGA_PORT_CAP_UAFU_IRQ))
+ return -ENODEV;
+
+ spinlock_lock(&port->lock);
+ ret = fpga_msix_set_block(feature, uafu_irq_set->start,
+ uafu_irq_set->count, uafu_irq_set->evtfds);
+ spinlock_unlock(&port->lock);
+
+ return ret;
+}
+
+struct ifpga_feature_ops ifpga_rawdev_port_uint_ops = {
+ .init = port_uint_init,
+ .uinit = port_uint_uinit,
+ .set_irq = port_uint_set_irq,
+};
+
+static int port_afu_init(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "PORT AFU Init.\n");
+
+ return 0;
+}
+
+static void port_afu_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "PORT AFU UInit.\n");
+}
+
+struct ifpga_feature_ops ifpga_rawdev_port_afu_ops = {
+ .init = port_afu_init,
+ .uinit = port_afu_uinit,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_port_error.c b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_port_error.c
new file mode 100644
index 000000000..189f7629b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/ifpga_port_error.c
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int port_err_get_revision(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_header header;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ header.csr = readq(&port_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int port_err_get_errors(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+ *val = error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_error(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_first_err_key first_error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ first_error.csr = readq(&port_err->port_first_error);
+ *val = first_error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_lsb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req0 malreq0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq0.header_lsb = readq(&port_err->malreq0);
+ *val = malreq0.header_lsb;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_msb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req1 malreq1;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq1.header_msb = readq(&port_err->malreq1);
+ *val = malreq1.header_msb;
+
+ return 0;
+}
+
+static int port_err_set_clear(struct ifpga_port_hw *port, u64 val)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = port_err_clear(port, val);
+ spinlock_unlock(&port->lock);
+
+ return ret;
+}
+
+static int port_error_init(struct ifpga_feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port error Init.\n");
+
+ spinlock_lock(&port->lock);
+ port_err_mask(port, false);
+ if (feature->ctx_num)
+ port->capability |= FPGA_PORT_CAP_ERR_IRQ;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_error_uinit(struct ifpga_feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int port_error_get_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_ERR_PROP_REVISION:
+ return port_err_get_revision(port, &prop->data);
+ case PORT_ERR_PROP_ERRORS:
+ return port_err_get_errors(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_ERROR:
+ return port_err_get_first_error(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB:
+ return port_err_get_first_malformed_req_lsb(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB:
+ return port_err_get_first_malformed_req_msb(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_error_set_prop(struct ifpga_feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ if (prop->prop_id == PORT_ERR_PROP_CLEAR)
+ return port_err_set_clear(port, prop->data);
+
+ return -ENOENT;
+}
+
+static int port_error_set_irq(struct ifpga_feature *feature, void *irq_set)
+{
+ struct fpga_port_err_irq_set *err_irq_set = irq_set;
+ struct ifpga_port_hw *port;
+ int ret;
+
+ port = feature->parent;
+
+ if (!(port->capability & FPGA_PORT_CAP_ERR_IRQ))
+ return -ENODEV;
+
+ spinlock_lock(&port->lock);
+ ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
+ spinlock_unlock(&port->lock);
+
+ return ret;
+}
+
+struct ifpga_feature_ops ifpga_rawdev_port_error_ops = {
+ .init = port_error_init,
+ .uinit = port_error_uinit,
+ .get_prop = port_error_get_prop,
+ .set_prop = port_error_set_prop,
+ .set_irq = port_error_set_irq,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/meson.build b/src/spdk/dpdk/drivers/raw/ifpga/base/meson.build
new file mode 100644
index 000000000..b13e13e89
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = [
+ 'ifpga_api.c',
+ 'ifpga_enumerate.c',
+ 'ifpga_feature_dev.c',
+ 'ifpga_fme.c',
+ 'ifpga_fme_iperf.c',
+ 'ifpga_fme_dperf.c',
+ 'ifpga_fme_error.c',
+ 'ifpga_port.c',
+ 'ifpga_port_error.c',
+ 'ifpga_fme_pr.c',
+ 'opae_hw_api.c',
+ 'opae_ifpga_hw_api.c',
+ 'opae_debug.c',
+ 'opae_spi.c',
+ 'opae_spi_transaction.c',
+ 'opae_intel_max10.c',
+ 'opae_i2c.c',
+ 'opae_at24_eeprom.c',
+ 'opae_eth_group.c',
+]
+
+base_lib = static_library('ifpga_rawdev_base', sources,
+ dependencies: static_rte_eal,
+ c_args: cflags)
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_at24_eeprom.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_at24_eeprom.c
new file mode 100644
index 000000000..6f834ac04
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_at24_eeprom.c
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#include "opae_osdep.h"
+#include "opae_i2c.h"
+#include "opae_at24_eeprom.h"
+
+#define AT24_READ_RETRY 10
+
+static int at24_eeprom_read_and_try(struct altera_i2c_dev *dev,
+ unsigned int slave_addr,
+ u32 offset, u8 *buf, u32 len)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < AT24_READ_RETRY; i++) {
+ ret = i2c_read16(dev, slave_addr, offset,
+ buf, len);
+ if (ret == 0)
+ break;
+
+ opae_udelay(100);
+ }
+
+ return ret;
+}
+
+int at24_eeprom_read(struct altera_i2c_dev *dev, unsigned int slave_addr,
+ u32 offset, u8 *buf, int count)
+{
+ int len;
+ int status;
+ int read_count = 0;
+
+ if (!count)
+ return count;
+
+ while (count > 0) {
+ /* clamp each transfer to the device I/O limit */
+ if (count > AT24C512_IO_LIMIT)
+ len = AT24C512_IO_LIMIT;
+ else
+ len = count;
+
+ status = at24_eeprom_read_and_try(dev, slave_addr, offset,
+ buf, len);
+ if (status)
+ break;
+
+ buf += len;
+ offset += len;
+ count -= len;
+ read_count += len;
+ }
+
+ return read_count;
+}
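+
+/*
+ * Minimal usage sketch (the 0x100 offset is only illustrative):
+ *
+ *	u8 mac[6];
+ *	int n = at24_eeprom_read(dev, AT24512_SLAVE_ADDR, 0x100, mac, 6);
+ *
+ * n holds the number of bytes actually read; 0 means the first chunk
+ * already failed.
+ */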
+
+int at24_eeprom_write(struct altera_i2c_dev *dev, unsigned int slave_addr,
+ u32 offset, u8 *buf, int count)
+{
+ int len;
+ int status;
+ int write_count = 0;
+
+ if (!count)
+ return count;
+
+ while (count > 0) {
+ /* clamp each transfer to one EEPROM page */
+ if (count > AT24C512_PAGE_SIZE)
+ len = AT24C512_PAGE_SIZE;
+ else
+ len = count;
+
+ status = i2c_write16(dev, slave_addr, offset, buf, len);
+ if (status)
+ break;
+
+ buf += len;
+ offset += len;
+ count -= len;
+ write_count += len;
+ }
+
+ return write_count;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_at24_eeprom.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_at24_eeprom.h
new file mode 100644
index 000000000..caae9a3ec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_at24_eeprom.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#define AT24C512_PAGE_SIZE 128
+#define AT24C512_IO_LIMIT 128
+
+#define AT24512_SLAVE_ADDR 0x51
+
+int at24_eeprom_read(struct altera_i2c_dev *dev, unsigned int slave_addr,
+ u32 offset, u8 *buf, int count);
+int at24_eeprom_write(struct altera_i2c_dev *dev, unsigned int slave_addr,
+ u32 offset, u8 *buf, int count);
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_debug.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_debug.c
new file mode 100644
index 000000000..dad3ea39f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_debug.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#define OPAE_HW_DEBUG
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+
+void opae_manager_dump(struct opae_manager *mgr)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Manger %s\n", mgr->name);
+ opae_log("OPAE Manger OPs = %p\n", mgr->ops);
+ opae_log("OPAE Manager Private Data = %p\n", mgr->data);
+ opae_log("OPAE Adapter(parent) = %p\n", mgr->adapter);
+ opae_log("==========================\n");
+}
+
+void opae_bridge_dump(struct opae_bridge *br)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Bridge %s\n", br->name);
+ opae_log("OPAE Bridge ID = %d\n", br->id);
+ opae_log("OPAE Bridge OPs = %p\n", br->ops);
+ opae_log("OPAE Bridge Private Data = %p\n", br->data);
+ opae_log("OPAE Accelerator(under this bridge) = %p\n", br->acc);
+ opae_log("==========================\n");
+}
+
+void opae_accelerator_dump(struct opae_accelerator *acc)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Accelerator %s\n", acc->name);
+ opae_log("OPAE Accelerator Index = %d\n", acc->index);
+ opae_log("OPAE Accelerator OPs = %p\n", acc->ops);
+ opae_log("OPAE Accelerator Private Data = %p\n", acc->data);
+ opae_log("OPAE Bridge (upstream) = %p\n", acc->br);
+ opae_log("OPAE Manager (upstream) = %p\n", acc->mgr);
+ opae_log("==========================\n");
+
+ if (acc->br)
+ opae_bridge_dump(acc->br);
+}
+
+static void opae_adapter_data_dump(void *data)
+{
+ struct opae_adapter_data *d = data;
+ struct opae_adapter_data_pci *d_pci;
+ struct opae_reg_region *r;
+ int i;
+
+ opae_log("=====%s=====\n", __func__);
+
+ switch (d->type) {
+ case OPAE_FPGA_PCI:
+ d_pci = (struct opae_adapter_data_pci *)d;
+
+ opae_log("OPAE Adapter Type = PCI\n");
+ opae_log("PCI Device ID: 0x%04x\n", d_pci->device_id);
+ opae_log("PCI Vendor ID: 0x%04x\n", d_pci->vendor_id);
+ opae_log("PCI bus: 0x%04x\n", d_pci->bus);
+ opae_log("PCI devid: 0x%04x\n", d_pci->devid);
+ opae_log("PCI function: 0x%04x\n", d_pci->function);
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ r = &d_pci->region[i];
+ opae_log("PCI Bar %d: phy(%llx) len(%llx) addr(%p)\n",
+ i, (unsigned long long)r->phys_addr,
+ (unsigned long long)r->len, r->addr);
+ }
+ break;
+ case OPAE_FPGA_NET:
+ break;
+ }
+
+ opae_log("==========================\n");
+}
+
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose)
+{
+ struct opae_accelerator *acc;
+
+ if (verbose) {
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Adapter %s\n", adapter->name);
+ opae_log("OPAE Adapter OPs = %p\n", adapter->ops);
+ opae_log("OPAE Adapter Private Data = %p\n", adapter->data);
+ opae_log("OPAE Manager (downstream) = %p\n", adapter->mgr);
+
+ if (adapter->mgr)
+ opae_manager_dump(adapter->mgr);
+
+ opae_adapter_for_each_acc(adapter, acc)
+ opae_accelerator_dump(acc);
+
+ if (adapter->data)
+ opae_adapter_data_dump(adapter->data);
+
+ opae_log("==========================\n");
+ }
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_debug.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_debug.h
new file mode 100644
index 000000000..a03dff926
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_DEBUG_H_
+#define _OPAE_DEBUG_H_
+
+#ifdef OPAE_HW_DEBUG
+#define opae_log(fmt, args...) printf(fmt, ## args)
+#else
+#define opae_log(fmt, args...) do {} while (0)
+#endif
+
+void opae_manager_dump(struct opae_manager *mgr);
+void opae_bridge_dump(struct opae_bridge *br);
+void opae_accelerator_dump(struct opae_accelerator *acc);
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose);
+
+#endif /* _OPAE_DEBUG_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_eth_group.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_eth_group.c
new file mode 100644
index 000000000..be28954e0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_eth_group.c
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#include "opae_osdep.h"
+#include "opae_eth_group.h"
+
+#define DATA_VAL_INVL 1 /* us */
+#define DATA_VAL_POLL_TIMEOUT 10 /* us */
+
+static const char *eth_type_to_string(u8 type)
+{
+ switch (type) {
+ case ETH_GROUP_PHY:
+ return "phy";
+ case ETH_GROUP_MAC:
+ return "mac";
+ case ETH_GROUP_ETHER:
+ return "ethernet wrapper";
+ }
+
+ return "unknown";
+}
+
+static int eth_group_get_select(struct eth_group_device *dev,
+ u8 type, u8 index, u8 *select)
+{
+ /*
+ * in different speed configurations, the indexes of
+ * PHYs and MACs map to different device selects.
+ *
+ * 1 ethernet wrapper -> Device Select 0x0 - fixed value
+ * n PHYs -> Device Select 0x2,4,6,8,A,C,E,10,...
+ * n MACs -> Device Select 0x3,5,7,9,B,D,F,11,...
+ */
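+ /*
+ * e.g. PHY 0 -> select 0x2, PHY 1 -> 0x4, MAC 0 -> 0x3,
+ * MAC 1 -> 0x5; the single ethernet wrapper is always select 0x0.
+ */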
+
+ if (type == ETH_GROUP_PHY && index < dev->phy_num)
+ *select = index * 2 + 2;
+ else if (type == ETH_GROUP_MAC && index < dev->mac_num)
+ *select = index * 2 + 3;
+ else if (type == ETH_GROUP_ETHER && index == 0)
+ *select = 0;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int eth_group_write_reg(struct eth_group_device *dev,
+ u8 type, u8 index, u16 addr, u32 data)
+{
+ u8 dev_select = 0;
+ u64 v = 0;
+ int ret;
+
+ dev_debug(dev, "%s type %s index %u addr 0x%x\n",
+ __func__, eth_type_to_string(type), index, addr);
+
+ /* find device select */
+ ret = eth_group_get_select(dev, type, index, &dev_select);
+ if (ret)
+ return ret;
+
+ v = CMD_WR << CTRL_CMD_SHIFT |
+ (u64)dev_select << CTRL_DS_SHIFT |
+ (u64)addr << CTRL_ADDR_SHIFT |
+ (data & CTRL_WR_DATA);
+
+ /* only PHY has additional feature bit */
+ if (type == ETH_GROUP_PHY)
+ v |= CTRL_FEAT_SELECT;
+
+ opae_writeq(v, dev->base + ETH_GROUP_CTRL);
+
+ return 0;
+}
+
+int eth_group_read_reg(struct eth_group_device *dev,
+ u8 type, u8 index, u16 addr, u32 *data)
+{
+ u8 dev_select = 0;
+ u64 v = 0;
+ int ret;
+
+ dev_debug(dev, "%s type %s index %u addr 0x%x\n",
+ __func__, eth_type_to_string(type), index,
+ addr);
+
+ /* find device select */
+ ret = eth_group_get_select(dev, type, index, &dev_select);
+ if (ret)
+ return ret;
+
+ v = CMD_RD << CTRL_CMD_SHIFT |
+ (u64)dev_select << CTRL_DS_SHIFT |
+ (u64)addr << CTRL_ADDR_SHIFT;
+
+ /* only PHY has additional feature bit */
+ if (type == ETH_GROUP_PHY)
+ v |= CTRL_FEAT_SELECT;
+
+ opae_writeq(v, dev->base + ETH_GROUP_CTRL);
+
+ if (opae_readq_poll_timeout(dev->base + ETH_GROUP_STAT,
+ v, v & STAT_DATA_VAL, DATA_VAL_INVL,
+ DATA_VAL_POLL_TIMEOUT))
+ return -ETIMEDOUT;
+
+ *data = (v & STAT_RD_DATA);
+
+ dev_debug(dev, "%s data 0x%x\n", __func__, *data);
+
+ return 0;
+}
+
+static int eth_group_reset_mac(struct eth_group_device *dev, u8 index,
+ bool enable)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * only support 25G & 40G mac reset for now. It uses internal reset.
+ * as PHY and MAC are integrated together, below action will trigger
+ * PHY reset too.
+ */
+ if (dev->speed != 25 && dev->speed != 40)
+ return 0;
+
+ ret = eth_group_read_reg(dev, ETH_GROUP_MAC, index, MAC_CONFIG,
+ &val);
+ if (ret) {
+ dev_err(dev, "fail to read PHY_CONFIG: %d\n", ret);
+ return ret;
+ }
+
+ /* skip if mac is in expected state already */
+ if ((((val & MAC_RESET_MASK) == MAC_RESET_MASK) && enable) ||
+ (((val & MAC_RESET_MASK) == 0) && !enable))
+ return 0;
+
+ if (enable)
+ val |= MAC_RESET_MASK;
+ else
+ val &= ~MAC_RESET_MASK;
+
+ ret = eth_group_write_reg(dev, ETH_GROUP_MAC, index, MAC_CONFIG,
+ val);
+ if (ret)
+ dev_err(dev, "fail to write PHY_CONFIG: %d\n", ret);
+
+ return ret;
+}
+
+static void eth_group_mac_uinit(struct eth_group_device *dev)
+{
+ u8 i;
+
+ for (i = 0; i < dev->mac_num; i++) {
+ if (eth_group_reset_mac(dev, i, true))
+ dev_err(dev, "fail to disable mac %d\n", i);
+ }
+}
+
+static int eth_group_mac_init(struct eth_group_device *dev)
+{
+ int ret;
+ u8 i;
+
+ for (i = 0; i < dev->mac_num; i++) {
+ ret = eth_group_reset_mac(dev, i, false);
+ if (ret) {
+ dev_err(dev, "fail to enable mac %d\n", i);
+ goto exit;
+ }
+ }
+
+ return 0;
+
+exit:
+ while (i--)
+ eth_group_reset_mac(dev, i, true);
+
+ return ret;
+}
+
+static int eth_group_reset_phy(struct eth_group_device *dev, u8 index,
+ bool enable)
+{
+ u32 val;
+ int ret;
+
+ /* only support 10G PHY reset for now. It uses external reset. */
+ if (dev->speed != 10)
+ return 0;
+
+ ret = eth_group_read_reg(dev, ETH_GROUP_PHY, index,
+ ADD_PHY_CTRL, &val);
+ if (ret) {
+ dev_err(dev, "fail to read ADD_PHY_CTRL reg: %d\n", ret);
+ return ret;
+ }
+
+ /* return if PHY is already in expected state */
+ if ((val & PHY_RESET && enable) || (!(val & PHY_RESET) && !enable))
+ return 0;
+
+ if (enable)
+ val |= PHY_RESET;
+ else
+ val &= ~PHY_RESET;
+
+ ret = eth_group_write_reg(dev, ETH_GROUP_PHY, index,
+ ADD_PHY_CTRL, val);
+ if (ret)
+ dev_err(dev, "fail to write ADD_PHY_CTRL reg: %d\n", ret);
+
+ return ret;
+}
+
+static int eth_group_phy_init(struct eth_group_device *dev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < dev->phy_num; i++) {
+ ret = eth_group_reset_phy(dev, i, false);
+ if (ret) {
+ dev_err(dev, "fail to enable phy %d\n", i);
+ goto exit;
+ }
+ }
+
+ return 0;
+exit:
+ while (i--)
+ eth_group_reset_phy(dev, i, true);
+
+ return ret;
+}
+
+static void eth_group_phy_uinit(struct eth_group_device *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->phy_num; i++) {
+ if (eth_group_reset_phy(dev, i, true))
+ dev_err(dev, "fail to disable phy %d\n", i);
+ }
+}
+
+static int eth_group_hw_init(struct eth_group_device *dev)
+{
+ int ret;
+
+ ret = eth_group_phy_init(dev);
+ if (ret) {
+ dev_err(dev, "fail to init eth group phys\n");
+ return ret;
+ }
+
+ ret = eth_group_mac_init(dev);
+ if (ret) {
+ dev_err(priv->dev, "fail to init eth group macs\n");
+ goto phy_exit;
+ }
+
+ return 0;
+
+phy_exit:
+ eth_group_phy_uinit(dev);
+ return ret;
+}
+
+static void eth_group_hw_uinit(struct eth_group_device *dev)
+{
+ eth_group_mac_uinit(dev);
+ eth_group_phy_uinit(dev);
+}
+
+struct eth_group_device *eth_group_probe(void *base)
+{
+ struct eth_group_device *dev;
+
+ dev = opae_malloc(sizeof(*dev));
+ if (!dev)
+ return NULL;
+
+ dev->base = (u8 *)base;
+
+ dev->info.info = opae_readq(dev->base + ETH_GROUP_INFO);
+ dev->group_id = dev->info.group_id;
+ dev->phy_num = dev->mac_num = dev->info.num_phys;
+ dev->speed = dev->info.speed;
+
+ dev->status = ETH_GROUP_DEV_ATTACHED;
+
+ if (eth_group_hw_init(dev)) {
+ dev_err(dev, "eth group hw init fail\n");
+ opae_free(dev); /* do not leak the device on init failure */
+ return NULL;
+ }
+
+ dev_info(dev, "eth group device %d probe done: phy_num=mac_num:%d, speed=%d\n",
+ dev->group_id, dev->phy_num, dev->speed);
+
+ return dev;
+}
+
+void eth_group_release(struct eth_group_device *dev)
+{
+ if (dev) {
+ eth_group_hw_uinit(dev);
+ dev->status = ETH_GROUP_DEV_NOUSED;
+ opae_free(dev);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_eth_group.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_eth_group.h
new file mode 100644
index 000000000..4868bd0e1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_eth_group.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#ifndef _OPAE_PHY_MAC_H
+#define _OPAE_PHY_MAC_H
+
+#include "opae_osdep.h"
+
+#define MAX_ETH_GROUP_DEVICES 2
+
+#define LINE_SIDE_GROUP_ID 0
+#define HOST_SIDE_GROUP_ID 1
+
+#define ETH_GROUP_SELECT_FEAT 1
+
+#define ETH_GROUP_PHY 1
+#define ETH_GROUP_MAC 2
+#define ETH_GROUP_ETHER 3
+
+#define ETH_GROUP_INFO 0x8
+#define INFO_SPEED GENMASK_ULL(23, 16)
+#define ETH_SPEED_10G 10
+#define ETH_SPEED_25G 25
+#define INFO_PHY_NUM GENMASK_ULL(15, 8)
+#define INFO_GROUP_NUM GENMASK_ULL(7, 0)
+
+#define ETH_GROUP_CTRL 0x10
+#define CTRL_CMD GENMASK_ULL(63, 62)
+#define CTRL_CMD_SHIFT 62
+#define CMD_NOP 0ULL
+#define CMD_RD 1ULL
+#define CMD_WR 2ULL
+#define CTRL_DEV_SELECT GENMASK_ULL(53, 49)
+#define CTRL_DS_SHIFT 49
+#define CTRL_FEAT_SELECT BIT_ULL(48)
+#define SELECT_IP 0
+#define SELECT_FEAT 1
+#define CTRL_ADDR GENMASK_ULL(47, 32)
+#define CTRL_ADDR_SHIFT 32
+#define CTRL_WR_DATA GENMASK_ULL(31, 0)
+
+#define ETH_GROUP_STAT 0x18
+#define STAT_DATA_VAL BIT_ULL(32)
+#define STAT_RD_DATA GENMASK_ULL(31, 0)
+
+/* Additional Feature Register */
+#define ADD_PHY_CTRL 0x0
+#define PHY_RESET BIT(0)
+#define MAC_CONFIG 0x310
+#define MAC_RESET_MASK GENMASK(2, 0)
+
+struct opae_eth_group_info {
+ u8 group_id;
+ u8 speed;
+ u8 nums_of_phy;
+ u8 nums_of_mac;
+};
+
+struct opae_eth_group_region_info {
+ u8 group_id;
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+ u8 mem_idx;
+};
+
+struct eth_group_info_reg {
+ union {
+ u64 info;
+ struct {
+ u8 group_id:8;
+ u8 num_phys:8;
+ u8 speed:8;
+ u8 direction:1;
+ u64 resvd:39;
+ };
+ };
+};
+
+enum eth_group_status {
+ ETH_GROUP_DEV_NOUSED = 0,
+ ETH_GROUP_DEV_ATTACHED,
+};
+
+struct eth_group_device {
+ u8 *base;
+ struct eth_group_info_reg info;
+ enum eth_group_status status;
+ u8 speed;
+ u8 group_id;
+ u8 phy_num;
+ u8 mac_num;
+};
+
+struct eth_group_device *eth_group_probe(void *base);
+void eth_group_release(struct eth_group_device *dev);
+int eth_group_read_reg(struct eth_group_device *dev,
+ u8 type, u8 index, u16 addr, u32 *data);
+int eth_group_write_reg(struct eth_group_device *dev,
+ u8 type, u8 index, u16 addr, u32 data);
+#endif /* _OPAE_PHY_MAC_H */
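/*
 * Editor's illustration (an assumption; the real packing lives in
 * opae_eth_group.c, which is not shown here): how a 64-bit read command
 * could be composed from the CTRL fields above before being written to
 * ETH_GROUP_CTRL and polled for STAT_DATA_VAL in ETH_GROUP_STAT.
 */
static inline u64 eth_group_make_read_cmd(u8 dev_select, int feat_select,
                                          u16 addr)
{
        return (CMD_RD << CTRL_CMD_SHIFT) |
               (((u64)dev_select << CTRL_DS_SHIFT) & CTRL_DEV_SELECT) |
               (feat_select ? CTRL_FEAT_SELECT : 0) |
               (((u64)addr << CTRL_ADDR_SHIFT) & CTRL_ADDR);
}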
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_hw_api.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_hw_api.c
new file mode 100644
index 000000000..c969dfed3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_hw_api.c
@@ -0,0 +1,714 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+#include "ifpga_api.h"
+
+/* OPAE Bridge Functions */
+
+/**
+ * opae_bridge_alloc - alloc opae_bridge data structure
+ * @name: bridge name.
+ * @ops: ops of this bridge.
+ * @data: private data of this bridge.
+ *
+ * Return opae_bridge on success, otherwise NULL.
+ */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data)
+{
+ struct opae_bridge *br = opae_zmalloc(sizeof(*br));
+
+ if (!br)
+ return NULL;
+
+ br->name = name;
+ br->ops = ops;
+ br->data = data;
+
+ opae_log("%s %p\n", __func__, br);
+
+ return br;
+}
+
+/**
+ * opae_bridge_reset - reset opae_bridge
+ * @br: bridge to be reset.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_bridge_reset(struct opae_bridge *br)
+{
+ if (!br)
+ return -EINVAL;
+
+ if (br->ops && br->ops->reset)
+ return br->ops->reset(br);
+
+ opae_log("%s no ops\n", __func__);
+
+ return -ENOENT;
+}
+
+/* Accelerator Functions */
+
+/**
+ * opae_accelerator_alloc - alloc opae_accelerator data structure
+ * @name: accelerator name.
+ * @ops: ops of this accelerator.
+ * @data: private data of this accelerator.
+ *
+ * Return: opae_accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data)
+{
+ struct opae_accelerator *acc = opae_zmalloc(sizeof(*acc));
+
+ if (!acc)
+ return NULL;
+
+ acc->name = name;
+ acc->ops = ops;
+ acc->data = data;
+
+ opae_log("%s %p\n", __func__, acc);
+
+ return acc;
+}
+
+/**
+ * opae_acc_reg_read - read accelerator's register from its reg region.
+ * @acc: accelerator to read.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: read operation width, e.g. 4 bytes = a 32-bit read.
+ * @data: data to store the value read from the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->read)
+ return acc->ops->read(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_reg_write - write to accelerator's register from its reg region.
+ * @acc: accelerator to write.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: write operation width, e.g. 4 bytes = a 32-bit write.
+ * @data: data holding the value to write to the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->write)
+ return acc->ops->write(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_info - get information of an accelerator.
+ * @acc: targeted accelerator
+ * @info: accelerator info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_info)
+ return acc->ops->get_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_region_info - get information of an accelerator register region.
+ * @acc: targeted accelerator
+ * @info: accelerator region info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_region_info)
+ return acc->ops->get_region_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_set_irq - set an accelerator's irq.
+ * @acc: targeted accelerator
+ * @start: start vector number
+ * @count: count of vectors to be set from the start vector
+ * @evtfds: event fds to be notified when corresponding irqs happens
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ if (!acc || !acc->data)
+ return -EINVAL;
+
+ if (start + count <= start)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->set_irq)
+ return acc->ops->set_irq(acc, start, count, evtfds);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_uuid - get accelerator's UUID.
+ * @acc: targeted accelerator
+ * @uuid: a pointer to UUID
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ if (!acc || !uuid)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_uuid)
+ return acc->ops->get_uuid(acc, uuid);
+
+ return -ENOENT;
+}
+
+/* Manager Functions */
+
+/**
+ * opae_manager_alloc - alloc opae_manager data structure
+ * @name: manager name.
+ * @ops: ops of this manager.
+ * @network_ops: ops of network management.
+ * @data: private data of this manager.
+ *
+ * Return: opae_manager on success, otherwise NULL.
+ */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops,
+ struct opae_manager_networking_ops *network_ops, void *data)
+{
+ struct opae_manager *mgr = opae_zmalloc(sizeof(*mgr));
+
+ if (!mgr)
+ return NULL;
+
+ mgr->name = name;
+ mgr->ops = ops;
+ mgr->network_ops = network_ops;
+ mgr->data = data;
+
+ opae_log("%s %p\n", __func__, mgr);
+
+ return mgr;
+}
+
+/**
+ * opae_manager_flash - flash a reconfiguration image via opae_manager
+ * @mgr: opae_manager for flash.
+ * @id: id of target region (accelerator).
+ * @buf: image data buffer.
+ * @size: buffer size.
+ * @status: status to store flash result.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_manager_flash(struct opae_manager *mgr, int id, const char *buf,
+ u32 size, u64 *status)
+{
+ if (!mgr)
+ return -EINVAL;
+
+ if (mgr->ops && mgr->ops->flash)
+ return mgr->ops->flash(mgr, id, buf, size, status);
+
+ return -ENOENT;
+}
+
+/* Adapter Functions */
+
+/**
+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure
+ * @type: opae_adapter_type.
+ *
+ * Return: opae_adapter_data on success, otherwise NULL.
+ */
+void *opae_adapter_data_alloc(enum opae_adapter_type type)
+{
+ struct opae_adapter_data *data;
+ int size;
+
+ switch (type) {
+ case OPAE_FPGA_PCI:
+ size = sizeof(struct opae_adapter_data_pci);
+ break;
+ case OPAE_FPGA_NET:
+ size = sizeof(struct opae_adapter_data_net);
+ break;
+ default:
+ size = sizeof(struct opae_adapter_data);
+ break;
+ }
+
+ data = opae_zmalloc(size);
+ if (!data)
+ return NULL;
+
+ data->type = type;
+
+ return data;
+}
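/*
 * Editor's illustration (the bus/device/function values are placeholders;
 * a real caller copies them from its PCI probe context): preparing PCI
 * adapter data for opae_adapter_init().
 */
static void *make_pci_adapter_data(void)
{
        struct opae_adapter_data_pci *data;

        data = opae_adapter_data_alloc(OPAE_FPGA_PCI);
        if (!data)
                return NULL;

        data->bus = 0x5e; /* placeholder PCI coordinates */
        data->devid = 0x00;
        data->function = 0x0;

        return data;
}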
+
+static struct opae_adapter_ops *match_ops(struct opae_adapter *adapter)
+{
+ struct opae_adapter_data *data;
+
+ if (!adapter || !adapter->data)
+ return NULL;
+
+ data = adapter->data;
+
+ if (data->type == OPAE_FPGA_PCI)
+ return &ifpga_adapter_ops;
+
+ return NULL;
+}
+
+/**
+ * opae_adapter_init - init opae_adapter data structure
+ * @adapter: pointer of opae_adapter data structure
+ * @name: adapter name.
+ * @data: private data of this adapter.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_adapter_init(struct opae_adapter *adapter,
+ const char *name, void *data)
+{
+ if (!adapter)
+ return -EINVAL;
+
+ TAILQ_INIT(&adapter->acc_list);
+ adapter->data = data;
+ adapter->name = name;
+ adapter->ops = match_ops(adapter);
+
+ return 0;
+}
+
+/**
+ * opae_adapter_enumerate - enumerate this adapter
+ * @adapter: adapter to enumerate.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_adapter_enumerate(struct opae_adapter *adapter)
+{
+ int ret = -ENOENT;
+
+ if (!adapter)
+ return -EINVAL;
+
+ if (adapter->ops && adapter->ops->enumerate)
+ ret = adapter->ops->enumerate(adapter);
+
+ if (!ret)
+ opae_adapter_dump(adapter, 0);
+
+ return ret;
+}
+
+/**
+ * opae_adapter_destroy - destroy this adapter
+ * @adapter: adapter to destroy.
+ *
+ * destroy things allocated during adapter enumeration.
+ */
+void opae_adapter_destroy(struct opae_adapter *adapter)
+{
+ if (adapter && adapter->ops && adapter->ops->destroy)
+ adapter->ops->destroy(adapter);
+}
+
+/**
+ * opae_adapter_get_acc - find and return accelerator with matched id
+ * @adapter: adapter to find the accelerator.
+ * @acc_id: id (index) of the accelerator.
+ *
+ * Return: the accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id)
+{
+ struct opae_accelerator *acc = NULL;
+
+ if (!adapter)
+ return NULL;
+
+ opae_adapter_for_each_acc(adapter, acc)
+ if (acc->index == acc_id)
+ return acc;
+
+ return NULL;
+}
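/*
 * Editor's illustration (the adapter name and accelerator index are
 * illustrative): a minimal adapter lifecycle using the APIs above.
 */
static int adapter_lifecycle_demo(void *pci_data)
{
        struct opae_adapter *adapter;
        struct opae_accelerator *acc;
        int ret;

        adapter = opae_zmalloc(sizeof(*adapter));
        if (!adapter)
                return -ENOMEM;

        ret = opae_adapter_init(adapter, "ifpga_demo", pci_data);
        if (!ret)
                ret = opae_adapter_enumerate(adapter);
        if (ret)
                goto out_free;

        acc = opae_adapter_get_acc(adapter, 0);
        if (acc)
                opae_log("found accelerator %s\n", acc->name);

        opae_adapter_destroy(adapter);
out_free:
        opae_adapter_free(adapter);
        return ret;
}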
+
+/**
+ * opae_manager_read_mac_rom - read the content of the MAC ROM
+ * @mgr: opae_manager for MAC ROM
+ * @port: the port number of retimer
+ * @addr: buffer of the MAC address
+ *
+ * Return: the number of bytes read on success, otherwise error code.
+ */
+int opae_manager_read_mac_rom(struct opae_manager *mgr, int port,
+ struct opae_ether_addr *addr)
+{
+ if (!mgr || !mgr->network_ops)
+ return -EINVAL;
+
+ if (mgr->network_ops->read_mac_rom)
+ return mgr->network_ops->read_mac_rom(mgr,
+ port * sizeof(struct opae_ether_addr),
+ addr, sizeof(struct opae_ether_addr));
+
+ return -ENOENT;
+}
+
+/**
+ * opae_manager_write_mac_rom - write data into MAC ROM
+ * @mgr: opae_manager for MAC ROM
+ * @port: the port number of the retimer
+ * @addr: data of the MAC address
+ *
+ * Return: the number of bytes written on success, otherwise error code.
+ */
+int opae_manager_write_mac_rom(struct opae_manager *mgr, int port,
+ struct opae_ether_addr *addr)
+{
+ if (!mgr || !mgr->network_ops)
+ return -EINVAL;
+
+ if (mgr->network_ops->write_mac_rom)
+ return mgr->network_ops->write_mac_rom(mgr,
+ port * sizeof(struct opae_ether_addr),
+ addr, sizeof(struct opae_ether_addr));
+
+ return -ENOENT;
+}
+
+/**
+ * opae_manager_get_eth_group_nums - get eth group numbers
+ * @mgr: opae_manager for eth group
+ *
+ * Return: the number of eth groups on success, otherwise error code.
+ */
+int opae_manager_get_eth_group_nums(struct opae_manager *mgr)
+{
+ if (!mgr || !mgr->network_ops)
+ return -EINVAL;
+
+ if (mgr->network_ops->get_eth_group_nums)
+ return mgr->network_ops->get_eth_group_nums(mgr);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_manager_get_eth_group_info - get eth group info
+ * @mgr: opae_manager for eth group
+ * @group_id: id for eth group
+ * @info: info return to caller
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int opae_manager_get_eth_group_info(struct opae_manager *mgr,
+ u8 group_id, struct opae_eth_group_info *info)
+{
+ if (!mgr || !mgr->network_ops)
+ return -EINVAL;
+
+ if (mgr->network_ops->get_eth_group_info)
+ return mgr->network_ops->get_eth_group_info(mgr,
+ group_id, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_manager_get_eth_group_region_info - get eth group region info
+ * @mgr: opae_manager for eth group.
+ * @group_id: id of the eth group.
+ * @info: the memory region info for the eth group.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_manager_get_eth_group_region_info(struct opae_manager *mgr,
+ u8 group_id, struct opae_eth_group_region_info *info)
+{
+ if (!mgr || !info)
+ return -EINVAL;
+
+ if (group_id >= MAX_ETH_GROUP_DEVICES)
+ return -EINVAL;
+
+ info->group_id = group_id;
+
+ if (mgr->ops && mgr->ops->get_eth_group_region_info)
+ return mgr->ops->get_eth_group_region_info(mgr, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_manager_eth_group_read_reg - read ETH group register
+ * @mgr: opae_manager for ETH Group
+ * @group_id: ETH group id
+ * @type: eth type
+ * @index: port index in eth group device
+ * @addr: register address of ETH Group
+ * @data: read buffer
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int opae_manager_eth_group_read_reg(struct opae_manager *mgr, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 *data)
+{
+ if (!mgr || !mgr->network_ops)
+ return -EINVAL;
+
+ if (mgr->network_ops->eth_group_reg_read)
+ return mgr->network_ops->eth_group_reg_read(mgr, group_id,
+ type, index, addr, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_manager_eth_group_write_reg - write ETH group register
+ * @mgr: opae_manager for ETH Group
+ * @group_id: ETH group id
+ * @type: eth type
+ * @index: port index in eth group device
+ * @addr: register address of ETH Group
+ * @data: data to write to the register
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int opae_manager_eth_group_write_reg(struct opae_manager *mgr, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 data)
+{
+ if (!mgr || !mgr->network_ops)
+ return -EINVAL;
+
+ if (mgr->network_ops->eth_group_reg_write)
+ return mgr->network_ops->eth_group_reg_write(mgr, group_id,
+ type, index, addr, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_manager_get_retimer_info - get retimer info like PKVL chip
+ * @mgr: opae_manager for retimer
+ * @info: info return to caller
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int opae_manager_get_retimer_info(struct opae_manager *mgr,
+ struct opae_retimer_info *info)
+{
+ if (!mgr || !mgr->network_ops)
+ return -EINVAL;
+
+ if (mgr->network_ops->get_retimer_info)
+ return mgr->network_ops->get_retimer_info(mgr, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_manager_get_retimer_status - get retimer status
+ * @mgr: opae_manager of retimer
+ * @status: status of retimer
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int opae_manager_get_retimer_status(struct opae_manager *mgr,
+ struct opae_retimer_status *status)
+{
+ if (!mgr || !mgr->network_ops)
+ return -EINVAL;
+
+ if (mgr->network_ops->get_retimer_status)
+ return mgr->network_ops->get_retimer_status(mgr,
+ status);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_mgr_get_sensor_by_id - get sensor device by id
+ * @mgr: opae_manager that owns the sensor list
+ * @id: the id of the sensor
+ *
+ * Return: pointer to the opae_sensor_info on success, otherwise NULL
+ */
+struct opae_sensor_info *
+opae_mgr_get_sensor_by_id(struct opae_manager *mgr,
+ unsigned int id)
+{
+ struct opae_sensor_info *sensor;
+
+ opae_mgr_for_each_sensor(mgr, sensor)
+ if (sensor->id == id)
+ return sensor;
+
+ return NULL;
+}
+
+/**
+ * opae_mgr_get_sensor_by_name - get sensor device by name
+ * @mgr: opae_manager that owns the sensor list
+ * @name: the name of the sensor
+ *
+ * Return: pointer to the opae_sensor_info on success, otherwise NULL
+ */
+struct opae_sensor_info *
+opae_mgr_get_sensor_by_name(struct opae_manager *mgr,
+ const char *name)
+{
+ struct opae_sensor_info *sensor;
+
+ opae_mgr_for_each_sensor(mgr, sensor)
+ if (!strcmp(sensor->name, name))
+ return sensor;
+
+ return NULL;
+}
+
+/**
+ * opae_mgr_get_sensor_value_by_name - find the sensor by name and read out
+ * its value
+ * @mgr: opae_manager for sensor.
+ * @name: the name of the sensor
+ * @value: the readout sensor value
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int
+opae_mgr_get_sensor_value_by_name(struct opae_manager *mgr,
+ const char *name, unsigned int *value)
+{
+ struct opae_sensor_info *sensor;
+
+ if (!mgr)
+ return -EINVAL;
+
+ sensor = opae_mgr_get_sensor_by_name(mgr, name);
+ if (!sensor)
+ return -ENODEV;
+
+ if (mgr->ops && mgr->ops->get_sensor_value)
+ return mgr->ops->get_sensor_value(mgr, sensor, value);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_mgr_get_sensor_value_by_id - find the sensor by id and read out
+ * its value
+ * @mgr: opae_manager for sensor
+ * @id: the id of the sensor
+ * @value: the readout sensor value
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int
+opae_mgr_get_sensor_value_by_id(struct opae_manager *mgr,
+ unsigned int id, unsigned int *value)
+{
+ struct opae_sensor_info *sensor;
+
+ if (!mgr)
+ return -EINVAL;
+
+ sensor = opae_mgr_get_sensor_by_id(mgr, id);
+ if (!sensor)
+ return -ENODEV;
+
+ if (mgr->ops && mgr->ops->get_sensor_value)
+ return mgr->ops->get_sensor_value(mgr, sensor, value);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_mgr_get_sensor_value - get the current sensor value
+ * @mgr: opae_manager for sensor
+ * @sensor: opae_sensor_info for sensor
+ * @value: the readout sensor value
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int
+opae_mgr_get_sensor_value(struct opae_manager *mgr,
+ struct opae_sensor_info *sensor,
+ unsigned int *value)
+{
+ if (!mgr || !sensor)
+ return -EINVAL;
+
+ if (mgr->ops && mgr->ops->get_sensor_value)
+ return mgr->ops->get_sensor_value(mgr, sensor, value);
+
+ return -ENOENT;
+}
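/*
 * Editor's illustration (assumes the manager's sensor_list was populated
 * during enumeration): walk all sensors and log their current readings
 * using the helpers above.
 */
static void dump_sensor_values(struct opae_manager *mgr)
{
        struct opae_sensor_info *sensor;
        unsigned int value;

        if (!mgr || !mgr->sensor_list)
                return;

        opae_mgr_for_each_sensor(mgr, sensor)
                if (!opae_mgr_get_sensor_value(mgr, sensor, &value))
                        opae_log("sensor %s (id %u) = %u\n",
                                 sensor->name, sensor->id, value);
}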
+
+/**
+ * opae_mgr_get_board_info - get board info
+ * @mgr: opae_manager for the card
+ * @info: opae_board_info for the card
+ *
+ * Return: 0 on success, otherwise error code
+ */
+int
+opae_mgr_get_board_info(struct opae_manager *mgr,
+ struct opae_board_info **info)
+{
+ if (!mgr || !info)
+ return -EINVAL;
+
+ if (mgr->ops && mgr->ops->get_board_info)
+ return mgr->ops->get_board_info(mgr, info);
+
+ return -ENOENT;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_hw_api.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_hw_api.h
new file mode 100644
index 000000000..cf8ff93a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_hw_api.h
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_HW_API_H_
+#define _OPAE_HW_API_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include "opae_osdep.h"
+#include "opae_intel_max10.h"
+#include "opae_eth_group.h"
+#include "ifpga_defines.h"
+
+#ifndef PCI_MAX_RESOURCE
+#define PCI_MAX_RESOURCE 6
+#endif
+
+struct opae_adapter;
+
+enum opae_adapter_type {
+ OPAE_FPGA_PCI,
+ OPAE_FPGA_NET,
+};
+
+/* OPAE Manager Data Structure */
+struct opae_manager_ops;
+struct opae_manager_networking_ops;
+
+/*
+ * opae_manager has a pointer to its parent adapter, as it may manage all
+ * components on this FPGA device (adapter). If that is not the case, do not
+ * set this adapter pointer, which limits the opae_manager ops to the manager
+ * itself.
+ */
+struct opae_manager {
+ const char *name;
+ struct opae_adapter *adapter;
+ struct opae_manager_ops *ops;
+ struct opae_manager_networking_ops *network_ops;
+ struct opae_sensor_list *sensor_list;
+ void *data;
+};
+
+/* FIXME: add more management ops, e.g. power/thermal */
+struct opae_manager_ops {
+ int (*flash)(struct opae_manager *mgr, int id, const char *buffer,
+ u32 size, u64 *status);
+ int (*get_eth_group_region_info)(struct opae_manager *mgr,
+ struct opae_eth_group_region_info *info);
+ int (*get_sensor_value)(struct opae_manager *mgr,
+ struct opae_sensor_info *sensor,
+ unsigned int *value);
+ int (*get_board_info)(struct opae_manager *mgr,
+ struct opae_board_info **info);
+};
+
+/* networking management ops in FME */
+struct opae_manager_networking_ops {
+ int (*read_mac_rom)(struct opae_manager *mgr, int offset, void *buf,
+ int size);
+ int (*write_mac_rom)(struct opae_manager *mgr, int offset, void *buf,
+ int size);
+ int (*get_eth_group_nums)(struct opae_manager *mgr);
+ int (*get_eth_group_info)(struct opae_manager *mgr,
+ u8 group_id, struct opae_eth_group_info *info);
+ int (*eth_group_reg_read)(struct opae_manager *mgr, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 *data);
+ int (*eth_group_reg_write)(struct opae_manager *mgr, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 data);
+ int (*get_retimer_info)(struct opae_manager *mgr,
+ struct opae_retimer_info *info);
+ int (*get_retimer_status)(struct opae_manager *mgr,
+ struct opae_retimer_status *status);
+};
+
+#define opae_mgr_for_each_sensor(mgr, sensor) \
+ TAILQ_FOREACH(sensor, mgr->sensor_list, node)
+
+/* OPAE Manager APIs */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops,
+ struct opae_manager_networking_ops *network_ops, void *data);
+#define opae_manager_free(mgr) opae_free(mgr)
+int opae_manager_flash(struct opae_manager *mgr, int acc_id, const char *buf,
+ u32 size, u64 *status);
+int opae_manager_get_eth_group_region_info(struct opae_manager *mgr,
+ u8 group_id, struct opae_eth_group_region_info *info);
+struct opae_sensor_info *opae_mgr_get_sensor_by_name(struct opae_manager *mgr,
+ const char *name);
+struct opae_sensor_info *opae_mgr_get_sensor_by_id(struct opae_manager *mgr,
+ unsigned int id);
+int opae_mgr_get_sensor_value_by_name(struct opae_manager *mgr,
+ const char *name, unsigned int *value);
+int opae_mgr_get_sensor_value_by_id(struct opae_manager *mgr,
+ unsigned int id, unsigned int *value);
+int opae_mgr_get_sensor_value(struct opae_manager *mgr,
+ struct opae_sensor_info *sensor,
+ unsigned int *value);
+
+/* OPAE Bridge Data Structure */
+struct opae_bridge_ops;
+
+/*
+ * opae_bridge only has pointer to its downstream accelerator.
+ */
+struct opae_bridge {
+ const char *name;
+ int id;
+ struct opae_accelerator *acc;
+ struct opae_bridge_ops *ops;
+ void *data;
+};
+
+struct opae_bridge_ops {
+ int (*reset)(struct opae_bridge *br);
+};
+
+/* OPAE Bridge APIs */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data);
+int opae_bridge_reset(struct opae_bridge *br);
+#define opae_bridge_free(br) opae_free(br)
+
+/* OPAE Accelerator Data Structure */
+struct opae_accelerator_ops;
+
+/*
+ * opae_accelerator has a pointer to its upstream bridge (port).
+ * In some cases, if the same user is allowed to do PR on its own accelerator,
+ * the manager pointer is set during enumeration. In other cases, the PR
+ * functions may only be done via a manager in another module / thread /
+ * service / application for better protection.
+ */
+struct opae_accelerator {
+ TAILQ_ENTRY(opae_accelerator) node;
+ const char *name;
+ int index;
+ struct opae_bridge *br;
+ struct opae_manager *mgr;
+ struct opae_accelerator_ops *ops;
+ void *data;
+};
+
+struct opae_acc_info {
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct opae_acc_region_info {
+ u32 flags;
+#define ACC_REGION_READ (1 << 0)
+#define ACC_REGION_WRITE (1 << 1)
+#define ACC_REGION_MMIO (1 << 2)
+ u32 index;
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_accelerator_ops {
+ int (*read)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*write)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*get_info)(struct opae_accelerator *acc,
+ struct opae_acc_info *info);
+ int (*get_region_info)(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+ int (*set_irq)(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+ int (*get_uuid)(struct opae_accelerator *acc,
+ struct uuid *uuid);
+};
+
+/* OPAE accelerator APIs */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data);
+#define opae_accelerator_free(acc) opae_free(acc)
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info);
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid);
+
+static inline struct opae_bridge *
+opae_acc_get_br(struct opae_accelerator *acc)
+{
+ return acc ? acc->br : NULL;
+}
+
+static inline struct opae_manager *
+opae_acc_get_mgr(struct opae_accelerator *acc)
+{
+ return acc ? acc->mgr : NULL;
+}
+
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+
+#define opae_acc_reg_read64(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 8, data)
+#define opae_acc_reg_write64(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 8, data)
+#define opae_acc_reg_read32(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 4, data)
+#define opae_acc_reg_write32(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 4, data)
+#define opae_acc_reg_read16(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 2, data)
+#define opae_acc_reg_write16(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 2, data)
+#define opae_acc_reg_read8(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 1, data)
+#define opae_acc_reg_write8(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 1, data)
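/*
 * Editor's illustration (region index and offset are assumptions): reading
 * a 64-bit CSR through the width-specific wrappers above.
 */
static inline int acc_read_csr64_demo(struct opae_accelerator *acc, u64 *val)
{
        return opae_acc_reg_read64(acc, 0, 0x28, val);
}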
+
+/* for data stream read/write */
+int opae_acc_data_read(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_data_write(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+
+/* OPAE Adapter Data Structure */
+struct opae_adapter_data {
+ enum opae_adapter_type type;
+};
+
+struct opae_reg_region {
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_adapter_data_pci {
+ enum opae_adapter_type type;
+ u16 device_id;
+ u16 vendor_id;
+ u16 bus; /*Device bus for PCI */
+ u16 devid; /* Device ID */
+ u16 function; /* Device function */
+ struct opae_reg_region region[PCI_MAX_RESOURCE];
+ int vfio_dev_fd; /* VFIO device file descriptor */
+};
+
+/* FIXME: OPAE_FPGA_NET type */
+struct opae_adapter_data_net {
+ enum opae_adapter_type type;
+};
+
+struct opae_adapter_ops {
+ int (*enumerate)(struct opae_adapter *adapter);
+ void (*destroy)(struct opae_adapter *adapter);
+};
+
+TAILQ_HEAD(opae_accelerator_list, opae_accelerator);
+
+#define opae_adapter_for_each_acc(adapter, acc) \
+ TAILQ_FOREACH(acc, &adapter->acc_list, node)
+
+struct opae_adapter {
+ const char *name;
+ struct opae_manager *mgr;
+ struct opae_accelerator_list acc_list;
+ struct opae_adapter_ops *ops;
+ void *data;
+};
+
+/* OPAE Adapter APIs */
+void *opae_adapter_data_alloc(enum opae_adapter_type type);
+#define opae_adapter_data_free(data) opae_free(data)
+
+int opae_adapter_init(struct opae_adapter *adapter,
+ const char *name, void *data);
+#define opae_adapter_free(adapter) opae_free(adapter)
+
+int opae_adapter_enumerate(struct opae_adapter *adapter);
+void opae_adapter_destroy(struct opae_adapter *adapter);
+static inline struct opae_manager *
+opae_adapter_get_mgr(struct opae_adapter *adapter)
+{
+ return adapter ? adapter->mgr : NULL;
+}
+
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id);
+
+static inline void opae_adapter_add_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_INSERT_TAIL(&adapter->acc_list, acc, node);
+}
+
+static inline void opae_adapter_remove_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_REMOVE(&adapter->acc_list, acc, node);
+}
+
+/* OPAE vBNG network datastruct */
+#define OPAE_ETHER_ADDR_LEN 6
+
+struct opae_ether_addr {
+ unsigned char addr_bytes[OPAE_ETHER_ADDR_LEN];
+} __rte_packed;
+
+/* OPAE vBNG network APIs */
+int opae_manager_read_mac_rom(struct opae_manager *mgr, int port,
+ struct opae_ether_addr *addr);
+int opae_manager_write_mac_rom(struct opae_manager *mgr, int port,
+ struct opae_ether_addr *addr);
+int opae_manager_get_retimer_info(struct opae_manager *mgr,
+ struct opae_retimer_info *info);
+int opae_manager_get_retimer_status(struct opae_manager *mgr,
+ struct opae_retimer_status *status);
+int opae_manager_get_eth_group_nums(struct opae_manager *mgr);
+int opae_manager_get_eth_group_info(struct opae_manager *mgr,
+ u8 group_id, struct opae_eth_group_info *info);
+int opae_manager_eth_group_write_reg(struct opae_manager *mgr, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 data);
+int opae_manager_eth_group_read_reg(struct opae_manager *mgr, u8 group_id,
+ u8 type, u8 index, u16 addr, u32 *data);
+int opae_mgr_get_board_info(struct opae_manager *mgr,
+ struct opae_board_info **info);
+#endif /* _OPAE_HW_API_H_*/
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_i2c.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_i2c.c
new file mode 100644
index 000000000..846d751f5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_i2c.c
@@ -0,0 +1,512 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#include "opae_osdep.h"
+#include "opae_i2c.h"
+
+static int i2c_transfer(struct altera_i2c_dev *dev,
+ struct i2c_msg *msg, int num)
+{
+ int ret, try;
+
+ for (ret = 0, try = 0; try < I2C_XFER_RETRY; try++) {
+ ret = dev->xfer(dev, msg, num);
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i2c read function
+ */
+int i2c_read(struct altera_i2c_dev *dev, int flags, unsigned int slave_addr,
+ u32 offset, u8 *buf, u32 count)
+{
+ u8 msgbuf[2];
+ int i = 0;
+ int ret;
+
+ pthread_mutex_lock(&dev->lock);
+
+ if (flags & I2C_FLAG_ADDR16)
+ msgbuf[i++] = offset >> 8;
+
+ msgbuf[i++] = offset;
+
+ struct i2c_msg msg[2] = {
+ {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = i,
+ .buf = msgbuf,
+ },
+ {
+ .addr = slave_addr,
+ .flags = I2C_M_RD,
+ .len = count,
+ .buf = buf,
+ },
+ };
+
+ if (!dev->xfer) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ ret = i2c_transfer(dev, msg, 2);
+
+exit:
+ pthread_mutex_unlock(&dev->lock);
+ return ret;
+}
+
+int i2c_write(struct altera_i2c_dev *dev, int flags, unsigned int slave_addr,
+ u32 offset, u8 *buffer, int len)
+{
+ struct i2c_msg msg;
+ u8 *buf;
+ int ret;
+ int i = 0;
+
+ pthread_mutex_lock(&dev->lock);
+
+ if (!dev->xfer) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ buf = opae_malloc(I2C_MAX_OFFSET_LEN + len);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ msg.addr = slave_addr;
+ msg.flags = 0;
+ msg.buf = buf;
+
+ if (flags & I2C_FLAG_ADDR16)
+ msg.buf[i++] = offset >> 8;
+
+ msg.buf[i++] = offset;
+ opae_memcpy(&msg.buf[i], buffer, len);
+ msg.len = i + len;
+
+ ret = i2c_transfer(dev, &msg, 1);
+
+ opae_free(buf);
+exit:
+ pthread_mutex_unlock(&dev->lock);
+ return ret;
+}
+
+int i2c_read8(struct altera_i2c_dev *dev, unsigned int slave_addr, u32 offset,
+ u8 *buf, u32 count)
+{
+ return i2c_read(dev, 0, slave_addr, offset, buf, count);
+}
+
+int i2c_read16(struct altera_i2c_dev *dev, unsigned int slave_addr, u32 offset,
+ u8 *buf, u32 count)
+{
+ return i2c_read(dev, I2C_FLAG_ADDR16, slave_addr, offset,
+ buf, count);
+}
+
+int i2c_write8(struct altera_i2c_dev *dev, unsigned int slave_addr, u32 offset,
+ u8 *buf, u32 count)
+{
+ return i2c_write(dev, 0, slave_addr, offset, buf, count);
+}
+
+int i2c_write16(struct altera_i2c_dev *dev, unsigned int slave_addr, u32 offset,
+ u8 *buf, u32 count)
+{
+ return i2c_write(dev, I2C_FLAG_ADDR16, slave_addr, offset,
+ buf, count);
+}
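/*
 * Editor's illustration (the 0x50 slave address and zero offset are
 * assumptions; real callers take them from board data): reading a 6-byte
 * MAC address from an AT24-style EEPROM that uses 16-bit offsets.
 */
static int read_mac_from_eeprom(struct altera_i2c_dev *dev, u8 mac[6])
{
        return i2c_read16(dev, 0x50, 0x0, mac, 6);
}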
+
+static void i2c_indirect_write(struct altera_i2c_dev *dev, u32 reg,
+ u32 value)
+{
+ u64 ctrl;
+
+ ctrl = I2C_CTRL_W | (reg >> 2);
+
+ opae_writeq(value & I2C_WRITE_DATA_MASK, dev->base + I2C_WRITE);
+ opae_writeq(ctrl, dev->base + I2C_CTRL);
+}
+
+static u32 i2c_indirect_read(struct altera_i2c_dev *dev, u32 reg)
+{
+ u64 tmp;
+ u64 ctrl;
+ u32 value;
+
+ ctrl = I2C_CTRL_R | (reg >> 2);
+ opae_writeq(ctrl, dev->base + I2C_CTRL);
+
+ /* FIXME: Read one more time to avoid HW timing issue. */
+ tmp = opae_readq(dev->base + I2C_READ);
+ tmp = opae_readq(dev->base + I2C_READ);
+
+ value = tmp & I2C_READ_DATA_MASK;
+
+ return value;
+}
+
+static void altera_i2c_transfer(struct altera_i2c_dev *dev, u32 data)
+{
+ /* send STOP on the last byte */
+ if (dev->msg_len == 1)
+ data |= ALTERA_I2C_TFR_CMD_STO;
+ if (dev->msg_len > 0)
+ i2c_indirect_write(dev, ALTERA_I2C_TFR_CMD, data);
+}
+
+static void altera_i2c_disable(struct altera_i2c_dev *dev)
+{
+ u32 val = i2c_indirect_read(dev, ALTERA_I2C_CTRL);
+
+ i2c_indirect_write(dev, ALTERA_I2C_CTRL, val&~ALTERA_I2C_CTRL_EN);
+}
+
+static void altera_i2c_enable(struct altera_i2c_dev *dev)
+{
+ u32 val = i2c_indirect_read(dev, ALTERA_I2C_CTRL);
+
+ i2c_indirect_write(dev, ALTERA_I2C_CTRL, val | ALTERA_I2C_CTRL_EN);
+}
+
+static void altera_i2c_reset(struct altera_i2c_dev *dev)
+{
+ altera_i2c_disable(dev);
+ altera_i2c_enable(dev);
+}
+
+static int altera_i2c_wait_core_idle(struct altera_i2c_dev *dev)
+{
+ int retry = 0;
+
+ while (i2c_indirect_read(dev, ALTERA_I2C_STATUS)
+ & ALTERA_I2C_STAT_CORE) {
+ if (retry++ > ALTERA_I2C_TIMEOUT_US) {
+ dev_err(dev, "timeout: Core Status not IDLE...\n");
+ return -EBUSY;
+ }
+ udelay(1);
+ }
+
+ return 0;
+}
+
+static void altera_i2c_enable_interrupt(struct altera_i2c_dev *dev,
+ u32 mask, bool enable)
+{
+ u32 status;
+
+ status = i2c_indirect_read(dev, ALTERA_I2C_ISER);
+ if (enable)
+ dev->isr_mask = status | mask;
+ else
+ dev->isr_mask = status&~mask;
+
+ i2c_indirect_write(dev, ALTERA_I2C_ISER, dev->isr_mask);
+}
+
+static void altera_i2c_interrupt_clear(struct altera_i2c_dev *dev, u32 mask)
+{
+ u32 int_en;
+
+ int_en = i2c_indirect_read(dev, ALTERA_I2C_ISR);
+
+ i2c_indirect_write(dev, ALTERA_I2C_ISR, int_en | mask);
+}
+
+static void altera_i2c_read_rx_fifo(struct altera_i2c_dev *dev)
+{
+ size_t rx_avail;
+ size_t bytes;
+
+ rx_avail = i2c_indirect_read(dev, ALTERA_I2C_RX_FIFO_LVL);
+ bytes = min(rx_avail, dev->msg_len);
+
+ while (bytes-- > 0) {
+ *dev->buf++ = i2c_indirect_read(dev, ALTERA_I2C_RX_DATA);
+ dev->msg_len--;
+ altera_i2c_transfer(dev, 0);
+ }
+}
+
+static void altera_i2c_stop(struct altera_i2c_dev *dev)
+{
+ i2c_indirect_write(dev, ALTERA_I2C_TFR_CMD, ALTERA_I2C_TFR_CMD_STO);
+}
+
+static int altera_i2c_fill_tx_fifo(struct altera_i2c_dev *dev)
+{
+ size_t tx_avail;
+ int bytes;
+ int ret;
+
+ tx_avail = dev->fifo_size -
+ i2c_indirect_read(dev, ALTERA_I2C_TC_FIFO_LVL);
+ bytes = min(tx_avail, dev->msg_len);
+ ret = dev->msg_len - bytes;
+
+ while (bytes-- > 0) {
+ altera_i2c_transfer(dev, *dev->buf++);
+ dev->msg_len--;
+ }
+
+ return ret;
+}
+
+static u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
+{
+ return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
+}
+
+static int altera_i2c_wait_complete(struct altera_i2c_dev *dev,
+ u32 *status)
+{
+ int retry = 0;
+
+ while (!((*status = i2c_indirect_read(dev, ALTERA_I2C_ISR))
+ & dev->isr_mask)) {
+ if (retry++ > ALTERA_I2C_TIMEOUT_US)
+ return -EBUSY;
+
+ udelay(1000);
+ }
+
+ return 0;
+}
+
+static bool altera_handle_i2c_status(struct altera_i2c_dev *dev, u32 status)
+{
+ bool read, finish = false;
+ int ret;
+
+ read = (dev->msg->flags & I2C_M_RD) != 0;
+
+ if (status & ALTERA_I2C_ISR_ARB) {
+ altera_i2c_interrupt_clear(dev, ALTERA_I2C_ISR_ARB);
+ dev->msg_err = -EAGAIN;
+ finish = true;
+ } else if (status & ALTERA_I2C_ISR_NACK) {
+ dev_debug(dev, "could not get ACK\n");
+ dev->msg_err = -ENXIO;
+ altera_i2c_interrupt_clear(dev, ALTERA_I2C_ISR_NACK);
+ altera_i2c_stop(dev);
+ finish = true;
+ } else if (read && (status & ALTERA_I2C_ISR_RXOF)) {
+ /* RX FIFO Overflow */
+ altera_i2c_read_rx_fifo(dev);
+ altera_i2c_interrupt_clear(dev, ALTERA_I2C_ISER_RXOF_EN);
+ altera_i2c_stop(dev);
+ dev_err(dev, "error: RX FIFO overflow\n");
+ finish = true;
+ } else if (read && (status & ALTERA_I2C_ISR_RXRDY)) {
+ altera_i2c_read_rx_fifo(dev);
+ altera_i2c_interrupt_clear(dev, ALTERA_I2C_ISR_RXRDY);
+ if (!dev->msg_len)
+ finish = true;
+ } else if (!read && (status & ALTERA_I2C_ISR_TXRDY)) {
+ altera_i2c_interrupt_clear(dev, ALTERA_I2C_ISR_TXRDY);
+ if (dev->msg_len > 0)
+ altera_i2c_fill_tx_fifo(dev);
+ else
+ finish = true;
+ } else {
+ dev_err(dev, "unexpected status:0x%x\n", status);
+ altera_i2c_interrupt_clear(dev, ALTERA_I2C_ALL_IRQ);
+ }
+
+ if (finish) {
+ ret = altera_i2c_wait_core_idle(dev);
+ if (ret)
+ dev_err(dev, "message timeout\n");
+
+ altera_i2c_enable_interrupt(dev, ALTERA_I2C_ALL_IRQ, false);
+ altera_i2c_interrupt_clear(dev, ALTERA_I2C_ALL_IRQ);
+ dev_debug(dev, "message done\n");
+ }
+
+ return finish;
+}
+
+static bool altera_i2c_poll_status(struct altera_i2c_dev *dev)
+{
+ u32 status;
+ bool finish = false;
+ int i = 0;
+
+ do {
+ if (altera_i2c_wait_complete(dev, &status)) {
+ dev_err(dev, "altera i2c wait complete timeout, status=0x%x\n",
+ status);
+ return false;
+ }
+
+ finish = altera_handle_i2c_status(dev, status);
+
+ if (i++ > I2C_XFER_RETRY)
+ break;
+
+ } while (!finish);
+
+ return finish;
+}
+
+static int altera_i2c_xfer_msg(struct altera_i2c_dev *dev,
+ struct i2c_msg *msg)
+{
+ u32 int_mask = ALTERA_I2C_ISR_RXOF |
+ ALTERA_I2C_ISR_ARB | ALTERA_I2C_ISR_NACK;
+ u8 addr = i2c_8bit_addr_from_msg(msg);
+ bool finish;
+
+ dev->msg = msg;
+ dev->msg_len = msg->len;
+ dev->buf = msg->buf;
+ dev->msg_err = 0;
+ altera_i2c_enable(dev);
+
+ /* make sure the RX FIFO is empty */
+ do {
+ i2c_indirect_read(dev, ALTERA_I2C_RX_DATA);
+ } while (i2c_indirect_read(dev, ALTERA_I2C_RX_FIFO_LVL));
+
+ i2c_indirect_write(dev, ALTERA_I2C_TFR_CMD_RW_D,
+ ALTERA_I2C_TFR_CMD_STA | addr);
+
+ /*enable irq*/
+ if (msg->flags & I2C_M_RD) {
+ int_mask |= ALTERA_I2C_ISR_RXOF | ALTERA_I2C_ISR_RXRDY;
+ /* in polling mode, we should set this ISR register? */
+ altera_i2c_enable_interrupt(dev, int_mask, true);
+ altera_i2c_transfer(dev, 0);
+ } else {
+ int_mask |= ALTERA_I2C_ISR_TXRDY;
+ altera_i2c_enable_interrupt(dev, int_mask, true);
+ altera_i2c_fill_tx_fifo(dev);
+ }
+
+ finish = altera_i2c_poll_status(dev);
+ if (!finish) {
+ dev->msg_err = -ETIMEDOUT;
+ dev_err(dev, "%s: i2c transfer error\n", __func__);
+ }
+
+ altera_i2c_enable_interrupt(dev, int_mask, false);
+
+ if (i2c_indirect_read(dev, ALTERA_I2C_STATUS) & ALTERA_I2C_STAT_CORE)
+ dev_info(dev, "core not idle...\n");
+
+ altera_i2c_disable(dev);
+
+ return dev->msg_err;
+}
+
+static int altera_i2c_xfer(struct altera_i2c_dev *dev,
+ struct i2c_msg *msg, int num)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num; i++, msg++) {
+ ret = altera_i2c_xfer_msg(dev, msg);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void altera_i2c_hardware_init(struct altera_i2c_dev *dev)
+{
+ u32 divisor = dev->i2c_clk / dev->bus_clk_rate;
+ u32 clk_mhz = dev->i2c_clk / 1000000;
+ u32 tmp = (ALTERA_I2C_THRESHOLD << ALTERA_I2C_CTRL_RXT_SHFT) |
+ (ALTERA_I2C_THRESHOLD << ALTERA_I2C_CTRL_TCT_SHFT);
+ u32 t_high, t_low;
+
+ if (dev->bus_clk_rate <= 100000) {
+ tmp &= ~ALTERA_I2C_CTRL_BSPEED;
+ /* standard mode, SCL 50/50 duty cycle */
+ t_high = divisor / 2;
+ t_low = divisor / 2;
+ } else {
+ tmp |= ALTERA_I2C_CTRL_BSPEED;
+ /* fast mode, SCL 33/66 duty cycle */
+ t_high = divisor / 3;
+ t_low = divisor * 2 / 3;
+ }
+
+ i2c_indirect_write(dev, ALTERA_I2C_CTRL, tmp);
+
+ dev_info(dev, "%s: rate=%uHz per_clk=%uMHz -> ratio=1:%u\n",
+ __func__, dev->bus_clk_rate, clk_mhz, divisor);
+
+ /* reset the i2c controller */
+ altera_i2c_reset(dev);
+
+ /* set SCL high time */
+ i2c_indirect_write(dev, ALTERA_I2C_SCL_HIGH, t_high);
+ /* set SCL low time */
+ i2c_indirect_write(dev, ALTERA_I2C_SCL_LOW, t_low);
+ /* set SDA hold time to 300 ns */
+ i2c_indirect_write(dev, ALTERA_I2C_SDA_HOLD, (300*clk_mhz)/1000);
+
+ altera_i2c_enable_interrupt(dev, ALTERA_I2C_ALL_IRQ, false);
+}
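Editor's worked example (the 100 MHz reference clock is the assumption documented in altera_i2c_probe below): with i2c_clk = 100 MHz and bus_clk_rate = 400 kHz, divisor = 100000000 / 400000 = 250, so fast mode yields t_high = 250 / 3 = 83 and t_low = 250 * 2 / 3 = 166 SCL counts, and the 300 ns SDA hold becomes (300 * 100) / 1000 = 30 counts.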
+
+struct altera_i2c_dev *altera_i2c_probe(void *base)
+{
+ struct altera_i2c_dev *dev;
+
+ dev = opae_malloc(sizeof(*dev));
+ if (!dev)
+ return NULL;
+
+ dev->base = (u8 *)base;
+ dev->i2c_param.info = opae_readq(dev->base + I2C_PARAM);
+
+ if (dev->i2c_param.devid != 0xEE011) {
+ dev_err(dev, "found an invalid i2c master\n");
+ opae_free(dev);
+ return NULL;
+ }
+
+ dev->fifo_size = dev->i2c_param.fifo_depth;
+
+ if (dev->i2c_param.max_req == ALTERA_I2C_100KHZ)
+ dev->bus_clk_rate = 100000;
+ else
+ /* default to a 400KHz i2c bus clock */
+ dev->bus_clk_rate = 400000;
+
+ /* i2c input clock for vista creek is 100MHz */
+ dev->i2c_clk = dev->i2c_param.ref_clk * 1000000;
+ dev->xfer = altera_i2c_xfer;
+
+ if (pthread_mutex_init(&dev->lock, NULL)) {
+ opae_free(dev);
+ return NULL;
+ }
+
+ altera_i2c_hardware_init(dev);
+
+ return dev;
+}
+
+void altera_i2c_remove(struct altera_i2c_dev *dev)
+{
+ if (dev) {
+ pthread_mutex_destroy(&dev->lock);
+ altera_i2c_disable(dev);
+ opae_free(dev);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_i2c.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_i2c.h
new file mode 100644
index 000000000..266e127b7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_i2c.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#ifndef _OPAE_I2C_H
+#define _OPAE_I2C_H
+
+#include "opae_osdep.h"
+
+#define ALTERA_I2C_TFR_CMD 0x00 /* Transfer Command register */
+#define ALTERA_I2C_TFR_CMD_STA BIT(9) /* send START before byte */
+#define ALTERA_I2C_TFR_CMD_STO BIT(8) /* send STOP after byte */
+#define ALTERA_I2C_TFR_CMD_RW_D BIT(0) /* Direction of transfer */
+#define ALTERA_I2C_RX_DATA 0x04 /* RX data FIFO register */
+#define ALTERA_I2C_CTRL 0x8 /* Control register */
+#define ALTERA_I2C_CTRL_RXT_SHFT 4 /* RX FIFO Threshold */
+#define ALTERA_I2C_CTRL_TCT_SHFT 2 /* TFER CMD FIFO Threshold */
+#define ALTERA_I2C_CTRL_BSPEED BIT(1) /* Bus Speed */
+#define ALTERA_I2C_CTRL_EN BIT(0) /* Enable Core */
+#define ALTERA_I2C_ISER 0xc /* Interrupt Status Enable register */
+#define ALTERA_I2C_ISER_RXOF_EN BIT(4) /* Enable RX OVERFLOW IRQ */
+#define ALTERA_I2C_ISER_ARB_EN BIT(3) /* Enable ARB LOST IRQ */
+#define ALTERA_I2C_ISER_NACK_EN BIT(2) /* Enable NACK DET IRQ */
+#define ALTERA_I2C_ISER_RXRDY_EN BIT(1) /* Enable RX Ready IRQ */
+#define ALTERA_I2C_ISER_TXRDY_EN BIT(0) /* Enable TX Ready IRQ */
+#define ALTERA_I2C_ISR 0x10 /* Interrupt Status register */
+#define ALTERA_I2C_ISR_RXOF BIT(4) /* RX OVERFLOW */
+#define ALTERA_I2C_ISR_ARB BIT(3) /* ARB LOST */
+#define ALTERA_I2C_ISR_NACK BIT(2) /* NACK DET */
+#define ALTERA_I2C_ISR_RXRDY BIT(1) /* RX Ready */
+#define ALTERA_I2C_ISR_TXRDY BIT(0) /* TX Ready */
+#define ALTERA_I2C_STATUS 0x14 /* Status register */
+#define ALTERA_I2C_STAT_CORE BIT(0) /* Core Status */
+#define ALTERA_I2C_TC_FIFO_LVL 0x18 /* Transfer FIFO LVL register */
+#define ALTERA_I2C_RX_FIFO_LVL 0x1c /* Receive FIFO LVL register */
+#define ALTERA_I2C_SCL_LOW 0x20 /* SCL low count register */
+#define ALTERA_I2C_SCL_HIGH 0x24 /* SCL high count register */
+#define ALTERA_I2C_SDA_HOLD 0x28 /* SDA hold count register */
+
+#define ALTERA_I2C_ALL_IRQ (ALTERA_I2C_ISR_RXOF | ALTERA_I2C_ISR_ARB | \
+ ALTERA_I2C_ISR_NACK | ALTERA_I2C_ISR_RXRDY | \
+ ALTERA_I2C_ISR_TXRDY)
+
+#define ALTERA_I2C_THRESHOLD 0
+#define ALTERA_I2C_DFLT_FIFO_SZ 8
+#define ALTERA_I2C_TIMEOUT_US 250000 /* 250ms */
+
+#define I2C_PARAM 0x8
+#define I2C_CTRL 0x10
+#define I2C_CTRL_R BIT_ULL(9)
+#define I2C_CTRL_W BIT_ULL(8)
+#define I2C_CTRL_ADDR_MASK GENMASK_ULL(3, 0)
+#define I2C_READ 0x18
+#define I2C_READ_DATA_VALID BIT_ULL(32)
+#define I2C_READ_DATA_MASK GENMASK_ULL(31, 0)
+#define I2C_WRITE 0x20
+#define I2C_WRITE_DATA_MASK GENMASK_ULL(31, 0)
+
+#define ALTERA_I2C_100KHZ 0
+#define ALTERA_I2C_400KHZ 1
+
+/* i2c slave using 16bit address */
+#define I2C_FLAG_ADDR16 1
+
+#define I2C_XFER_RETRY 10
+
+struct i2c_core_param {
+ union {
+ u64 info;
+ struct {
+ u16 fifo_depth:9;
+ u8 interface:1;
+ /*reference clock of I2C core in MHz*/
+ u32 ref_clk:10;
+ /*Max I2C interface freq*/
+ u8 max_req:4;
+ u64 devid:32;
+ /* number of MAC addresses */
+ u8 nu_macs:8;
+ };
+ };
+};
+
+struct altera_i2c_dev {
+ u8 *base;
+ struct i2c_core_param i2c_param;
+ u32 fifo_size;
+ u32 bus_clk_rate; /* i2c bus clock */
+ u32 i2c_clk; /* i2c input clock */
+ struct i2c_msg *msg;
+ size_t msg_len;
+ int msg_err;
+ u32 isr_mask;
+ u8 *buf;
+ int (*xfer)(struct altera_i2c_dev *dev, struct i2c_msg *msg, int num);
+ pthread_mutex_t lock;
+};
+
+/**
+ * struct i2c_msg: an I2C message
+ */
+struct i2c_msg {
+ unsigned int addr;
+ unsigned int flags;
+ unsigned int len;
+ u8 *buf;
+};
+
+#define I2C_MAX_OFFSET_LEN 4
+
+enum i2c_msg_flags {
+ I2C_M_TEN = 0x0010, /*ten-bit chip address*/
+ I2C_M_RD = 0x0001, /*read data*/
+ I2C_M_STOP = 0x8000, /*send stop after this message*/
+};
+
+struct altera_i2c_dev *altera_i2c_probe(void *base);
+void altera_i2c_remove(struct altera_i2c_dev *dev);
+int i2c_read(struct altera_i2c_dev *dev, int flags, unsigned int slave_addr,
+ u32 offset, u8 *buf, u32 count);
+int i2c_write(struct altera_i2c_dev *dev, int flags, unsigned int slave_addr,
+ u32 offset, u8 *buffer, int len);
+int i2c_read8(struct altera_i2c_dev *dev, unsigned int slave_addr, u32 offset,
+ u8 *buf, u32 count);
+int i2c_read16(struct altera_i2c_dev *dev, unsigned int slave_addr, u32 offset,
+ u8 *buf, u32 count);
+int i2c_write8(struct altera_i2c_dev *dev, unsigned int slave_addr, u32 offset,
+ u8 *buf, u32 count);
+int i2c_write16(struct altera_i2c_dev *dev, unsigned int slave_addr, u32 offset,
+ u8 *buf, u32 count);
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_ifpga_hw_api.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_ifpga_hw_api.c
new file mode 100644
index 000000000..89c7b4920
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_ifpga_hw_api.c
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_ifpga_hw_api.h"
+#include "ifpga_api.h"
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_get_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data || !fme_info)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ spinlock_lock(&fme->lock);
+ fme_info->capability = fme->capability;
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_irq(fme->parent, FEATURE_FIU_ID_FME, 0,
+ IFPGA_FME_FEATURE_ID_GLOBAL_ERR, err_irq_set);
+}
+
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_get_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !port_info)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ port_info->capability = port->capability;
+ port_info->num_uafu_irqs = port->num_uafu_irqs;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !info)
+ return -EINVAL;
+
+ /* Only support STP region now */
+ if (info->index != PORT_REGION_INDEX_STP)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ info->addr = port->stp_addr;
+ info->size = port->stp_size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_ERROR, err_irq_set);
+}
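/*
 * Editor's illustration (not part of the patch; the manager is assumed to
 * come from an enumerated ifpga adapter): reading the FME's PCIe0 error
 * counter through the property interface above.
 */
static int get_pcie0_errors(struct opae_manager *mgr, u64 *errors)
{
        struct feature_prop prop = {
                .feature_id = IFPGA_FME_FEATURE_ID_GLOBAL_ERR,
                .prop_id = FME_ERR_PROP_PCIE0_ERRORS,
        };
        int ret;

        ret = opae_manager_ifpga_get_prop(mgr, &prop);
        if (!ret)
                *errors = prop.data;

        return ret;
}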
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_ifpga_hw_api.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_ifpga_hw_api.h
new file mode 100644
index 000000000..bab33862e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_ifpga_hw_api.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_IFPGA_HW_API_H_
+#define _OPAE_IFPGA_HW_API_H_
+
+#include "opae_hw_api.h"
+
+/**
+ * struct feature_prop - data structure for feature property
+ * @feature_id: id of this feature.
+ * @prop_id: id of this property under this feature.
+ * @data: property value to set/get.
+ */
+struct feature_prop {
+ u64 feature_id;
+ u64 prop_id;
+ u64 data;
+};
+
+#define IFPGA_FIU_ID_FME 0x0
+#define IFPGA_FIU_ID_PORT 0x1
+
+#define IFPGA_FME_FEATURE_ID_HEADER 0x0
+#define IFPGA_FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define IFPGA_FME_FEATURE_ID_POWER_MGMT 0x2
+#define IFPGA_FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define IFPGA_FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define IFPGA_FME_FEATURE_ID_PR_MGMT 0x5
+#define IFPGA_FME_FEATURE_ID_HSSI 0x6
+#define IFPGA_FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define IFPGA_PORT_FEATURE_ID_HEADER 0x0
+#define IFPGA_PORT_FEATURE_ID_AFU 0xff
+#define IFPGA_PORT_FEATURE_ID_ERROR 0x10
+#define IFPGA_PORT_FEATURE_ID_UMSG 0x11
+#define IFPGA_PORT_FEATURE_ID_UINT 0x12
+#define IFPGA_PORT_FEATURE_ID_STP 0x13
+
+/*
+ * PROP format (TOP + SUB + ID)
+ *
+ * (~0x0) means this field is unused.
+ */
+#define PROP_TOP GENMASK(31, 24)
+#define PROP_TOP_UNUSED 0xff
+#define PROP_SUB GENMASK(23, 16)
+#define PROP_SUB_UNUSED 0xff
+#define PROP_ID GENMASK(15, 0)
+
+#define PROP(_top, _sub, _id) \
+ (SET_FIELD(PROP_TOP, _top) | SET_FIELD(PROP_SUB, _sub) |\
+ SET_FIELD(PROP_ID, _id))
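/*
 * Editor's worked expansion (values taken from the error-property table
 * below): FME_ERR_PROP_PCIE0_ERRORS is ERR_PROP_ROOT(0x6) =
 * PROP(0xff, 0xff, 0x6), which packs to
 * (0xff << 24) | (0xff << 16) | 0x6 = 0xffff0006,
 * i.e. TOP and SUB marked unused with property id 6.
 */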
+
+/* FME header feature's properties */
+#define FME_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define FME_HDR_PROP_PORTS_NUM 0x2 /* RDONLY */
+#define FME_HDR_PROP_CACHE_SIZE 0x3 /* RDONLY */
+#define FME_HDR_PROP_VERSION 0x4 /* RDONLY */
+#define FME_HDR_PROP_SOCKET_ID 0x5 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_ID 0x6 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_METADATA 0x7 /* RDONLY */
+
+/* FME error reporting feature's properties */
+/* FME error reporting properties format */
+#define ERR_PROP(_top, _id) PROP(_top, 0xff, _id)
+#define ERR_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define ERR_PROP_TOP_FME_ERR 0x1
+#define ERR_PROP_ROOT(_id) ERR_PROP(0xff, _id)
+#define ERR_PROP_FME_ERR(_id) ERR_PROP(ERR_PROP_TOP_FME_ERR, _id)
+
+#define FME_ERR_PROP_ERRORS ERR_PROP_FME_ERR(0x1)
+#define FME_ERR_PROP_FIRST_ERROR ERR_PROP_FME_ERR(0x2)
+#define FME_ERR_PROP_NEXT_ERROR ERR_PROP_FME_ERR(0x3)
+#define FME_ERR_PROP_CLEAR ERR_PROP_FME_ERR(0x4) /* WO */
+#define FME_ERR_PROP_SEU_EMR_LOW ERR_PROP_FME_ERR(0x5)
+#define FME_ERR_PROP_SEU_EMR_HIGH ERR_PROP_FME_ERR(0x6)
+#define FME_ERR_PROP_REVISION ERR_PROP_ROOT(0x5)
+#define FME_ERR_PROP_PCIE0_ERRORS ERR_PROP_ROOT(0x6) /* RW */
+#define FME_ERR_PROP_PCIE1_ERRORS ERR_PROP_ROOT(0x7) /* RW */
+#define FME_ERR_PROP_NONFATAL_ERRORS ERR_PROP_ROOT(0x8)
+#define FME_ERR_PROP_CATFATAL_ERRORS ERR_PROP_ROOT(0x9)
+#define FME_ERR_PROP_INJECT_ERRORS ERR_PROP_ROOT(0xa) /* RW */
+
+/* FME thermal feature's properties */
+#define FME_THERMAL_PROP_THRESHOLD1 0x1 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD2 0x2 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD_TRIP 0x3 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_REACHED 0x4 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD2_REACHED 0x5 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_POLICY 0x6 /* RW */
+#define FME_THERMAL_PROP_TEMPERATURE 0x7 /* RDONLY */
+#define FME_THERMAL_PROP_REVISION 0x8 /* RDONLY */
+
+/* FME power feature's properties */
+#define FME_PWR_PROP_CONSUMED 0x1 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD1 0x2 /* RW */
+#define FME_PWR_PROP_THRESHOLD2 0x3 /* RW */
+#define FME_PWR_PROP_THRESHOLD1_STATUS 0x4 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD2_STATUS 0x5 /* RDONLY */
+#define FME_PWR_PROP_RTL 0x6 /* RDONLY */
+#define FME_PWR_PROP_XEON_LIMIT 0x7 /* RDONLY */
+#define FME_PWR_PROP_FPGA_LIMIT 0x8 /* RDONLY */
+#define FME_PWR_PROP_REVISION 0x9 /* RDONLY */
+
+/* FME iperf/dperf PROP format */
+#define PERF_PROP_TOP_CACHE 0x1
+#define PERF_PROP_TOP_VTD 0x2
+#define PERF_PROP_TOP_FAB 0x3
+#define PERF_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define PERF_PROP_SUB_UNUSED PROP_SUB_UNUSED
+
+#define PERF_PROP_ROOT(_id) PROP(0xff, 0xff, _id)
+#define PERF_PROP_CACHE(_id) PROP(PERF_PROP_TOP_CACHE, 0xff, _id)
+#define PERF_PROP_VTD(_sub, _id) PROP(PERF_PROP_TOP_VTD, _sub, _id)
+#define PERF_PROP_VTD_ROOT(_id) PROP(PERF_PROP_TOP_VTD, 0xff, _id)
+#define PERF_PROP_FAB(_sub, _id) PROP(PERF_PROP_TOP_FAB, _sub, _id)
+#define PERF_PROP_FAB_ROOT(_id) PROP(PERF_PROP_TOP_FAB, 0xff, _id)
+
+/* FME iperf feature's properties */
+#define FME_IPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_IPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* iperf CACHE properties */
+#define FME_IPERF_PROP_CACHE_FREEZE PERF_PROP_CACHE(0x1) /* RW */
+#define FME_IPERF_PROP_CACHE_READ_HIT PERF_PROP_CACHE(0x2)
+#define FME_IPERF_PROP_CACHE_READ_MISS PERF_PROP_CACHE(0x3)
+#define FME_IPERF_PROP_CACHE_WRITE_HIT PERF_PROP_CACHE(0x4)
+#define FME_IPERF_PROP_CACHE_WRITE_MISS PERF_PROP_CACHE(0x5)
+#define FME_IPERF_PROP_CACHE_HOLD_REQUEST PERF_PROP_CACHE(0x6)
+#define FME_IPERF_PROP_CACHE_TX_REQ_STALL PERF_PROP_CACHE(0x7)
+#define FME_IPERF_PROP_CACHE_RX_REQ_STALL PERF_PROP_CACHE(0x8)
+#define FME_IPERF_PROP_CACHE_RX_EVICTION PERF_PROP_CACHE(0x9)
+#define FME_IPERF_PROP_CACHE_DATA_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xa)
+#define FME_IPERF_PROP_CACHE_TAG_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xb)
+/* iperf VTD properties */
+#define FME_IPERF_PROP_VTD_FREEZE PERF_PROP_VTD_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_HIT PERF_PROP_VTD_ROOT(0x2)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_HIT PERF_PROP_VTD_ROOT(0x3)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_HIT PERF_PROP_VTD_ROOT(0x4)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_HIT PERF_PROP_VTD_ROOT(0x5)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_HIT PERF_PROP_VTD_ROOT(0x6)
+#define FME_IPERF_PROP_VTD_SIP_RCC_HIT PERF_PROP_VTD_ROOT(0x7)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_MISS PERF_PROP_VTD_ROOT(0x8)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_MISS PERF_PROP_VTD_ROOT(0x9)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_MISS PERF_PROP_VTD_ROOT(0xa)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_MISS PERF_PROP_VTD_ROOT(0xb)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_MISS PERF_PROP_VTD_ROOT(0xc)
+#define FME_IPERF_PROP_VTD_SIP_RCC_MISS PERF_PROP_VTD_ROOT(0xd)
+#define FME_IPERF_PROP_VTD_PORT_READ_TRANSACTION(n) PERF_PROP_VTD(n, 0xe)
+#define FME_IPERF_PROP_VTD_PORT_WRITE_TRANSACTION(n) PERF_PROP_VTD(n, 0xf)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_READ_HIT(n) PERF_PROP_VTD(n, 0x10)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_WRITE_HIT(n) PERF_PROP_VTD(n, 0x11)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_4K_FILL(n) PERF_PROP_VTD(n, 0x12)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_2M_FILL(n) PERF_PROP_VTD(n, 0x13)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_1G_FILL(n) PERF_PROP_VTD(n, 0x14)
+/* iperf FAB properties */
+#define FME_IPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_IPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_IPERF_PROP_FAB_PCIE1_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_IPERF_PROP_FAB_PCIE1_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_IPERF_PROP_FAB_UPI_READ PERF_PROP_FAB_ROOT(0x6)
+#define FME_IPERF_PROP_FAB_PORT_UPI_READ(n) PERF_PROP_FAB(n, 0x6)
+#define FME_IPERF_PROP_FAB_UPI_WRITE PERF_PROP_FAB_ROOT(0x7)
+#define FME_IPERF_PROP_FAB_PORT_UPI_WRITE(n) PERF_PROP_FAB(n, 0x7)
+#define FME_IPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x8)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x8)
+#define FME_IPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x9)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x9)
+#define FME_IPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0xa) /* RW */
+#define FME_IPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0xa) /* RW */
+
+/* FME dperf properties */
+#define FME_DPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_DPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* dperf FAB properties */
+#define FME_DPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_DPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_DPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_DPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_DPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_DPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0x6) /* RW */
+#define FME_DPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0x6) /* RW */
+
+/* PORT hdr feature's properties */
+#define PORT_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_HDR_PROP_PORTIDX 0x2 /* RDONLY */
+#define PORT_HDR_PROP_LATENCY_TOLERANCE 0x3 /* RDONLY */
+#define PORT_HDR_PROP_AP1_EVENT 0x4 /* RW */
+#define PORT_HDR_PROP_AP2_EVENT 0x5 /* RW */
+#define PORT_HDR_PROP_POWER_STATE 0x6 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_FREQCMD 0x7 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQCNTRCMD 0x8 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQSTS 0x9 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_CNTRSTS 0xa /* RDONLY */
+
+/* PORT error feature's properties */
+#define PORT_ERR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_ERR_PROP_ERRORS 0x2 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_ERROR 0x3 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB 0x4 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB 0x5 /* RDONLY */
+#define PORT_ERR_PROP_CLEAR 0x6 /* WRONLY */
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
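+/*
+ * Usage sketch (hypothetical caller; assumes struct feature_prop
+ * carries feature_id/prop_id/data as defined earlier in this header,
+ * and that the feature ID constant comes from ifpga_defines.h):
+ *
+ *	struct feature_prop prop = {
+ *		.feature_id = FME_FEATURE_ID_THERMAL_MGMT,
+ *		.prop_id = FME_THERMAL_PROP_TEMPERATURE,
+ *	};
+ *	if (!opae_manager_ifpga_get_prop(mgr, &prop))
+ *		process(prop.data);
+ */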
+
+/*
+ * Retrieve information about the fpga fme.
+ * Driver fills the info in provided struct fpga_fme_info.
+ */
+struct fpga_fme_info {
+ u32 capability; /* The capability of FME device */
+#define FPGA_FME_CAP_ERR_IRQ (1 << 0) /* Support fme error interrupt */
+};
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info);
+
+/* Set eventfd information for ifpga FME error interrupt */
+struct fpga_fme_err_irq_set {
+ s32 evtfd; /* Eventfd handler */
+};
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set);
+
+/*
+ * Retrieve information about the fpga port.
+ * Driver fills the info in provided struct fpga_port_info.
+ */
+struct fpga_port_info {
+ u32 capability; /* The capability of port device */
+#define FPGA_PORT_CAP_ERR_IRQ (1 << 0) /* Support port error interrupt */
+#define FPGA_PORT_CAP_UAFU_IRQ (1 << 1) /* Support uafu error interrupt */
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+};
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info);
+/*
+ * Retrieve region information about the fpga port.
+ * Driver needs to fill the index of struct fpga_port_region_info.
+ */
+struct fpga_port_region_info {
+ u32 index;
+#define PORT_REGION_INDEX_STP (1 << 1) /* Signal Tap Region */
+ u64 size; /* Region Size */
+ u8 *addr; /* Base address of the region */
+};
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info);
+
+/* Set eventfd information for ifpga port error interrupt */
+struct fpga_port_err_irq_set {
+ s32 evtfd; /* Eventfd handler */
+};
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set);
+
+#endif /* _OPAE_IFPGA_HW_API_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c
new file mode 100644
index 000000000..8e23ca18a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c
@@ -0,0 +1,650 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#include "opae_intel_max10.h"
+#include <libfdt.h>
+
+int max10_reg_read(struct intel_max10_device *dev,
+ unsigned int reg, unsigned int *val)
+{
+ if (!dev)
+ return -ENODEV;
+
+ dev_debug(dev, "%s: bus:0x%x, reg:0x%x\n", __func__, dev->bus, reg);
+
+ return spi_transaction_read(dev->spi_tran_dev,
+ reg, 4, (unsigned char *)val);
+}
+
+int max10_reg_write(struct intel_max10_device *dev,
+ unsigned int reg, unsigned int val)
+{
+ unsigned int tmp = val;
+
+ if (!dev)
+ return -ENODEV;
+
+ dev_debug(dev, "%s: bus:0x%x, reg:0x%x, val:0x%x\n", __func__,
+ dev->bus, reg, val);
+
+ return spi_transaction_write(dev->spi_tran_dev,
+ reg, 4, (unsigned char *)&tmp);
+}
+
+int max10_sys_read(struct intel_max10_device *dev,
+ unsigned int offset, unsigned int *val)
+{
+ if (!dev)
+ return -ENODEV;
+
+ return max10_reg_read(dev, dev->base + offset, val);
+}
+
+int max10_sys_write(struct intel_max10_device *dev,
+ unsigned int offset, unsigned int val)
+{
+ if (!dev)
+ return -ENODEV;
+
+ return max10_reg_write(dev, dev->base + offset, val);
+}
+
+static struct max10_compatible_id max10_id_table[] = {
+ {.compatible = MAX10_PAC,},
+ {.compatible = MAX10_PAC_N3000,},
+ {.compatible = MAX10_PAC_END,}
+};
+
+static struct max10_compatible_id *max10_match_compatible(const char *fdt_root)
+{
+ struct max10_compatible_id *id = max10_id_table;
+
+ for (; strcmp(id->compatible, MAX10_PAC_END); id++) {
+ if (fdt_node_check_compatible(fdt_root, 0, id->compatible))
+ continue;
+
+ return id;
+ }
+
+ return NULL;
+}
+
+static inline bool
+is_max10_pac_n3000(struct intel_max10_device *max10)
+{
+ return max10->id && !strcmp(max10->id->compatible,
+ MAX10_PAC_N3000);
+}
+
+static void max10_check_capability(struct intel_max10_device *max10)
+{
+ if (!max10->fdt_root)
+ return;
+
+ if (is_max10_pac_n3000(max10)) {
+ max10->flags |= MAX10_FLAGS_NO_I2C2 |
+ MAX10_FLAGS_NO_BMCIMG_FLASH;
+ dev_info(max10, "found %s card\n", max10->id->compatible);
+ } else
+ max10->flags |= MAX10_FLAGS_MAC_CACHE;
+}
+
+static int altera_nor_flash_read(struct intel_max10_device *dev,
+ u32 offset, void *buffer, u32 len)
+{
+ int word_len;
+ int i;
+ unsigned int *buf = (unsigned int *)buffer;
+ unsigned int value;
+ int ret;
+
+	if (!dev || !buffer || len == 0)
+		return -EINVAL;
+
+ word_len = len/4;
+
+ for (i = 0; i < word_len; i++) {
+ ret = max10_reg_read(dev, offset + i*4,
+ &value);
+ if (ret)
+ return -EBUSY;
+
+ *buf++ = value;
+ }
+
+ return 0;
+}
+
+static int enable_nor_flash(struct intel_max10_device *dev, bool on)
+{
+ unsigned int val = 0;
+ int ret;
+
+ ret = max10_sys_read(dev, RSU_REG, &val);
+ if (ret) {
+		dev_err(NULL, "enabling flash error\n");
+ return ret;
+ }
+
+ if (on)
+ val |= RSU_ENABLE;
+ else
+ val &= ~RSU_ENABLE;
+
+ return max10_sys_write(dev, RSU_REG, val);
+}
+
+static int init_max10_device_table(struct intel_max10_device *max10)
+{
+ struct max10_compatible_id *id;
+ struct fdt_header hdr;
+ char *fdt_root = NULL;
+
+ u32 dt_size, dt_addr, val;
+	int ret, err;
+
+ ret = max10_sys_read(max10, DT_AVAIL_REG, &val);
+ if (ret) {
+		dev_err(max10, "cannot read DT_AVAIL_REG\n");
+ return ret;
+ }
+
+ if (!(val & DT_AVAIL)) {
+		dev_err(max10, "DT not available\n");
+ return -EINVAL;
+ }
+
+ ret = max10_sys_read(max10, DT_BASE_ADDR_REG, &dt_addr);
+ if (ret) {
+		dev_info(max10, "cannot get base addr of device table\n");
+ return ret;
+ }
+
+ ret = enable_nor_flash(max10, true);
+ if (ret) {
+		dev_err(max10, "failed to enable flash\n");
+ return ret;
+ }
+
+ ret = altera_nor_flash_read(max10, dt_addr, &hdr, sizeof(hdr));
+ if (ret) {
+		dev_err(max10, "failed to read fdt header\n");
+ goto done;
+ }
+
+ ret = fdt_check_header(&hdr);
+ if (ret) {
+		dev_err(max10, "fdt header check failed\n");
+ goto done;
+ }
+
+ dt_size = fdt_totalsize(&hdr);
+ if (dt_size > DFT_MAX_SIZE) {
+		dev_err(max10, "invalid device table size\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ fdt_root = opae_malloc(dt_size);
+ if (!fdt_root) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = altera_nor_flash_read(max10, dt_addr, fdt_root, dt_size);
+ if (ret) {
+		dev_err(max10, "cannot read device table\n");
+ goto done;
+ }
+
+ id = max10_match_compatible(fdt_root);
+ if (!id) {
+		dev_err(max10, "max10 compatible not found\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ max10->flags |= MAX10_FLAGS_DEVICE_TABLE;
+
+ max10->id = id;
+ max10->fdt_root = fdt_root;
+
+done:
+	/* disable flash access again, but keep the first error encountered */
+	err = enable_nor_flash(max10, false);
+	if (!ret)
+		ret = err;
+
+	if (ret && fdt_root) {
+		opae_free(fdt_root);
+		max10->fdt_root = NULL;
+	}
+
+	return ret;
+}
+
+static u64 fdt_get_number(const fdt32_t *cell, int size)
+{
+ u64 r = 0;
+
+ while (size--)
+ r = (r << 32) | fdt32_to_cpu(*cell++);
+
+ return r;
+}
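+/*
+ * Example: with na = 2 address cells holding { 0x1, 0x2 }, the helper
+ * above returns 0x0000000100000002 (FDT cells are big-endian 32-bit).
+ */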
+
+static int fdt_get_reg(const void *fdt, int node, unsigned int idx,
+ u64 *start, u64 *size)
+{
+ const fdt32_t *prop, *end;
+ int na = 0, ns = 0, len = 0, parent;
+
+ parent = fdt_parent_offset(fdt, node);
+ if (parent < 0)
+ return parent;
+
+ prop = fdt_getprop(fdt, parent, "#address-cells", NULL);
+ na = prop ? fdt32_to_cpu(*prop) : 2;
+
+ prop = fdt_getprop(fdt, parent, "#size-cells", NULL);
+ ns = prop ? fdt32_to_cpu(*prop) : 2;
+
+ prop = fdt_getprop(fdt, node, "reg", &len);
+ if (!prop)
+ return -FDT_ERR_NOTFOUND;
+
+ end = prop + len/sizeof(*prop);
+ prop = prop + (na + ns) * idx;
+
+ if (prop + na + ns > end)
+ return -FDT_ERR_NOTFOUND;
+
+ *start = fdt_get_number(prop, na);
+ *size = fdt_get_number(prop + na, ns);
+
+ return 0;
+}
+
+static int __fdt_stringlist_search(const void *fdt, int offset,
+ const char *prop, const char *string)
+{
+ int length, len, index = 0;
+ const char *list, *end;
+
+ list = fdt_getprop(fdt, offset, prop, &length);
+ if (!list)
+ return length;
+
+ len = strlen(string) + 1;
+ end = list + length;
+
+ while (list < end) {
+ length = strnlen(list, end - list) + 1;
+
+ if (list + length > end)
+ return -FDT_ERR_BADVALUE;
+
+ if (length == len && memcmp(list, string, length) == 0)
+ return index;
+
+ list += length;
+ index++;
+ }
+
+ return -FDT_ERR_NOTFOUND;
+}
+
+static int fdt_get_named_reg(const void *fdt, int node, const char *name,
+ u64 *start, u64 *size)
+{
+ int idx;
+
+ idx = __fdt_stringlist_search(fdt, node, "reg-names", name);
+ if (idx < 0)
+ return idx;
+
+ return fdt_get_reg(fdt, node, idx, start, size);
+}
+
+static void max10_sensor_uinit(struct intel_max10_device *dev)
+{
+ struct opae_sensor_info *info;
+
+	/* pop entries one by one; freeing inside TAILQ_FOREACH is unsafe */
+	while ((info = TAILQ_FIRST(&dev->opae_sensor_list)) != NULL) {
+		TAILQ_REMOVE(&dev->opae_sensor_list, info, node);
+		opae_free(info);
+	}
+}
+
+static bool sensor_reg_valid(struct sensor_reg *reg)
+{
+ return !!reg->size;
+}
+
+static int max10_add_sensor(struct intel_max10_device *dev,
+ struct raw_sensor_info *info, struct opae_sensor_info *sensor)
+{
+ int i;
+ int ret = 0;
+ unsigned int val;
+
+ if (!info || !sensor)
+ return -ENODEV;
+
+ sensor->id = info->id;
+ sensor->name = info->name;
+ sensor->type = info->type;
+ sensor->multiplier = info->multiplier;
+
+ for (i = SENSOR_REG_VALUE; i < SENSOR_REG_MAX; i++) {
+ if (!sensor_reg_valid(&info->regs[i]))
+ continue;
+
+ ret = max10_sys_read(dev, info->regs[i].regoff, &val);
+ if (ret)
+ break;
+
+ if (val == 0xdeadbeef) {
+ dev_debug(dev, "%s: sensor:%s invalid 0x%x at:%d\n",
+ __func__, sensor->name, val, i);
+ continue;
+ }
+
+ val *= info->multiplier;
+
+ switch (i) {
+ case SENSOR_REG_VALUE:
+ sensor->value_reg = info->regs[i].regoff;
+ sensor->flags |= OPAE_SENSOR_VALID;
+ break;
+ case SENSOR_REG_HIGH_WARN:
+ sensor->high_warn = val;
+ sensor->flags |= OPAE_SENSOR_HIGH_WARN_VALID;
+ break;
+ case SENSOR_REG_HIGH_FATAL:
+ sensor->high_fatal = val;
+ sensor->flags |= OPAE_SENSOR_HIGH_FATAL_VALID;
+ break;
+ case SENSOR_REG_LOW_WARN:
+ sensor->low_warn = val;
+ sensor->flags |= OPAE_SENSOR_LOW_WARN_VALID;
+ break;
+ case SENSOR_REG_LOW_FATAL:
+ sensor->low_fatal = val;
+ sensor->flags |= OPAE_SENSOR_LOW_FATAL_VALID;
+ break;
+ case SENSOR_REG_HYSTERESIS:
+ sensor->hysteresis = val;
+ sensor->flags |= OPAE_SENSOR_HYSTERESIS_VALID;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+max10_sensor_init(struct intel_max10_device *dev, int parent)
+{
+ int i, ret = 0, offset = 0;
+ const fdt32_t *num;
+ const char *ptr;
+ u64 start, size;
+	struct raw_sensor_info *raw = NULL;
+ struct opae_sensor_info *sensor;
+ char *fdt_root = dev->fdt_root;
+
+ if (!fdt_root) {
+		dev_debug(dev, "skip sensor init: no Device Tree found\n");
+ return 0;
+ }
+
+ fdt_for_each_subnode(offset, fdt_root, parent) {
+ ptr = fdt_get_name(fdt_root, offset, NULL);
+ if (!ptr) {
+			dev_err(dev, "failed to get fdt node name\n");
+ continue;
+ }
+
+ if (!strstr(ptr, "sensor")) {
+ dev_debug(dev, "%s is not a sensor node\n", ptr);
+ continue;
+ }
+
+ dev_debug(dev, "found sensor node %s\n", ptr);
+
+ raw = (struct raw_sensor_info *)opae_zmalloc(sizeof(*raw));
+ if (!raw) {
+ ret = -ENOMEM;
+ goto free_sensor;
+ }
+
+ raw->name = fdt_getprop(fdt_root, offset, "sensor_name", NULL);
+ if (!raw->name) {
+ ret = -EINVAL;
+ goto free_sensor;
+ }
+
+ raw->type = fdt_getprop(fdt_root, offset, "type", NULL);
+ if (!raw->type) {
+ ret = -EINVAL;
+ goto free_sensor;
+ }
+
+ for (i = SENSOR_REG_VALUE; i < SENSOR_REG_MAX; i++) {
+ ret = fdt_get_named_reg(fdt_root, offset,
+ sensor_reg_name[i], &start,
+ &size);
+ if (ret) {
+				dev_debug(dev, "reg not found (%d): sensor node %s, %s\n",
+					ret, ptr, sensor_reg_name[i]);
+ if (i == SENSOR_REG_VALUE) {
+ ret = -EINVAL;
+ goto free_sensor;
+ }
+
+ continue;
+ }
+
+			/* This is a hack for compatibility with the
+			 * non-secure solution: if the sensors are included
+			 * in the root node, it is a non-secure dtb, which
+			 * uses the absolute addresses of the non-secure
+			 * register layout.
+			 */
+ if (parent)
+ raw->regs[i].regoff = start;
+ else
+ raw->regs[i].regoff = start -
+ MAX10_BASE_ADDR;
+ raw->regs[i].size = size;
+ }
+
+ num = fdt_getprop(fdt_root, offset, "id", NULL);
+ if (!num) {
+ ret = -EINVAL;
+ goto free_sensor;
+ }
+
+ raw->id = fdt32_to_cpu(*num);
+ num = fdt_getprop(fdt_root, offset, "multiplier", NULL);
+ raw->multiplier = num ? fdt32_to_cpu(*num) : 1;
+
+ dev_debug(dev, "found sensor from DTB: %s: %s: %u: %u\n",
+ raw->name, raw->type,
+ raw->id, raw->multiplier);
+
+ for (i = SENSOR_REG_VALUE; i < SENSOR_REG_MAX; i++)
+ dev_debug(dev, "sensor reg[%d]: %x: %zu\n",
+ i, raw->regs[i].regoff,
+ raw->regs[i].size);
+
+ sensor = opae_zmalloc(sizeof(*sensor));
+ if (!sensor) {
+			ret = -ENOMEM;
+ goto free_sensor;
+ }
+
+ if (max10_add_sensor(dev, raw, sensor)) {
+ ret = -EINVAL;
+ opae_free(sensor);
+ goto free_sensor;
+ }
+
+ if (sensor->flags & OPAE_SENSOR_VALID) {
+ TAILQ_INSERT_TAIL(&dev->opae_sensor_list, sensor, node);
+ dev_info(dev, "found valid sensor: %s\n", sensor->name);
+ } else
+ opae_free(sensor);
+
+ opae_free(raw);
+ }
+
+ return 0;
+
+free_sensor:
+ if (raw)
+ opae_free(raw);
+ max10_sensor_uinit(dev);
+ return ret;
+}
+
+static int check_max10_version(struct intel_max10_device *dev)
+{
+ unsigned int v;
+
+ if (!max10_reg_read(dev, MAX10_SEC_BASE_ADDR + MAX10_BUILD_VER,
+ &v)) {
+ if (v != 0xffffffff) {
+ dev_info(dev, "secure MAX10 detected\n");
+ dev->base = MAX10_SEC_BASE_ADDR;
+ dev->flags |= MAX10_FLAGS_SECURE;
+ } else {
+ dev_info(dev, "non-secure MAX10 detected\n");
+ dev->base = MAX10_BASE_ADDR;
+ }
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int
+max10_secure_hw_init(struct intel_max10_device *dev)
+{
+ int offset, sysmgr_offset = 0;
+ char *fdt_root;
+
+ fdt_root = dev->fdt_root;
+ if (!fdt_root) {
+		dev_debug(dev, "skip secure hw init: no Device Tree found\n");
+ return 0;
+ }
+
+ fdt_for_each_subnode(offset, fdt_root, 0) {
+ if (!fdt_node_check_compatible(fdt_root, offset,
+ "intel-max10,system-manager")) {
+ sysmgr_offset = offset;
+ break;
+ }
+ }
+
+ max10_check_capability(dev);
+
+ max10_sensor_init(dev, sysmgr_offset);
+
+ return 0;
+}
+
+static int
+max10_non_secure_hw_init(struct intel_max10_device *dev)
+{
+ max10_check_capability(dev);
+
+ max10_sensor_init(dev, 0);
+
+ return 0;
+}
+
+struct intel_max10_device *
+intel_max10_device_probe(struct altera_spi_device *spi,
+ int chipselect)
+{
+ struct intel_max10_device *dev;
+ int ret;
+ unsigned int val;
+
+	/* zero the allocation so flags and fdt_root start in a known state */
+	dev = opae_zmalloc(sizeof(*dev));
+ if (!dev)
+ return NULL;
+
+ TAILQ_INIT(&dev->opae_sensor_list);
+
+ dev->spi_master = spi;
+
+ dev->spi_tran_dev = spi_transaction_init(spi, chipselect);
+ if (!dev->spi_tran_dev) {
+ dev_err(dev, "%s spi tran init fail\n", __func__);
+ goto free_dev;
+ }
+
+ /* check the max10 version */
+ ret = check_max10_version(dev);
+ if (ret) {
+ dev_err(dev, "Failed to find max10 hardware!\n");
+		goto free_dtb;
+ }
+
+ /* load the MAX10 device table */
+ ret = init_max10_device_table(dev);
+ if (ret) {
+		dev_err(dev, "failed to init max10 device table\n");
+		goto free_dtb;
+ }
+
+ /* init max10 devices, like sensor*/
+ if (dev->flags & MAX10_FLAGS_SECURE)
+ ret = max10_secure_hw_init(dev);
+ else
+ ret = max10_non_secure_hw_init(dev);
+ if (ret) {
+ dev_err(dev, "Failed to init max10 hardware!\n");
+ goto free_dtb;
+ }
+
+ /* read FPGA loading information */
+ ret = max10_sys_read(dev, FPGA_PAGE_INFO, &val);
+ if (ret) {
+ dev_err(dev, "fail to get FPGA loading info\n");
+ goto release_max10_hw;
+ }
+ dev_info(dev, "FPGA loaded from %s Image\n", val ? "User" : "Factory");
+
+ return dev;
+
+release_max10_hw:
+ max10_sensor_uinit(dev);
+free_dtb:
+ if (dev->fdt_root)
+ opae_free(dev->fdt_root);
+ if (dev->spi_tran_dev)
+ spi_transaction_remove(dev->spi_tran_dev);
+free_dev:
+ opae_free(dev);
+
+ return NULL;
+}
+
+int intel_max10_device_remove(struct intel_max10_device *dev)
+{
+ if (!dev)
+ return 0;
+
+ max10_sensor_uinit(dev);
+
+ if (dev->spi_tran_dev)
+ spi_transaction_remove(dev->spi_tran_dev);
+
+ if (dev->fdt_root)
+ opae_free(dev->fdt_root);
+
+ opae_free(dev);
+
+ return 0;
+}
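+/*
+ * Lifecycle sketch (hypothetical caller): probe pairs with remove.
+ *
+ *	struct intel_max10_device *m =
+ *		intel_max10_device_probe(spi_master, 0);
+ *	if (m) {
+ *		...
+ *		intel_max10_device_remove(m);
+ *	}
+ */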
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_intel_max10.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_intel_max10.h
new file mode 100644
index 000000000..123cdc48b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_intel_max10.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _OPAE_INTEL_MAX10_H_
+#define _OPAE_INTEL_MAX10_H_
+
+#include "opae_osdep.h"
+#include "opae_spi.h"
+
+struct max10_compatible_id {
+ char compatible[128];
+};
+
+#define MAX10_PAC "intel,max10"
+#define MAX10_PAC_N3000 "intel,max10-pac-n3000"
+#define MAX10_PAC_END "intel,end"
+
+/* max10 capability flags */
+#define MAX10_FLAGS_NO_I2C2 BIT(0)
+#define MAX10_FLAGS_NO_BMCIMG_FLASH BIT(1)
+#define MAX10_FLAGS_DEVICE_TABLE BIT(2)
+#define MAX10_FLAGS_SPI BIT(3)
+#define MAX10_FLGAS_NIOS_SPI BIT(4)
+#define MAX10_FLAGS_PKVL BIT(5)
+#define MAX10_FLAGS_SECURE BIT(6)
+#define MAX10_FLAGS_MAC_CACHE BIT(7)
+
+/** List of opae sensors */
+TAILQ_HEAD(opae_sensor_list, opae_sensor_info);
+
+struct intel_max10_device {
+	unsigned int flags; /* max10 hardware capability */
+	struct altera_spi_device *spi_master;
+	struct spi_transaction_dev *spi_tran_dev;
+	struct max10_compatible_id *id; /* max10 compatible */
+ char *fdt_root;
+ unsigned int base; /* max10 base address */
+ u16 bus;
+ struct opae_sensor_list opae_sensor_list;
+};
+
+/* retimer speed */
+enum retimer_speed {
+ MXD_1GB = 1,
+ MXD_2_5GB = 2,
+ MXD_5GB = 5,
+ MXD_10GB = 10,
+ MXD_25GB = 25,
+ MXD_40GB = 40,
+ MXD_100GB = 100,
+ MXD_SPEED_UNKNOWN,
+};
+
+/* retimer info */
+struct opae_retimer_info {
+ unsigned int nums_retimer;
+ unsigned int ports_per_retimer;
+ unsigned int nums_fvl;
+ unsigned int ports_per_fvl;
+ enum retimer_speed support_speed;
+};
+
+/* retimer status */
+struct opae_retimer_status {
+ enum retimer_speed speed;
+ /*
+ * retimer line link status bitmap:
+ * bit 0: Retimer0 Port0 link status
+ * bit 1: Retimer0 Port1 link status
+ * bit 2: Retimer0 Port2 link status
+ * bit 3: Retimer0 Port3 link status
+ *
+ * bit 4: Retimer1 Port0 link status
+ * bit 5: Retimer1 Port1 link status
+ * bit 6: Retimer1 Port2 link status
+ * bit 7: Retimer1 Port3 link status
+ */
+ unsigned int line_link_bitmap;
+};
+
+#define FLASH_BASE 0x10000000
+#define FLASH_OPTION_BITS 0x10000
+
+/* System Registers */
+#define MAX10_BASE_ADDR 0x300400
+#define MAX10_SEC_BASE_ADDR 0x300800
+/* Register offset of system registers */
+#define NIOS2_FW_VERSION 0x0
+#define MAX10_MACADDR1 0x10
+#define MAX10_MAC_BYTE4 GENMASK(7, 0)
+#define MAX10_MAC_BYTE3 GENMASK(15, 8)
+#define MAX10_MAC_BYTE2 GENMASK(23, 16)
+#define MAX10_MAC_BYTE1 GENMASK(31, 24)
+#define MAX10_MACADDR2 0x14
+#define MAX10_MAC_BYTE6 GENMASK(7, 0)
+#define MAX10_MAC_BYTE5 GENMASK(15, 8)
+#define MAX10_MAC_COUNT GENMASK(23, 16)
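+/*
+ * Example: after v1 = read of MAX10_MACADDR1, the first MAC octet is
+ * GET_FIELD(MAX10_MAC_BYTE1, v1); MAX10_MAC_COUNT in MAX10_MACADDR2
+ * tells how many consecutive MAC addresses are allocated.
+ */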
+#define RSU_REG 0x2c
+#define FPGA_RECONF_PAGE GENMASK(2, 0)
+#define FPGA_RP_LOAD BIT(3)
+#define NIOS2_PRERESET BIT(4)
+#define NIOS2_HANG BIT(5)
+#define RSU_ENABLE BIT(6)
+#define NIOS2_RESET BIT(7)
+#define NIOS2_I2C2_POLL_STOP BIT(13)
+#define PKVL_EEPROM_LOAD BIT(31)
+#define FPGA_RECONF_REG 0x30
+#define MAX10_TEST_REG 0x3c
+#define COUNTDOWN_START BIT(18)
+#define MAX10_BUILD_VER 0x68
+#define MAX10_VERSION_MAJOR GENMASK(23, 16)
+#define PCB_INFO GENMASK(31, 24)
+#define FPGA_PAGE_INFO 0x6c
+#define DT_AVAIL_REG 0x90
+#define DT_AVAIL BIT(0)
+#define DT_BASE_ADDR_REG 0x94
+#define MAX10_DOORBELL 0x400
+#define RSU_REQUEST BIT(0)
+#define SEC_PROGRESS GENMASK(7, 4)
+#define HOST_STATUS GENMASK(11, 8)
+#define SEC_STATUS GENMASK(23, 16)
+
+/* PKVL related registers, in system register region */
+#define PKVL_POLLING_CTRL 0x80
+#define POLLING_MODE GENMASK(15, 0)
+#define PKVL_A_PRELOAD BIT(16)
+#define PKVL_A_PRELOAD_TIMEOUT BIT(17)
+#define PKVL_A_DATA_TOO_BIG BIT(18)
+#define PKVL_A_HDR_CHECKSUM BIT(20)
+#define PKVL_B_PRELOAD BIT(24)
+#define PKVL_B_PRELOAD_TIMEOUT BIT(25)
+#define PKVL_B_DATA_TOO_BIG BIT(26)
+#define PKVL_B_HDR_CHECKSUM BIT(28)
+#define PKVL_EEPROM_UPG_STATUS GENMASK(31, 16)
+#define PKVL_LINK_STATUS 0x164
+#define PKVL_A_VERSION 0x254
+#define PKVL_B_VERSION 0x258
+#define SERDES_VERSION GENMASK(15, 0)
+#define SBUS_VERSION GENMASK(31, 16)
+
+#define DFT_MAX_SIZE 0x7e0000
+
+int max10_reg_read(struct intel_max10_device *dev,
+ unsigned int reg, unsigned int *val);
+int max10_reg_write(struct intel_max10_device *dev,
+ unsigned int reg, unsigned int val);
+int max10_sys_read(struct intel_max10_device *dev,
+ unsigned int offset, unsigned int *val);
+int max10_sys_write(struct intel_max10_device *dev,
+ unsigned int offset, unsigned int val);
+struct intel_max10_device *
+intel_max10_device_probe(struct altera_spi_device *spi,
+ int chipselect);
+int intel_max10_device_remove(struct intel_max10_device *dev);
+
+#define SENSOR_REG_VALUE 0x0
+#define SENSOR_REG_HIGH_WARN 0x1
+#define SENSOR_REG_HIGH_FATAL 0x2
+#define SENSOR_REG_LOW_WARN 0x3
+#define SENSOR_REG_LOW_FATAL 0x4
+#define SENSOR_REG_HYSTERESIS 0x5
+#define SENSOR_REG_MAX 0x6
+
+static const char * const sensor_reg_name[] = {
+ "value",
+ "high_warn",
+ "high_fatal",
+ "low_warn",
+ "low_fatal",
+ "hysteresis",
+};
+
+struct sensor_reg {
+ unsigned int regoff;
+ size_t size;
+};
+
+struct raw_sensor_info {
+ const char *name;
+ const char *type;
+ unsigned int id;
+ unsigned int multiplier;
+ struct sensor_reg regs[SENSOR_REG_MAX];
+};
+
+#define OPAE_SENSOR_VALID 0x1
+#define OPAE_SENSOR_HIGH_WARN_VALID 0x2
+#define OPAE_SENSOR_HIGH_FATAL_VALID 0x4
+#define OPAE_SENSOR_LOW_WARN_VALID 0x8
+#define OPAE_SENSOR_LOW_FATAL_VALID 0x10
+#define OPAE_SENSOR_HYSTERESIS_VALID 0x20
+
+struct opae_sensor_info {
+ TAILQ_ENTRY(opae_sensor_info) node;
+ const char *name;
+ const char *type;
+ unsigned int id;
+ unsigned int high_fatal;
+ unsigned int high_warn;
+ unsigned int low_fatal;
+ unsigned int low_warn;
+ unsigned int hysteresis;
+ unsigned int multiplier;
+ unsigned int flags;
+ unsigned int value;
+ unsigned int value_reg;
+};
+
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_osdep.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_osdep.h
new file mode 100644
index 000000000..416cef01c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_osdep.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_OSDEP_H
+#define _OPAE_OSDEP_H
+
+#include <string.h>
+#include <stdbool.h>
+
+#ifdef RTE_LIBRTE_EAL
+#include "osdep_rte/osdep_generic.h"
+#else
+#include "osdep_raw/osdep_generic.h"
+#endif
+
+#define __iomem
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+
+struct uuid {
+ u8 b[16];
+};
+
+#ifndef LINUX_MACROS
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
+#endif
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#ifndef GENMASK
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif /* GENMASK */
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+#endif /* GENMASK_ULL */
+#endif /* LINUX_MACROS */
+
+#define SET_FIELD(m, v) (((v) << (__builtin_ffsll(m) - 1)) & (m))
+#define GET_FIELD(m, v) (((v) & (m)) >> (__builtin_ffsll(m) - 1))
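+/*
+ * Example: with m = GENMASK(23, 16) (0x00ff0000), GET_FIELD(m, v)
+ * shifts (v & m) down by 16 (__builtin_ffsll(m) - 1), so
+ * GET_FIELD(m, 0x00ab0000) == 0xab and SET_FIELD(m, 0xab) == 0x00ab0000.
+ */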
+
+#define dev_err(x, args...) dev_printf(ERR, args)
+#define dev_info(x, args...) dev_printf(INFO, args)
+#define dev_warn(x, args...) dev_printf(WARNING, args)
+#define dev_debug(x, args...) dev_printf(DEBUG, args)
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warn(0, y, ##args)
+#define pr_info(y, args...) dev_info(0, y, ##args)
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+ int ret = !!(x); \
+ if (unlikely(ret)) \
+ pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define udelay(x) opae_udelay(x)
+#define msleep(x) opae_udelay(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#define time_after(a, b) ((long)((b) - (a)) < 0)
+#define time_before(a, b) time_after(b, a)
+#define opae_memset(a, b, c) memset((a), (b), (c))
+
+#define opae_readq_poll_timeout(addr, val, cond, invl, timeout)\
+({ \
+ int wait = 0; \
+ for (; wait <= timeout; wait += invl) { \
+ (val) = opae_readq(addr); \
+ if (cond) \
+ break; \
+ udelay(invl); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
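+/*
+ * Usage sketch (hypothetical register/bit names): poll every 10us for
+ * up to 1000us until a DONE bit is set in a 64-bit status register:
+ *
+ *	u64 v;
+ *	int ret = opae_readq_poll_timeout(base + STATUS_REG, v,
+ *					  v & STATUS_DONE, 10, 1000);
+ */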
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi.c
new file mode 100644
index 000000000..bfdc83e6c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi.c
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#include "opae_osdep.h"
+#include "opae_spi.h"
+
+static int nios_spi_indirect_read(struct altera_spi_device *dev, u32 reg,
+ u32 *val)
+{
+ u64 ctrl = 0;
+ u64 stat = 0;
+ int loops = SPI_MAX_RETRY;
+
+ ctrl = NIOS_SPI_RD | ((u64)reg << 32);
+ opae_writeq(ctrl, dev->regs + NIOS_SPI_CTRL);
+
+ stat = opae_readq(dev->regs + NIOS_SPI_STAT);
+ while (!(stat & NIOS_SPI_VALID) && --loops)
+ stat = opae_readq(dev->regs + NIOS_SPI_STAT);
+
+ *val = stat & NIOS_SPI_READ_DATA;
+
+ return loops ? 0 : -ETIMEDOUT;
+}
+
+static int nios_spi_indirect_write(struct altera_spi_device *dev, u32 reg,
+ u32 value)
+{
+ u64 ctrl = 0;
+ u64 stat = 0;
+ int loops = SPI_MAX_RETRY;
+
+ ctrl |= NIOS_SPI_WR | (u64)reg << 32;
+ ctrl |= value & NIOS_SPI_WRITE_DATA;
+
+ opae_writeq(ctrl, dev->regs + NIOS_SPI_CTRL);
+
+ stat = opae_readq(dev->regs + NIOS_SPI_STAT);
+ while (!(stat & NIOS_SPI_VALID) && --loops)
+ stat = opae_readq(dev->regs + NIOS_SPI_STAT);
+
+ return loops ? 0 : -ETIMEDOUT;
+}
+
+static int spi_indirect_write(struct altera_spi_device *dev, u32 reg,
+ u32 value)
+{
+ u64 ctrl;
+
+ opae_writeq(value & WRITE_DATA_MASK, dev->regs + SPI_WRITE);
+
+ ctrl = CTRL_W | (reg >> 2);
+ opae_writeq(ctrl, dev->regs + SPI_CTRL);
+
+ return 0;
+}
+
+static int spi_indirect_read(struct altera_spi_device *dev, u32 reg,
+ u32 *val)
+{
+ u64 tmp;
+ u64 ctrl;
+
+ ctrl = CTRL_R | (reg >> 2);
+ opae_writeq(ctrl, dev->regs + SPI_CTRL);
+
+	/**
+	 * FIXME: read twice to avoid a HW timing issue; the first read
+	 * may return stale data. This is a short-term workaround and
+	 * must be removed once the hardware fix is done.
+	 */
+	tmp = opae_readq(dev->regs + SPI_READ);
+	tmp = opae_readq(dev->regs + SPI_READ);
+
+ *val = (u32)tmp;
+
+ return 0;
+}
+
+int spi_reg_write(struct altera_spi_device *dev, u32 reg,
+ u32 value)
+{
+ return dev->reg_write(dev, reg, value);
+}
+
+int spi_reg_read(struct altera_spi_device *dev, u32 reg,
+ u32 *val)
+{
+ return dev->reg_read(dev, reg, val);
+}
+
+void spi_cs_activate(struct altera_spi_device *dev, unsigned int chip_select)
+{
+ spi_reg_write(dev, ALTERA_SPI_SLAVE_SEL, 1 << chip_select);
+ spi_reg_write(dev, ALTERA_SPI_CONTROL, ALTERA_SPI_CONTROL_SSO_MSK);
+}
+
+void spi_cs_deactivate(struct altera_spi_device *dev)
+{
+ spi_reg_write(dev, ALTERA_SPI_CONTROL, 0);
+}
+
+static int spi_flush_rx(struct altera_spi_device *dev)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = spi_reg_read(dev, ALTERA_SPI_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK) {
+ ret = spi_reg_read(dev, ALTERA_SPI_RXDATA, &val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static unsigned int spi_write_bytes(struct altera_spi_device *dev, int count)
+{
+ unsigned int val = 0;
+ u16 *p16;
+ u32 *p32;
+
+ if (dev->txbuf) {
+ switch (dev->data_width) {
+ case 1:
+ val = dev->txbuf[count];
+ break;
+ case 2:
+ p16 = (u16 *)(dev->txbuf + 2*count);
+ val = *p16;
+ if (dev->endian == SPI_BIG_ENDIAN)
+ val = cpu_to_be16(val);
+ break;
+		case 4:
+			p32 = (u32 *)(dev->txbuf + 4*count);
+			val = *p32;
+			/* mirror the endian handling of the read path */
+			if (dev->endian == SPI_BIG_ENDIAN)
+				val = cpu_to_be32(val);
+			break;
+ }
+ }
+
+ return val;
+}
+
+static void spi_fill_readbuffer(struct altera_spi_device *dev,
+ unsigned int value, int count)
+{
+ u16 *p16;
+ u32 *p32;
+
+ if (dev->rxbuf) {
+ switch (dev->data_width) {
+ case 1:
+ dev->rxbuf[count] = value;
+ break;
+ case 2:
+ p16 = (u16 *)(dev->rxbuf + 2*count);
+ if (dev->endian == SPI_BIG_ENDIAN)
+ *p16 = cpu_to_be16((u16)value);
+ else
+ *p16 = (u16)value;
+ break;
+ case 4:
+ p32 = (u32 *)(dev->rxbuf + 4*count);
+ if (dev->endian == SPI_BIG_ENDIAN)
+ *p32 = cpu_to_be32(value);
+ else
+ *p32 = value;
+ break;
+ }
+ }
+}
+
+static int spi_txrx(struct altera_spi_device *dev)
+{
+ unsigned int count = 0;
+ u32 rxd;
+ unsigned int tx_data;
+ u32 status;
+ int ret;
+
+ while (count < dev->len) {
+ tx_data = spi_write_bytes(dev, count);
+ spi_reg_write(dev, ALTERA_SPI_TXDATA, tx_data);
+
+ while (1) {
+ ret = spi_reg_read(dev, ALTERA_SPI_STATUS, &status);
+ if (ret)
+ return -EIO;
+ if (status & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
+ }
+
+ ret = spi_reg_read(dev, ALTERA_SPI_RXDATA, &rxd);
+ if (ret)
+ return -EIO;
+
+ spi_fill_readbuffer(dev, rxd, count);
+
+ count++;
+ }
+
+ return 0;
+}
+
+int spi_command(struct altera_spi_device *dev, unsigned int chip_select,
+ unsigned int wlen, void *wdata,
+ unsigned int rlen, void *rdata)
+{
+ if (((wlen > 0) && !wdata) || ((rlen > 0) && !rdata)) {
+ dev_err(dev, "error on spi command checking\n");
+ return -EINVAL;
+ }
+
+ wlen = wlen / dev->data_width;
+ rlen = rlen / dev->data_width;
+
+ /* flush rx buffer */
+ spi_flush_rx(dev);
+
+ spi_cs_activate(dev, chip_select);
+ if (wlen) {
+ dev->txbuf = wdata;
+ dev->rxbuf = rdata;
+ dev->len = wlen;
+ spi_txrx(dev);
+ }
+ if (rlen) {
+ dev->rxbuf = rdata;
+ dev->txbuf = NULL;
+ dev->len = rlen;
+ spi_txrx(dev);
+ }
+ spi_cs_deactivate(dev);
+ return 0;
+}
+
+struct altera_spi_device *altera_spi_alloc(void *base, int type)
+{
+ struct altera_spi_device *spi_dev =
+ opae_malloc(sizeof(struct altera_spi_device));
+
+ if (!spi_dev)
+ return NULL;
+
+ spi_dev->regs = base;
+
+ switch (type) {
+ case TYPE_SPI:
+ spi_dev->reg_read = spi_indirect_read;
+ spi_dev->reg_write = spi_indirect_write;
+ break;
+ case TYPE_NIOS_SPI:
+ spi_dev->reg_read = nios_spi_indirect_read;
+ spi_dev->reg_write = nios_spi_indirect_write;
+ break;
+ default:
+		dev_err(spi_dev, "%s: invalid SPI type\n", __func__);
+ goto error;
+ }
+
+ return spi_dev;
+
+error:
+ altera_spi_release(spi_dev);
+ return NULL;
+}
+
+void altera_spi_init(struct altera_spi_device *spi_dev)
+{
+ spi_dev->spi_param.info = opae_readq(spi_dev->regs + SPI_CORE_PARAM);
+
+ spi_dev->data_width = spi_dev->spi_param.data_width / 8;
+ spi_dev->endian = spi_dev->spi_param.endian;
+ spi_dev->num_chipselect = spi_dev->spi_param.num_chipselect;
+ dev_info(spi_dev, "spi param: type=%d, data width:%d, endian:%d, clock_polarity=%d, clock=%dMHz, chips=%d, cpha=%d\n",
+ spi_dev->spi_param.type,
+ spi_dev->data_width, spi_dev->endian,
+ spi_dev->spi_param.clock_polarity,
+ spi_dev->spi_param.clock,
+ spi_dev->num_chipselect,
+ spi_dev->spi_param.clock_phase);
+
+ /* clear */
+ spi_reg_write(spi_dev, ALTERA_SPI_CONTROL, 0);
+ spi_reg_write(spi_dev, ALTERA_SPI_STATUS, 0);
+ /* flush rxdata */
+ spi_flush_rx(spi_dev);
+}
+
+void altera_spi_release(struct altera_spi_device *dev)
+{
+ if (dev)
+ opae_free(dev);
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi.h b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi.h
new file mode 100644
index 000000000..d20a4c3ed
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#ifndef _OPAE_SPI_H
+#define _OPAE_SPI_H
+
+#include "opae_osdep.h"
+
+#define ALTERA_SPI_RXDATA 0
+#define ALTERA_SPI_TXDATA 4
+#define ALTERA_SPI_STATUS 8
+#define ALTERA_SPI_CONTROL 12
+#define ALTERA_SPI_SLAVE_SEL 20
+
+#define ALTERA_SPI_STATUS_ROE_MSK 0x8
+#define ALTERA_SPI_STATUS_TOE_MSK 0x10
+#define ALTERA_SPI_STATUS_TMT_MSK 0x20
+#define ALTERA_SPI_STATUS_TRDY_MSK 0x40
+#define ALTERA_SPI_STATUS_RRDY_MSK 0x80
+#define ALTERA_SPI_STATUS_E_MSK 0x100
+
+#define ALTERA_SPI_CONTROL_IROE_MSK 0x8
+#define ALTERA_SPI_CONTROL_ITOE_MSK 0x10
+#define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40
+#define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80
+#define ALTERA_SPI_CONTROL_IE_MSK 0x100
+#define ALTERA_SPI_CONTROL_SSO_MSK 0x400
+
+#define SPI_CORE_PARAM 0x8
+#define SPI_CTRL 0x10
+#define CTRL_R BIT_ULL(9)
+#define CTRL_W BIT_ULL(8)
+#define CTRL_ADDR_MASK GENMASK_ULL(2, 0)
+#define SPI_READ 0x18
+#define READ_DATA_VALID BIT_ULL(32)
+#define READ_DATA_MASK GENMASK_ULL(31, 0)
+#define SPI_WRITE 0x20
+#define WRITE_DATA_MASK GENMASK_ULL(31, 0)
+
+#define SPI_MAX_RETRY 1000000
+
+#define TYPE_SPI 0
+#define TYPE_NIOS_SPI 1
+
+struct spi_core_param {
+ union {
+ u64 info;
+ struct {
+ u8 type:1;
+ u8 endian:1;
+ u8 data_width:6;
+ u8 num_chipselect:6;
+ u8 clock_polarity:1;
+ u8 clock_phase:1;
+ u8 stages:2;
+ u8 resvd:4;
+ u16 clock:10;
+ u16 peripheral_id:16;
+ u8 controller_type:1;
+ u16 resvd1:15;
+ };
+ };
+};
+
+struct altera_spi_device {
+ u8 *regs;
+ struct spi_core_param spi_param;
+ int data_width; /* how many bytes for data width */
+ int endian;
+ #define SPI_BIG_ENDIAN 0
+ #define SPI_LITTLE_ENDIAN 1
+ int num_chipselect;
+ unsigned char *rxbuf;
+ unsigned char *txbuf;
+ unsigned int len;
+ int (*reg_read)(struct altera_spi_device *dev, u32 reg, u32 *val);
+ int (*reg_write)(struct altera_spi_device *dev, u32 reg,
+ u32 value);
+};
+
+#define HEADER_LEN 8
+#define RESPONSE_LEN 4
+#define SPI_TRANSACTION_MAX_LEN 1024
+#define TRAN_SEND_MAX_LEN (SPI_TRANSACTION_MAX_LEN + HEADER_LEN)
+#define TRAN_RESP_MAX_LEN SPI_TRANSACTION_MAX_LEN
+#define PACKET_SEND_MAX_LEN (2*TRAN_SEND_MAX_LEN + 4)
+#define PACKET_RESP_MAX_LEN (2*TRAN_RESP_MAX_LEN + 4)
+#define BYTES_SEND_MAX_LEN (2*PACKET_SEND_MAX_LEN)
+#define BYTES_RESP_MAX_LEN (2*PACKET_RESP_MAX_LEN)
+
+struct spi_tran_buffer {
+ unsigned char tran_send[TRAN_SEND_MAX_LEN];
+ unsigned char tran_resp[TRAN_RESP_MAX_LEN];
+ unsigned char packet_send[PACKET_SEND_MAX_LEN];
+ unsigned char packet_resp[PACKET_RESP_MAX_LEN];
+ unsigned char bytes_send[BYTES_SEND_MAX_LEN];
+ unsigned char bytes_resp[2*BYTES_RESP_MAX_LEN];
+};
+
+struct spi_transaction_dev {
+ struct altera_spi_device *dev;
+ int chipselect;
+ struct spi_tran_buffer *buffer;
+ pthread_mutex_t lock;
+};
+
+struct spi_tran_header {
+ u8 trans_type;
+ u8 reserve;
+ u16 size;
+ u32 addr;
+};
+
+int spi_command(struct altera_spi_device *dev, unsigned int chip_select,
+ unsigned int wlen, void *wdata, unsigned int rlen, void *rdata);
+void spi_cs_deactivate(struct altera_spi_device *dev);
+void spi_cs_activate(struct altera_spi_device *dev, unsigned int chip_select);
+struct altera_spi_device *altera_spi_alloc(void *base, int type);
+void altera_spi_init(struct altera_spi_device *dev);
+void altera_spi_release(struct altera_spi_device *dev);
+int spi_transaction_read(struct spi_transaction_dev *dev, unsigned int addr,
+ unsigned int size, unsigned char *data);
+int spi_transaction_write(struct spi_transaction_dev *dev, unsigned int addr,
+ unsigned int size, unsigned char *data);
+struct spi_transaction_dev *spi_transaction_init(struct altera_spi_device *dev,
+ int chipselect);
+void spi_transaction_remove(struct spi_transaction_dev *dev);
+int spi_reg_write(struct altera_spi_device *dev, u32 reg,
+ u32 value);
+int spi_reg_read(struct altera_spi_device *dev, u32 reg, u32 *val);
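+/*
+ * Bring-up sketch (hypothetical MMIO base address): allocate, init,
+ * then run transactions over a chipselect:
+ *
+ *	struct altera_spi_device *spi = altera_spi_alloc(bar_addr, TYPE_SPI);
+ *	struct spi_transaction_dev *tran;
+ *	if (spi) {
+ *		altera_spi_init(spi);
+ *		tran = spi_transaction_init(spi, 0);
+ *		...
+ *		spi_transaction_remove(tran);
+ *		altera_spi_release(spi);
+ *	}
+ */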
+
+#define NIOS_SPI_PARAM 0x8
+#define CONTROL_TYPE BIT_ULL(48)
+#define PERI_ID GENMASK_ULL(47, 32)
+#define SPI_CLK GENMASK_ULL(31, 22)
+#define SYNC_STAGES GENMASK_ULL(17, 16)
+#define CLOCK_PHASE BIT_ULL(15)
+#define CLOCK_POLARITY BIT_ULL(14)
+#define NUM_SELECT GENMASK_ULL(13, 8)
+#define DATA_WIDTH GENMASK_ULL(7, 2)
+#define SHIFT_DIRECTION BIT_ULL(1)
+#define SPI_TYPE BIT_ULL(0)
+#define NIOS_SPI_CTRL 0x10
+#define NIOS_SPI_RD (0x1ULL << 62)
+#define NIOS_SPI_WR (0x2ULL << 62)
+#define NIOS_SPI_COMMAND GENMASK_ULL(63, 62)
+#define NIOS_SPI_ADDR GENMASK_ULL(44, 32)
+#define NIOS_SPI_WRITE_DATA GENMASK_ULL(31, 0)
+#define NIOS_SPI_STAT 0x18
+#define NIOS_SPI_VALID BIT_ULL(32)
+#define NIOS_SPI_READ_DATA GENMASK_ULL(31, 0)
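+/*
+ * Example: an indirect read of register 0x10 writes
+ * ctrl = NIOS_SPI_RD | (0x10ULL << 32) to NIOS_SPI_CTRL, then polls
+ * NIOS_SPI_STAT until NIOS_SPI_VALID is set and takes the result from
+ * the NIOS_SPI_READ_DATA field (see nios_spi_indirect_read()).
+ */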
+
+#define NIOS_INIT 0x1000
+#define REQ_FEC_MODE GENMASK(23, 8)
+#define FEC_MODE_NO 0x0
+#define FEC_MODE_KR 0x5555
+#define FEC_MODE_RS 0xaaaa
+#define NIOS_INIT_START BIT(1)
+#define NIOS_INIT_DONE BIT(0)
+#define NIOS_VERSION 0x1004
+#define NIOS_VERSION_MAJOR_SHIFT 28
+#define NIOS_VERSION_MAJOR GENMASK(31, 28)
+#define NIOS_VERSION_MINOR GENMASK(27, 24)
+#define NIOS_VERSION_PATCH GENMASK(23, 20)
+#define PKVL_A_MODE_STS 0x1020
+#define PKVL_B_MODE_STS 0x1024
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c
new file mode 100644
index 000000000..013efee3e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/opae_spi_transaction.c
@@ -0,0 +1,498 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#include "opae_spi.h"
+#include "ifpga_compat.h"
+
+/* transaction opcodes */
+#define SPI_TRAN_SEQ_WRITE 0x04 /* SPI transaction sequential write */
+#define SPI_TRAN_SEQ_READ 0x14 /* SPI transaction sequential read */
+#define SPI_TRAN_NON_SEQ_WRITE 0x00 /* SPI transaction non-sequential write */
+#define SPI_TRAN_NON_SEQ_READ 0x10 /* SPI transaction non-sequential read*/
+
+/* special packet characters */
+#define SPI_PACKET_SOP 0x7a
+#define SPI_PACKET_EOP 0x7b
+#define SPI_PACKET_CHANNEL 0x7c
+#define SPI_PACKET_ESC 0x7d
+
+/* special byte characters */
+#define SPI_BYTE_IDLE 0x4a
+#define SPI_BYTE_ESC 0x4d
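+/*
+ * Escaping example: a payload byte equal to SPI_PACKET_SOP (0x7a) is
+ * sent at packet level as the pair { SPI_PACKET_ESC, 0x7a ^ 0x20 } =
+ * { 0x7d, 0x5a }; at byte level the special characters 0x4a/0x4d are
+ * likewise followed by their value ^ 0x20 (see byte_to_core_convert()).
+ */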
+
+#define SPI_REG_BYTES 4
+
+#define INIT_SPI_TRAN_HEADER(_type, _size, _addr) \
+({ \
+	header.trans_type = (_type); \
+	header.reserve = 0; \
+	header.size = cpu_to_be16(_size); \
+	header.addr = cpu_to_be32(_addr); \
+})
+
+#ifdef OPAE_SPI_DEBUG
+static void print_buffer(const char *string, void *buffer, int len)
+{
+ int i;
+ unsigned char *p = buffer;
+
+ printf("%s print buffer, len=%d\n", string, len);
+
+ for (i = 0; i < len; i++)
+ printf("%x ", *(p+i));
+ printf("\n");
+}
+#else
+static void print_buffer(const char *string, void *buffer, int len)
+{
+ UNUSED(string);
+ UNUSED(buffer);
+ UNUSED(len);
+}
+#endif
+
+static unsigned char xor_20(unsigned char val)
+{
+ return val^0x20;
+}
+
+static void reorder_phy_data(u8 bits_per_word,
+ void *buf, unsigned int len)
+{
+ unsigned int count = len / (bits_per_word/8);
+ u32 *p;
+
+ if (bits_per_word == 32) {
+ p = (u32 *)buf;
+ while (count--) {
+ *p = cpu_to_be32(*p);
+ p++;
+ }
+ }
+}
+
+enum {
+ SPI_FOUND_SOP,
+ SPI_FOUND_EOP,
+ SPI_NOT_FOUND,
+};
+
+static int resp_find_sop_eop(unsigned char *resp, unsigned int len,
+ int flags)
+{
+ int ret = SPI_NOT_FOUND;
+
+ unsigned char *b = resp;
+
+ /* find SOP */
+ if (flags != SPI_FOUND_SOP) {
+ while (b < resp + len && *b != SPI_PACKET_SOP)
+ b++;
+
+ if (*b != SPI_PACKET_SOP)
+ goto done;
+
+ ret = SPI_FOUND_SOP;
+ }
+
+ /* find EOP */
+ while (b < resp + len && *b != SPI_PACKET_EOP)
+ b++;
+
+ if (*b != SPI_PACKET_EOP)
+ goto done;
+
+ ret = SPI_FOUND_EOP;
+
+done:
+ return ret;
+}
+
+static void phy_tx_pad(unsigned char *phy_buf, unsigned int phy_buf_len,
+ unsigned int *aligned_len)
+{
+ unsigned char *p = &phy_buf[phy_buf_len - 1], *dst_p;
+
+ *aligned_len = IFPGA_ALIGN(phy_buf_len, 4);
+
+ if (*aligned_len == phy_buf_len)
+ return;
+
+ dst_p = &phy_buf[*aligned_len - 1];
+
+ /* move EOP and bytes after EOP to the end of aligned size */
+ while (p > phy_buf) {
+ *dst_p = *p;
+
+ if (*p == SPI_PACKET_EOP)
+ break;
+
+ p--;
+ dst_p--;
+ }
+
+ /* fill the hole with PHY_IDLE */
+ while (p < dst_p)
+ *p++ = SPI_BYTE_IDLE;
+}
+
+static int byte_to_core_convert(struct spi_transaction_dev *dev,
+ unsigned int send_len, unsigned char *send_data,
+ unsigned int resp_len, unsigned char *resp_data,
+ unsigned int *valid_resp_len)
+{
+ unsigned int i;
+ int ret = 0;
+ unsigned char *send_packet = dev->buffer->bytes_send;
+ unsigned char *resp_packet = dev->buffer->bytes_resp;
+ unsigned char *p;
+ unsigned char current_byte;
+ unsigned char *tx_buffer;
+ unsigned int tx_len = 0;
+ unsigned char *rx_buffer;
+ unsigned int rx_len = 0;
+ int retry = 0;
+ int spi_flags;
+	unsigned long timeout = rte_get_timer_cycles() +
+				msecs_to_timer_cycles(1000);
+ unsigned long ticks;
+ unsigned int resp_max_len = 2 * resp_len;
+
+ print_buffer("before bytes:", send_data, send_len);
+
+ p = send_packet;
+
+ for (i = 0; i < send_len; i++) {
+ current_byte = send_data[i];
+ switch (current_byte) {
+ case SPI_BYTE_IDLE:
+ *p++ = SPI_BYTE_IDLE;
+ *p++ = xor_20(current_byte);
+ break;
+ case SPI_BYTE_ESC:
+ *p++ = SPI_BYTE_ESC;
+ *p++ = xor_20(current_byte);
+ break;
+ default:
+ *p++ = current_byte;
+ break;
+ }
+ }
+
+ tx_len = p - send_packet;
+
+ print_buffer("before spi:", send_packet, tx_len);
+
+ phy_tx_pad(send_packet, tx_len, &tx_len);
+ print_buffer("after pad:", send_packet, tx_len);
+
+ reorder_phy_data(32, send_packet, tx_len);
+
+ print_buffer("after order to spi:", send_packet, tx_len);
+
+ /* call spi */
+ tx_buffer = send_packet;
+ rx_buffer = resp_packet;
+ rx_len = resp_max_len;
+ spi_flags = SPI_NOT_FOUND;
+
+read_again:
+ ret = spi_command(dev->dev, dev->chipselect, tx_len, tx_buffer,
+ rx_len, rx_buffer);
+ if (ret)
+ return -EBUSY;
+
+ print_buffer("read from spi:", rx_buffer, rx_len);
+
+	/* look for SOP first */
+ ret = resp_find_sop_eop(rx_buffer, rx_len - 1, spi_flags);
+ if (ret != SPI_FOUND_EOP) {
+ tx_buffer = NULL;
+ tx_len = 0;
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, timeout) &&
+ retry++ > SPI_MAX_RETRY) {
+			dev_err(NULL, "retried %d times without valid packet data\n",
+ retry);
+ return -EBUSY;
+ }
+
+ if (ret == SPI_FOUND_SOP) {
+ rx_buffer += rx_len;
+ resp_max_len += rx_len;
+ }
+
+ spi_flags = ret;
+ goto read_again;
+ }
+
+ print_buffer("found valid data:", resp_packet, resp_max_len);
+
+ /* analyze response packet */
+ i = 0;
+ p = resp_data;
+ while (i < resp_max_len) {
+ current_byte = resp_packet[i];
+ switch (current_byte) {
+ case SPI_BYTE_IDLE:
+ i++;
+ break;
+ case SPI_BYTE_ESC:
+ i++;
+ current_byte = resp_packet[i];
+ *p++ = xor_20(current_byte);
+ i++;
+ break;
+ default:
+ *p++ = current_byte;
+ i++;
+ break;
+ }
+ }
+
+	/* receiving only "4a" (idle) bytes means there is no valid data */
+	*valid_resp_len = p - resp_data;
+	if (*valid_resp_len == 0) {
+		dev_err(NULL, "error: response packet without valid data\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int packet_to_byte_convert(struct spi_transaction_dev *dev,
+ unsigned int send_len, unsigned char *send_buf,
+ unsigned int resp_len, unsigned char *resp_buf,
+ unsigned int *valid)
+{
+ int ret = 0;
+ unsigned int i;
+ unsigned char current_byte;
+ unsigned int resp_max_len;
+ unsigned char *send_packet = dev->buffer->packet_send;
+ unsigned char *resp_packet = dev->buffer->packet_resp;
+ unsigned char *p;
+ unsigned int valid_resp_len = 0;
+
+ print_buffer("before packet:", send_buf, send_len);
+
+ resp_max_len = 2 * resp_len + 4;
+
+ p = send_packet;
+
+ /* SOP header */
+ *p++ = SPI_PACKET_SOP;
+
+ *p++ = SPI_PACKET_CHANNEL;
+ *p++ = 0;
+
+ /* append the data into a packet */
+ for (i = 0; i < send_len; i++) {
+ current_byte = send_buf[i];
+
+ /* EOP for last byte */
+ if (i == send_len - 1)
+ *p++ = SPI_PACKET_EOP;
+
+ switch (current_byte) {
+ case SPI_PACKET_SOP:
+ case SPI_PACKET_EOP:
+ case SPI_PACKET_CHANNEL:
+ case SPI_PACKET_ESC:
+ *p++ = SPI_PACKET_ESC;
+ *p++ = xor_20(current_byte);
+ break;
+ default:
+ *p++ = current_byte;
+ }
+ }
+
+ ret = byte_to_core_convert(dev, p - send_packet,
+ send_packet, resp_max_len, resp_packet,
+ &valid_resp_len);
+ if (ret)
+ return -EBUSY;
+
+	print_buffer("after byte convert:", resp_packet, valid_resp_len);
+
+ /* analyze the response packet */
+ p = resp_buf;
+
+ /* look for SOP */
+ for (i = 0; i < valid_resp_len; i++) {
+ if (resp_packet[i] == SPI_PACKET_SOP)
+ break;
+ }
+
+ if (i == valid_resp_len) {
+ dev_err(NULL, "error on analyze response packet 0x%x\n",
+ resp_packet[i]);
+ return -EINVAL;
+ }
+
+ i++;
+
+ /* continue parsing data after SOP */
+ while (i < valid_resp_len) {
+ current_byte = resp_packet[i];
+
+ switch (current_byte) {
+ case SPI_PACKET_ESC:
+ case SPI_PACKET_CHANNEL:
+ case SPI_PACKET_SOP:
+ i++;
+ current_byte = resp_packet[i];
+ *p++ = xor_20(current_byte);
+ i++;
+ break;
+ case SPI_PACKET_EOP:
+ i++;
+ current_byte = resp_packet[i];
+ if (current_byte == SPI_PACKET_ESC ||
+ current_byte == SPI_PACKET_CHANNEL ||
+ current_byte == SPI_PACKET_SOP) {
+ i++;
+ current_byte = resp_packet[i];
+ *p++ = xor_20(current_byte);
+ } else
+ *p++ = current_byte;
+ i = valid_resp_len;
+ break;
+ default:
+ *p++ = current_byte;
+ i++;
+ }
+
+ }
+
+ *valid = p - resp_buf;
+
+ print_buffer("after packet:", resp_buf, *valid);
+
+ return ret;
+}
+
+static int do_transaction(struct spi_transaction_dev *dev, unsigned int addr,
+ unsigned int size, unsigned char *data,
+ unsigned int trans_type)
+{
+
+ struct spi_tran_header header;
+ unsigned char *transaction = dev->buffer->tran_send;
+ unsigned char *response = dev->buffer->tran_resp;
+ unsigned char *p;
+ int ret = 0;
+ unsigned int i;
+ unsigned int valid_len = 0;
+
+	/* make transaction header */
+ INIT_SPI_TRAN_HEADER(trans_type, size, addr);
+
+ /* fill the header */
+ p = transaction;
+ opae_memcpy(p, &header, sizeof(struct spi_tran_header));
+ p = p + sizeof(struct spi_tran_header);
+
+ switch (trans_type) {
+ case SPI_TRAN_SEQ_WRITE:
+ case SPI_TRAN_NON_SEQ_WRITE:
+ for (i = 0; i < size; i++)
+ *p++ = *data++;
+
+		ret = packet_to_byte_convert(dev, size + HEADER_LEN,
+ transaction, RESPONSE_LEN, response,
+ &valid_len);
+ if (ret)
+ return -EBUSY;
+
+ /* check the result */
+ if (size != ((unsigned int)(response[2] & 0xff) << 8 |
+ (unsigned int)(response[3] & 0xff)))
+ ret = -EBUSY;
+
+ break;
+ case SPI_TRAN_SEQ_READ:
+ case SPI_TRAN_NON_SEQ_READ:
+		ret = packet_to_byte_convert(dev, HEADER_LEN,
+ transaction, size, response,
+ &valid_len);
+ if (ret || valid_len != size)
+ return -EBUSY;
+
+ for (i = 0; i < size; i++)
+ *data++ = *response++;
+
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+int spi_transaction_read(struct spi_transaction_dev *dev, unsigned int addr,
+ unsigned int size, unsigned char *data)
+{
+ int ret;
+
+ pthread_mutex_lock(&dev->lock);
+ ret = do_transaction(dev, addr, size, data,
+ (size > SPI_REG_BYTES) ?
+ SPI_TRAN_SEQ_READ : SPI_TRAN_NON_SEQ_READ);
+ pthread_mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+int spi_transaction_write(struct spi_transaction_dev *dev, unsigned int addr,
+ unsigned int size, unsigned char *data)
+{
+ int ret;
+
+ pthread_mutex_lock(&dev->lock);
+ ret = do_transaction(dev, addr, size, data,
+ (size > SPI_REG_BYTES) ?
+ SPI_TRAN_SEQ_WRITE : SPI_TRAN_NON_SEQ_WRITE);
+ pthread_mutex_unlock(&dev->lock);
+
+ return ret;
+}
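+/*
+ * Usage sketch (reg_addr hypothetical): a 4-byte register access
+ * (size == SPI_REG_BYTES) takes the non-sequential path, larger
+ * transfers the sequential one:
+ *
+ *	unsigned int val;
+ *	spi_transaction_read(dev, reg_addr, 4, (unsigned char *)&val);
+ */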
+
+struct spi_transaction_dev *spi_transaction_init(struct altera_spi_device *dev,
+ int chipselect)
+{
+ struct spi_transaction_dev *spi_tran_dev;
+ int ret;
+
+ spi_tran_dev = opae_malloc(sizeof(struct spi_transaction_dev));
+ if (!spi_tran_dev)
+ return NULL;
+
+ spi_tran_dev->dev = dev;
+ spi_tran_dev->chipselect = chipselect;
+
+ spi_tran_dev->buffer = opae_malloc(sizeof(struct spi_tran_buffer));
+ if (!spi_tran_dev->buffer)
+ goto err;
+
+ ret = pthread_mutex_init(&spi_tran_dev->lock, NULL);
+ if (ret) {
+		dev_err(spi_tran_dev, "failed to init mutex lock\n");
+		goto err;
+	}
+
+	return spi_tran_dev;
+
+err:
+	/* buffer may be NULL if its allocation failed */
+	if (spi_tran_dev->buffer)
+		opae_free(spi_tran_dev->buffer);
+	opae_free(spi_tran_dev);
+	return NULL;
+}
+
+void spi_transaction_remove(struct spi_transaction_dev *dev)
+{
+ if (dev && dev->buffer)
+ opae_free(dev->buffer);
+ if (dev) {
+ pthread_mutex_destroy(&dev->lock);
+ opae_free(dev);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/osdep_raw/osdep_generic.h b/src/spdk/dpdk/drivers/raw/ifpga/base/osdep_raw/osdep_generic.h
new file mode 100644
index 000000000..6769109c7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/osdep_raw/osdep_generic.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RAW_GENERIC_H
+#define _OSDEP_RAW_GENERIC_H
+
+#define compiler_barrier() asm volatile ("" : : : "memory")
+
+#define io_wmb() compiler_barrier()
+#define io_rmb() compiler_barrier()
+
+static inline uint8_t opae_readb(const volatile void *addr)
+{
+ uint8_t val;
+
+ val = *(const volatile uint8_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint16_t opae_readw(const volatile void *addr)
+{
+ uint16_t val;
+
+ val = *(const volatile uint16_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint32_t opae_readl(const volatile void *addr)
+{
+ uint32_t val;
+
+ val = *(const volatile uint32_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint64_t opae_readq(const volatile void *addr)
+{
+ uint64_t val;
+
+ val = *(const volatile uint64_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline void opae_writeb(uint8_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint8_t *)addr = value;
+}
+
+static inline void opae_writew(uint16_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint16_t *)addr = value;
+}
+
+static inline void opae_writel(uint32_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint32_t *)addr = value;
+}
+
+static inline void opae_writeq(uint64_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint64_t *)addr = value;
+}
+
+#define opae_free(addr) free(addr)
+#define opae_memcpy(a, b, c) memcpy((a), (b), (c))
+
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/base/osdep_rte/osdep_generic.h b/src/spdk/dpdk/drivers/raw/ifpga/base/osdep_rte/osdep_generic.h
new file mode 100644
index 000000000..3ff49a892
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/base/osdep_rte/osdep_generic.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RTE_GENERIC_H
+#define _OSDEP_RTE_GENERIC_H
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_memcpy.h>
+
+#define dev_printf(level, fmt, args...) \
+ RTE_LOG(level, PMD, "osdep_rte: " fmt, ## args)
+
+#define osdep_panic(...) rte_panic(__VA_ARGS__)
+
+#define opae_udelay(x) rte_delay_us(x)
+
+#define opae_readb(addr) rte_read8(addr)
+#define opae_readw(addr) rte_read16(addr)
+#define opae_readl(addr) rte_read32(addr)
+#define opae_readq(addr) rte_read64(addr)
+#define opae_writeb(value, addr) rte_write8(value, addr)
+#define opae_writew(value, addr) rte_write16(value, addr)
+#define opae_writel(value, addr) rte_write32(value, addr)
+#define opae_writeq(value, addr) rte_write64(value, addr)
+
+#define opae_malloc(size) rte_malloc(NULL, size, 0)
+#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)
+#define opae_free(addr) rte_free(addr)
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define spinlock_t rte_spinlock_t
+#define spinlock_init(x) rte_spinlock_init(x)
+#define spinlock_lock(x) rte_spinlock_lock(x)
+#define spinlock_unlock(x) rte_spinlock_unlock(x)
+
+#define cpu_to_be16(o) rte_cpu_to_be_16(o)
+#define cpu_to_be32(o) rte_cpu_to_be_32(o)
+#define cpu_to_be64(o) rte_cpu_to_be_64(o)
+#define cpu_to_le16(o) rte_cpu_to_le_16(o)
+#define cpu_to_le32(o) rte_cpu_to_le_32(o)
+#define cpu_to_le64(o) rte_cpu_to_le_64(o)
+
+#define opae_memcpy(a, b, c) rte_memcpy((a), (b), (c))
+
+static inline unsigned long msecs_to_timer_cycles(unsigned int m)
+{
+	/* multiply before dividing so sub-second values do not truncate to 0 */
+	return rte_get_timer_hz() * m / 1000;
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/ifpga_rawdev.c b/src/spdk/dpdk/drivers/raw/ifpga/ifpga_rawdev.c
new file mode 100644
index 000000000..b8701e155
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/ifpga_rawdev.c
@@ -0,0 +1,1731 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+#include <rte_interrupts.h>
+#include <rte_errno.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+#include <rte_string_fns.h>
+#include <rte_pmd_i40e.h>
+
+#include "base/opae_hw_api.h"
+#include "base/opae_ifpga_hw_api.h"
+#include "base/ifpga_api.h"
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_common.h"
+#include "ifpga_logs.h"
+#include "ifpga_rawdev.h"
+#include "ipn3ke_rawdev_api.h"
+
+#define RTE_PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */
+#define RTE_PCI_CFG_SPACE_SIZE 256
+#define RTE_PCI_CFG_SPACE_EXP_SIZE 4096
+#define RTE_PCI_EXT_CAP_ID(header) (int)(header & 0x0000ffff)
+#define RTE_PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
+
+int ifpga_rawdev_logtype;
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+#define PCIE_DEVICE_ID_PAC_N3000 0x0B30
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+#define PCIE_DEVICE_ID_VF_PAC_N3000 0x0B31
+#define RTE_MAX_RAW_DEVICE 10
+
+static const struct rte_pci_id pci_ifpga_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PAC_N3000),},
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_PAC_N3000),},
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
+
+static int ifpga_monitor_start;
+static pthread_t ifpga_monitor_start_thread;
+
+#define IFPGA_MAX_IRQ 12
+/* 0 for FME interrupt, others are reserved for AFU irq */
+static struct rte_intr_handle ifpga_irq_handle[IFPGA_MAX_IRQ];
+
+static struct ifpga_rawdev *
+ifpga_rawdev_allocate(struct rte_rawdev *rawdev);
+static int set_surprise_link_check_aer(
+ struct ifpga_rawdev *ifpga_rdev, int force_disable);
+static int ifpga_pci_find_next_ext_capability(unsigned int fd,
+ int start, int cap);
+static int ifpga_pci_find_ext_capability(unsigned int fd, int cap);
+
+struct ifpga_rawdev *
+ifpga_rawdev_get(const struct rte_rawdev *rawdev)
+{
+ struct ifpga_rawdev *dev;
+ unsigned int i;
+
+ if (rawdev == NULL)
+ return NULL;
+
+ for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
+ dev = &ifpga_rawdevices[i];
+ if (dev->rawdev == rawdev)
+ return dev;
+ }
+
+ return NULL;
+}
+
+static inline uint8_t
+ifpga_rawdev_find_free_device_index(void)
+{
+ uint16_t dev_id;
+
+ for (dev_id = 0; dev_id < IFPGA_RAWDEV_NUM; dev_id++) {
+ if (ifpga_rawdevices[dev_id].rawdev == NULL)
+ return dev_id;
+ }
+
+ return IFPGA_RAWDEV_NUM;
+}
+
+static struct ifpga_rawdev *
+ifpga_rawdev_allocate(struct rte_rawdev *rawdev)
+{
+ struct ifpga_rawdev *dev;
+ uint16_t dev_id;
+
+ dev = ifpga_rawdev_get(rawdev);
+ if (dev != NULL) {
+		IFPGA_RAWDEV_PMD_ERR("Raw device already allocated!");
+ return NULL;
+ }
+
+ dev_id = ifpga_rawdev_find_free_device_index();
+ if (dev_id == IFPGA_RAWDEV_NUM) {
+ IFPGA_RAWDEV_PMD_ERR("Reached maximum number of raw devices");
+ return NULL;
+ }
+
+ dev = &ifpga_rawdevices[dev_id];
+ dev->rawdev = rawdev;
+ dev->dev_id = dev_id;
+
+ return dev;
+}
+
+static int ifpga_pci_find_next_ext_capability(unsigned int fd,
+int start, int cap)
+{
+ uint32_t header;
+ int ttl;
+ int pos = RTE_PCI_CFG_SPACE_SIZE;
+ int ret;
+
+ /* minimum 8 bytes per capability */
+ ttl = (RTE_PCI_CFG_SPACE_EXP_SIZE - RTE_PCI_CFG_SPACE_SIZE) / 8;
+
+ if (start)
+ pos = start;
+ ret = pread(fd, &header, sizeof(header), pos);
+ if (ret == -1)
+ return -1;
+
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (RTE_PCI_EXT_CAP_ID(header) == cap && pos != start)
+ return pos;
+
+ pos = RTE_PCI_EXT_CAP_NEXT(header);
+ if (pos < RTE_PCI_CFG_SPACE_SIZE)
+ break;
+ ret = pread(fd, &header, sizeof(header), pos);
+ if (ret == -1)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int ifpga_pci_find_ext_capability(unsigned int fd, int cap)
+{
+ return ifpga_pci_find_next_ext_capability(fd, 0, cap);
+}
+
+static int ifpga_get_dev_vendor_id(const char *bdf,
+ uint32_t *dev_id, uint32_t *vendor_id)
+{
+ int fd;
+ char path[1024];
+ int ret;
+ uint32_t header;
+
+ strlcpy(path, "/sys/bus/pci/devices/", sizeof(path));
+ strlcat(path, bdf, sizeof(path));
+ strlcat(path, "/config", sizeof(path));
+	fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -1;
+ ret = pread(fd, &header, sizeof(header), 0);
+ if (ret == -1) {
+ close(fd);
+ return -1;
+ }
+ (*vendor_id) = header & 0xffff;
+ (*dev_id) = (header >> 16) & 0xffff;
+ close(fd);
+
+ return 0;
+}
+
+static int ifpga_rawdev_fill_info(struct ifpga_rawdev *ifpga_dev,
+ const char *bdf)
+{
+ char path[1024] = "/sys/bus/pci/devices/0000:";
+ char link[1024], link1[1024];
+ char dir[1024] = "/sys/devices/";
+ char *c;
+ int ret;
+ char sub_brg_bdf[4][16];
+ int point;
+ DIR *dp = NULL;
+ struct dirent *entry;
+ int i, j;
+
+ unsigned int dom, bus, dev;
+ int func;
+ uint32_t dev_id, vendor_id;
+
+ strlcat(path, bdf, sizeof(path));
+ memset(link, 0, sizeof(link));
+ memset(link1, 0, sizeof(link1));
+ ret = readlink(path, link, (sizeof(link)-1));
+ if (ret == -1)
+ return -1;
+ strlcpy(link1, link, sizeof(link1));
+ memset(ifpga_dev->parent_bdf, 0, 16);
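+	/*
+	 * The sysfs symlink resolves to the full PCI topology of the card;
+	 * strip the fixed-length tail components to land on the 12-character
+	 * BDF (xxxx:xx:xx.x) of the parent bridge.
+	 */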
+ point = strlen(link);
+ if (point < 39)
+ return -1;
+ point -= 39;
+ link[point] = 0;
+ if (point < 12)
+ return -1;
+ point -= 12;
+ rte_memcpy(ifpga_dev->parent_bdf, &link[point], 12);
+
+ point = strlen(link1);
+ if (point < 26)
+ return -1;
+ point -= 26;
+ link1[point] = 0;
+ if (point < 12)
+ return -1;
+ point -= 12;
+ c = strchr(link1, 'p');
+ if (!c)
+ return -1;
+ strlcat(dir, c, sizeof(dir));
+
+	/* scan the switch directory for up to 4 downstream bridge BDFs */
+ dp = opendir(dir);
+ if (dp == NULL)
+ return -1;
+ i = 0;
+ while ((entry = readdir(dp)) != NULL) {
+ if (i >= 4)
+ break;
+ if (entry->d_name[0] == '.')
+ continue;
+ if (strlen(entry->d_name) > 12)
+ continue;
+		if (sscanf(entry->d_name, "%x:%x:%x.%d",
+			&dom, &bus, &dev, &func) < 4)
+			continue;
+		strlcpy(sub_brg_bdf[i],
+			entry->d_name,
+			sizeof(sub_brg_bdf[i]));
+		i++;
+ }
+ closedir(dp);
+
+	/* under each sub-bridge, collect the BDFs of the FPGA's companion
+	 * Intel Fortville (i40e) Ethernet functions, matched by device ID
+	 */
+ j = 0;
+ for (i = 0; i < 4; i++) {
+ strlcpy(link, dir, sizeof(link));
+ strlcat(link, "/", sizeof(link));
+ strlcat(link, sub_brg_bdf[i], sizeof(link));
+ dp = opendir(link);
+ if (dp == NULL)
+ return -1;
+ while ((entry = readdir(dp)) != NULL) {
+ if (j >= 8)
+ break;
+ if (entry->d_name[0] == '.')
+ continue;
+
+ if (strlen(entry->d_name) > 12)
+ continue;
+			if (sscanf(entry->d_name, "%x:%x:%x.%d",
+				&dom, &bus, &dev, &func) < 4)
+				continue;
+			if (ifpga_get_dev_vendor_id(entry->d_name,
+				&dev_id, &vendor_id))
+				continue;
+			if (vendor_id == 0x8086 &&
+				(dev_id == 0x0CF8 ||
+				 dev_id == 0x0D58 ||
+				 dev_id == 0x1580)) {
+				strlcpy(ifpga_dev->fvl_bdf[j],
+					entry->d_name,
+					sizeof(ifpga_dev->fvl_bdf[j]));
+				j++;
+			}
+ }
+ closedir(dp);
+ }
+
+ return 0;
+}
+
+#define HIGH_FATAL(_sens, value)\
+ (((_sens)->flags & OPAE_SENSOR_HIGH_FATAL_VALID) &&\
+ (value > (_sens)->high_fatal))
+
+#define HIGH_WARN(_sens, value)\
+ (((_sens)->flags & OPAE_SENSOR_HIGH_WARN_VALID) &&\
+ (value > (_sens)->high_warn))
+
+#define LOW_FATAL(_sens, value)\
+	(((_sens)->flags & OPAE_SENSOR_LOW_FATAL_VALID) &&\
+	(value < (_sens)->low_fatal))
+
+#define LOW_WARN(_sens, value)\
+	(((_sens)->flags & OPAE_SENSOR_LOW_WARN_VALID) &&\
+	(value < (_sens)->low_warn))
+
+#define AUX_VOLTAGE_WARN 11400
+
+static int
+ifpga_monitor_sensor(struct rte_rawdev *raw_dev,
+ bool *gsd_start)
+{
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_sensor_info *sensor;
+ unsigned int value;
+ int ret;
+
+ adapter = ifpga_rawdev_get_priv(raw_dev);
+ if (!adapter)
+ return -ENODEV;
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr)
+ return -ENODEV;
+
+ opae_mgr_for_each_sensor(mgr, sensor) {
+ if (!(sensor->flags & OPAE_SENSOR_VALID))
+ goto fail;
+
+ ret = opae_mgr_get_sensor_value(mgr, sensor, &value);
+ if (ret)
+ goto fail;
+
+ if (value == 0xdeadbeef) {
+ IFPGA_RAWDEV_PMD_ERR("dev_id %d sensor %s value %x\n",
+ raw_dev->dev_id, sensor->name, value);
+ continue;
+ }
+
+ /* monitor temperature sensors */
+ if (!strcmp(sensor->name, "Board Temperature") ||
+ !strcmp(sensor->name, "FPGA Die Temperature")) {
+ IFPGA_RAWDEV_PMD_INFO("read sensor %s %d %d %d\n",
+ sensor->name, value, sensor->high_warn,
+ sensor->high_fatal);
+
+ if (HIGH_WARN(sensor, value) ||
+ LOW_WARN(sensor, value)) {
+				IFPGA_RAWDEV_PMD_INFO("%s reached threshold %d\n",
+ sensor->name, value);
+ *gsd_start = true;
+ break;
+ }
+ }
+
+ /* monitor 12V AUX sensor */
+ if (!strcmp(sensor->name, "12V AUX Voltage")) {
+ if (value < AUX_VOLTAGE_WARN) {
+ IFPGA_RAWDEV_PMD_INFO(
+					"%s reached threshold %d mV\n",
+ sensor->name, value);
+ *gsd_start = true;
+ break;
+ }
+ }
+ }
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int set_surprise_link_check_aer(
+ struct ifpga_rawdev *ifpga_rdev, int force_disable)
+{
+ struct rte_rawdev *rdev;
+ int fd = -1;
+ char path[1024];
+ int pos;
+ int ret;
+ uint32_t data;
+	bool enable = false;
+ uint32_t aer_new0, aer_new1;
+
+ if (!ifpga_rdev) {
+		IFPGA_RAWDEV_PMD_ERR("device does not exist");
+ return -EFAULT;
+ }
+
+ rdev = ifpga_rdev->rawdev;
+ if (ifpga_rdev->aer_enable)
+ return -EFAULT;
+ if (ifpga_monitor_sensor(rdev, &enable))
+ return -EFAULT;
+ if (enable || force_disable) {
+		IFPGA_RAWDEV_PMD_ERR("Setting AER, please shut down gracefully\n");
+ ifpga_rdev->aer_enable = 1;
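+		/*
+		 * Mask every AER error on the parent bridge: ECAP_AER+0x08 is
+		 * the Uncorrectable Error Mask register and ECAP_AER+0x14 the
+		 * Correctable Error Mask register, so writing all-ones keeps
+		 * a surprise link-down from escalating while the application
+		 * shuts down.
+		 */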
+ /* get bridge fd */
+ strlcpy(path, "/sys/bus/pci/devices/", sizeof(path));
+ strlcat(path, ifpga_rdev->parent_bdf, sizeof(path));
+ strlcat(path, "/config", sizeof(path));
+ fd = open(path, O_RDWR);
+ if (fd < 0)
+ goto end;
+ pos = ifpga_pci_find_ext_capability(fd, RTE_PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ goto end;
+		/* save previous ECAP_AER+0x08 (Uncorrectable Error Mask) */
+ ret = pread(fd, &data, sizeof(data), pos+0x08);
+ if (ret == -1)
+ goto end;
+ ifpga_rdev->aer_old[0] = data;
+		/* save previous ECAP_AER+0x14 (Correctable Error Mask) */
+ ret = pread(fd, &data, sizeof(data), pos+0x14);
+ if (ret == -1)
+ goto end;
+ ifpga_rdev->aer_old[1] = data;
+
+ /* set ECAP_AER+0x08 to 0xFFFFFFFF */
+ data = 0xffffffff;
+ ret = pwrite(fd, &data, 4, pos+0x08);
+ if (ret == -1)
+ goto end;
+ /* set ECAP_AER+0x14 to 0xFFFFFFFF */
+ ret = pwrite(fd, &data, 4, pos+0x14);
+ if (ret == -1)
+ goto end;
+
+ /* read current ECAP_AER+0x08 */
+ ret = pread(fd, &data, sizeof(data), pos+0x08);
+ if (ret == -1)
+ goto end;
+ aer_new0 = data;
+ /* read current ECAP_AER+0x14 */
+ ret = pread(fd, &data, sizeof(data), pos+0x14);
+ if (ret == -1)
+ goto end;
+ aer_new1 = data;
+
+ if (fd != -1)
+ close(fd);
+
+ printf(">>>>>>Set AER %x,%x %x,%x\n",
+ ifpga_rdev->aer_old[0], ifpga_rdev->aer_old[1],
+ aer_new0, aer_new1);
+
+ return 1;
+ }
+
+end:
+ if (fd != -1)
+ close(fd);
+ return -EFAULT;
+}
+
+static void *
+ifpga_rawdev_gsd_handle(__rte_unused void *param)
+{
+ struct ifpga_rawdev *ifpga_rdev;
+ int i;
+ int gsd_enable, ret;
+#define MS 1000
+
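+	/*
+	 * Poll every registered FPGA rawdev at a 100 ms interval; once any
+	 * device trips a sensor threshold, AER is masked on all of them and
+	 * the application is asked to shut down gracefully.
+	 */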
+ while (1) {
+ gsd_enable = 0;
+ for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
+ ifpga_rdev = &ifpga_rawdevices[i];
+ if (ifpga_rdev->rawdev) {
+ ret = set_surprise_link_check_aer(ifpga_rdev,
+ gsd_enable);
+ if (ret == 1 && !gsd_enable) {
+ gsd_enable = 1;
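+					/* rescan from the start so AER gets masked on every device */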
+ i = -1;
+ }
+ }
+ }
+
+ if (gsd_enable)
+			printf(">>>>>>Please shut down the application\n");
+
+ rte_delay_us(100 * MS);
+ }
+
+ return NULL;
+}
+
+static int
+ifpga_monitor_start_func(void)
+{
+ int ret;
+
+ if (ifpga_monitor_start == 0) {
+ ret = pthread_create(&ifpga_monitor_start_thread,
+ NULL,
+ ifpga_rawdev_gsd_handle, NULL);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR(
+				"Failed to create ifpga monitor thread");
+ return -1;
+ }
+ ifpga_monitor_start = 1;
+ }
+
+ return 0;
+}
+
+static int
+ifpga_monitor_stop_func(void)
+{
+ int ret;
+
+ if (ifpga_monitor_start == 1) {
+ ret = pthread_cancel(ifpga_monitor_start_thread);
+ if (ret)
+ IFPGA_RAWDEV_PMD_ERR("Can't cancel the thread");
+
+ ret = pthread_join(ifpga_monitor_start_thread, NULL);
+ if (ret)
+ IFPGA_RAWDEV_PMD_ERR("Can't join the thread");
+
+ ifpga_monitor_start = 0;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ifpga_fill_afu_dev(struct opae_accelerator *acc,
+ struct rte_afu_device *afu_dev)
+{
+ struct rte_mem_resource *res = afu_dev->mem_resource;
+ struct opae_acc_region_info region_info;
+ struct opae_acc_info info;
+ unsigned long i;
+ int ret;
+
+ ret = opae_acc_get_info(acc, &info);
+ if (ret)
+ return ret;
+
+ if (info.num_regions > PCI_MAX_RESOURCE)
+ return -EFAULT;
+
+ afu_dev->num_region = info.num_regions;
+
+ for (i = 0; i < info.num_regions; i++) {
+ region_info.index = i;
+ ret = opae_acc_get_region_info(acc, &region_info);
+ if (ret)
+ return ret;
+
+ if ((region_info.flags & ACC_REGION_MMIO) &&
+ (region_info.flags & ACC_REGION_READ) &&
+ (region_info.flags & ACC_REGION_WRITE)) {
+ res[i].phys_addr = region_info.phys_addr;
+ res[i].len = region_info.len;
+ res[i].addr = region_info.addr;
+ } else
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void
+ifpga_rawdev_info_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t dev_info)
+{
+ struct opae_adapter *adapter;
+ struct opae_accelerator *acc;
+ struct rte_afu_device *afu_dev;
+ struct opae_manager *mgr = NULL;
+ struct opae_eth_group_region_info opae_lside_eth_info;
+ struct opae_eth_group_region_info opae_nside_eth_info;
+ int lside_bar_idx, nside_bar_idx;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ if (!dev_info) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid request");
+ return;
+ }
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return;
+
+ afu_dev = dev_info;
+ afu_dev->rawdev = dev;
+
+ /* find opae_accelerator and fill info into afu_device */
+ opae_adapter_for_each_acc(adapter, acc) {
+ if (acc->index != afu_dev->id.port)
+ continue;
+
+ if (ifpga_fill_afu_dev(acc, afu_dev)) {
+ IFPGA_RAWDEV_PMD_ERR("cannot get info\n");
+ return;
+ }
+ }
+
+ /* get opae_manager to rawdev */
+ mgr = opae_adapter_get_mgr(adapter);
+ if (mgr) {
+ /* get LineSide BAR Index */
+ if (opae_manager_get_eth_group_region_info(mgr, 0,
+ &opae_lside_eth_info)) {
+ return;
+ }
+ lside_bar_idx = opae_lside_eth_info.mem_idx;
+
+ /* get NICSide BAR Index */
+ if (opae_manager_get_eth_group_region_info(mgr, 1,
+ &opae_nside_eth_info)) {
+ return;
+ }
+ nside_bar_idx = opae_nside_eth_info.mem_idx;
+
+ if (lside_bar_idx >= PCI_MAX_RESOURCE ||
+ nside_bar_idx >= PCI_MAX_RESOURCE ||
+ lside_bar_idx == nside_bar_idx)
+ return;
+
+ /* fill LineSide BAR Index */
+ afu_dev->mem_resource[lside_bar_idx].phys_addr =
+ opae_lside_eth_info.phys_addr;
+ afu_dev->mem_resource[lside_bar_idx].len =
+ opae_lside_eth_info.len;
+ afu_dev->mem_resource[lside_bar_idx].addr =
+ opae_lside_eth_info.addr;
+
+ /* fill NICSide BAR Index */
+ afu_dev->mem_resource[nside_bar_idx].phys_addr =
+ opae_nside_eth_info.phys_addr;
+ afu_dev->mem_resource[nside_bar_idx].len =
+ opae_nside_eth_info.len;
+ afu_dev->mem_resource[nside_bar_idx].addr =
+ opae_nside_eth_info.addr;
+ }
+}
+
+static int
+ifpga_rawdev_configure(const struct rte_rawdev *dev,
+ rte_rawdev_obj_t config)
+{
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ return config ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_start(struct rte_rawdev *dev)
+{
+ int ret = 0;
+ struct opae_adapter *adapter;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ return ret;
+}
+
+static void
+ifpga_rawdev_stop(struct rte_rawdev *dev)
+{
+ dev->started = 0;
+}
+
+static int
+ifpga_rawdev_close(struct rte_rawdev *dev)
+{
+	return dev ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_reset(struct rte_rawdev *dev)
+{
+	return dev ? 0 : 1;
+}
+
+static int
+fpga_pr(struct rte_rawdev *raw_dev, u32 port_id, const char *buffer, u32 size,
+ u64 *status)
+{
+
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_accelerator *acc;
+ struct opae_bridge *br;
+ int ret;
+
+ adapter = ifpga_rawdev_get_priv(raw_dev);
+ if (!adapter)
+ return -ENODEV;
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr)
+ return -ENODEV;
+
+ acc = opae_adapter_get_acc(adapter, port_id);
+ if (!acc)
+ return -ENODEV;
+
+ br = opae_acc_get_br(acc);
+ if (!br)
+ return -ENODEV;
+
+ ret = opae_manager_flash(mgr, port_id, buffer, size, status);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s pr error %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = opae_bridge_reset(br);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s reset port:%d error %d\n",
+ __func__, port_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id,
+ const char *file_name)
+{
+ struct stat file_stat;
+ int file_fd;
+ int ret = 0;
+ ssize_t buffer_size;
+ void *buffer;
+ u64 pr_error;
+
+ if (!file_name)
+ return -EINVAL;
+
+ file_fd = open(file_name, O_RDONLY);
+ if (file_fd < 0) {
+ IFPGA_RAWDEV_PMD_ERR("%s: open file error: %s\n",
+ __func__, file_name);
+ IFPGA_RAWDEV_PMD_ERR("Message : %s\n", strerror(errno));
+ return -EINVAL;
+ }
+ ret = stat(file_name, &file_stat);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("stat on bitstream file failed: %s\n",
+ file_name);
+ ret = -EINVAL;
+ goto close_fd;
+ }
+ buffer_size = file_stat.st_size;
+ if (buffer_size <= 0) {
+ ret = -EINVAL;
+ goto close_fd;
+ }
+
+	IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zd\n", buffer_size);
+ buffer = rte_malloc(NULL, buffer_size, 0);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto close_fd;
+ }
+
+ /*read the raw data*/
+ if (buffer_size != read(file_fd, (void *)buffer, buffer_size)) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+ /*do PR now*/
+ ret = fpga_pr(rawdev, port_id, buffer, buffer_size, &pr_error);
+ IFPGA_RAWDEV_PMD_INFO("downloading to device port %d....%s.\n", port_id,
+ ret ? "failed" : "success");
+ if (ret) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+free_buffer:
+ if (buffer)
+ rte_free(buffer);
+close_fd:
+ close(file_fd);
+ return ret;
+}
+
+static int
+ifpga_rawdev_pr(struct rte_rawdev *dev,
+ rte_rawdev_obj_t pr_conf)
+{
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_board_info *info;
+ struct rte_afu_pr_conf *afu_pr_conf;
+ int ret;
+ struct uuid uuid;
+ struct opae_accelerator *acc;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ if (!pr_conf)
+ return -EINVAL;
+
+ afu_pr_conf = pr_conf;
+
+ if (afu_pr_conf->pr_enable) {
+ ret = rte_fpga_do_pr(dev,
+ afu_pr_conf->afu_id.port,
+ afu_pr_conf->bs_path);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("do pr error %d\n", ret);
+ return ret;
+ }
+ }
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr) {
+ IFPGA_RAWDEV_PMD_ERR("opae_manager of opae_adapter is NULL");
+ return -1;
+ }
+
+ if (ifpga_mgr_ops.get_board_info(mgr, &info)) {
+		IFPGA_RAWDEV_PMD_ERR("ifpga manager get_board_info failed!");
+ return -1;
+ }
+
+ if (info->lightweight) {
+ /* set uuid to all 0, when fpga is lightweight image */
+ memset(&afu_pr_conf->afu_id.uuid.uuid_low, 0, sizeof(u64));
+ memset(&afu_pr_conf->afu_id.uuid.uuid_high, 0, sizeof(u64));
+ } else {
+ acc = opae_adapter_get_acc(adapter, afu_pr_conf->afu_id.port);
+ if (!acc)
+ return -ENODEV;
+
+ ret = opae_acc_get_uuid(acc, &uuid);
+ if (ret)
+ return ret;
+
+ rte_memcpy(&afu_pr_conf->afu_id.uuid.uuid_low, uuid.b,
+ sizeof(u64));
+ rte_memcpy(&afu_pr_conf->afu_id.uuid.uuid_high, uuid.b + 8,
+ sizeof(u64));
+
+ IFPGA_RAWDEV_PMD_INFO("%s: uuid_l=0x%lx, uuid_h=0x%lx\n",
+ __func__,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_low,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_high);
+ }
+ return 0;
+}
+
+static int
+ifpga_rawdev_get_attr(struct rte_rawdev *dev,
+ const char *attr_name, uint64_t *attr_value)
+{
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_retimer_info opae_rtm_info;
+ struct opae_retimer_status opae_rtm_status;
+ struct opae_eth_group_info opae_eth_grp_info;
+ struct opae_eth_group_region_info opae_eth_grp_reg_info;
+ int eth_group_num = 0;
+ uint64_t port_link_bitmap = 0, port_link_bit;
+ uint32_t i, j, p, q;
+
+#define MAX_PORT_PER_RETIMER 4
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ if (!dev || !attr_name || !attr_value) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid arguments for getting attributes");
+ return -1;
+ }
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter) {
+ IFPGA_RAWDEV_PMD_ERR("Adapter of dev %s is NULL", dev->name);
+ return -1;
+ }
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr) {
+ IFPGA_RAWDEV_PMD_ERR("opae_manager of opae_adapter is NULL");
+ return -1;
+ }
+
+ /* currently, eth_group_num is always 2 */
+ eth_group_num = opae_manager_get_eth_group_nums(mgr);
+ if (eth_group_num < 0)
+ return -1;
+
+ if (!strcmp(attr_name, "LineSideBaseMAC")) {
+		/* not implemented by the FPGA yet, so report all zeros */
+ *attr_value = (uint64_t)0;
+ return 0;
+ }
+ if (!strcmp(attr_name, "LineSideMACType")) {
+ /* eth_group 0 on FPGA connect to LineSide */
+ if (opae_manager_get_eth_group_info(mgr, 0,
+ &opae_eth_grp_info))
+ return -1;
+ switch (opae_eth_grp_info.speed) {
+ case ETH_SPEED_10G:
+ *attr_value =
+ (uint64_t)(IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI);
+ break;
+ case ETH_SPEED_25G:
+ *attr_value =
+ (uint64_t)(IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI);
+ break;
+ default:
+ *attr_value =
+ (uint64_t)(IFPGA_RAWDEV_RETIMER_MAC_TYPE_UNKNOWN);
+ break;
+ }
+ return 0;
+ }
+ if (!strcmp(attr_name, "LineSideLinkSpeed")) {
+ if (opae_manager_get_retimer_status(mgr, &opae_rtm_status))
+ return -1;
+		switch (opae_rtm_status.speed) {
+		case MXD_10GB:
+			*attr_value =
+				(uint64_t)(IFPGA_RAWDEV_LINK_SPEED_10GB);
+			break;
+		case MXD_25GB:
+			*attr_value =
+				(uint64_t)(IFPGA_RAWDEV_LINK_SPEED_25GB);
+			break;
+		case MXD_40GB:
+			*attr_value =
+				(uint64_t)(IFPGA_RAWDEV_LINK_SPEED_40GB);
+			break;
+		case MXD_1GB:
+		case MXD_2_5GB:
+		case MXD_5GB:
+		case MXD_100GB:
+		case MXD_SPEED_UNKNOWN:
+		default:
+			/* speeds with no matching rawdev enum report unknown */
+			*attr_value =
+				(uint64_t)(IFPGA_RAWDEV_LINK_SPEED_UNKNOWN);
+			break;
+		}
+ return 0;
+ }
+ if (!strcmp(attr_name, "LineSideLinkRetimerNum")) {
+ if (opae_manager_get_retimer_info(mgr, &opae_rtm_info))
+ return -1;
+ *attr_value = (uint64_t)(opae_rtm_info.nums_retimer);
+ return 0;
+ }
+ if (!strcmp(attr_name, "LineSideLinkPortNum")) {
+ if (opae_manager_get_retimer_info(mgr, &opae_rtm_info))
+ return -1;
+ uint64_t tmp = (uint64_t)opae_rtm_info.ports_per_retimer *
+ (uint64_t)opae_rtm_info.nums_retimer;
+ *attr_value = tmp;
+ return 0;
+ }
+ if (!strcmp(attr_name, "LineSideLinkStatus")) {
+ if (opae_manager_get_retimer_info(mgr, &opae_rtm_info))
+ return -1;
+ if (opae_manager_get_retimer_status(mgr, &opae_rtm_status))
+ return -1;
+ (*attr_value) = 0;
+ q = 0;
+ port_link_bitmap = (uint64_t)(opae_rtm_status.line_link_bitmap);
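+		/* compact the per-retimer link bits into one contiguous bitmap, one bit per port */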
+ for (i = 0; i < opae_rtm_info.nums_retimer; i++) {
+ p = i * MAX_PORT_PER_RETIMER;
+ for (j = 0; j < opae_rtm_info.ports_per_retimer; j++) {
+ port_link_bit = 0;
+ IFPGA_BIT_SET(port_link_bit, (p+j));
+ port_link_bit &= port_link_bitmap;
+ if (port_link_bit)
+ IFPGA_BIT_SET((*attr_value), q);
+ q++;
+ }
+ }
+ return 0;
+ }
+ if (!strcmp(attr_name, "LineSideBARIndex")) {
+ /* eth_group 0 on FPGA connect to LineSide */
+ if (opae_manager_get_eth_group_region_info(mgr, 0,
+ &opae_eth_grp_reg_info))
+ return -1;
+ *attr_value = (uint64_t)opae_eth_grp_reg_info.mem_idx;
+ return 0;
+ }
+ if (!strcmp(attr_name, "NICSideMACType")) {
+ /* eth_group 1 on FPGA connect to NicSide */
+ if (opae_manager_get_eth_group_info(mgr, 1,
+ &opae_eth_grp_info))
+ return -1;
+ *attr_value = (uint64_t)(opae_eth_grp_info.speed);
+ return 0;
+ }
+ if (!strcmp(attr_name, "NICSideLinkSpeed")) {
+ /* eth_group 1 on FPGA connect to NicSide */
+ if (opae_manager_get_eth_group_info(mgr, 1,
+ &opae_eth_grp_info))
+ return -1;
+ *attr_value = (uint64_t)(opae_eth_grp_info.speed);
+ return 0;
+ }
+ if (!strcmp(attr_name, "NICSideLinkPortNum")) {
+ if (opae_manager_get_retimer_info(mgr, &opae_rtm_info))
+ return -1;
+ uint64_t tmp = (uint64_t)opae_rtm_info.nums_fvl *
+ (uint64_t)opae_rtm_info.ports_per_fvl;
+ *attr_value = tmp;
+ return 0;
+ }
+ if (!strcmp(attr_name, "NICSideLinkStatus"))
+ return 0;
+ if (!strcmp(attr_name, "NICSideBARIndex")) {
+ /* eth_group 1 on FPGA connect to NicSide */
+ if (opae_manager_get_eth_group_region_info(mgr, 1,
+ &opae_eth_grp_reg_info))
+ return -1;
+ *attr_value = (uint64_t)opae_eth_grp_reg_info.mem_idx;
+ return 0;
+ }
+
+	IFPGA_RAWDEV_PMD_ERR("attribute %s is not supported", attr_name);
+ return -1;
+}
+
+static const struct rte_rawdev_ops ifpga_rawdev_ops = {
+ .dev_info_get = ifpga_rawdev_info_get,
+ .dev_configure = ifpga_rawdev_configure,
+ .dev_start = ifpga_rawdev_start,
+ .dev_stop = ifpga_rawdev_stop,
+ .dev_close = ifpga_rawdev_close,
+ .dev_reset = ifpga_rawdev_reset,
+
+ .queue_def_conf = NULL,
+ .queue_setup = NULL,
+ .queue_release = NULL,
+
+ .attr_get = ifpga_rawdev_get_attr,
+ .attr_set = NULL,
+
+ .enqueue_bufs = NULL,
+ .dequeue_bufs = NULL,
+
+ .dump = NULL,
+
+ .xstats_get = NULL,
+ .xstats_get_names = NULL,
+ .xstats_get_by_name = NULL,
+ .xstats_reset = NULL,
+
+ .firmware_status_get = NULL,
+ .firmware_version_get = NULL,
+ .firmware_load = ifpga_rawdev_pr,
+ .firmware_unload = NULL,
+
+ .dev_selftest = NULL,
+};
+
+static int
+ifpga_get_fme_error_prop(struct opae_manager *mgr,
+ u64 prop_id, u64 *val)
+{
+ struct feature_prop prop;
+
+ prop.feature_id = IFPGA_FME_FEATURE_ID_GLOBAL_ERR;
+ prop.prop_id = prop_id;
+
+ if (opae_manager_ifpga_get_prop(mgr, &prop))
+ return -EINVAL;
+
+ *val = prop.data;
+
+ return 0;
+}
+
+static int
+ifpga_set_fme_error_prop(struct opae_manager *mgr,
+ u64 prop_id, u64 val)
+{
+ struct feature_prop prop;
+
+ prop.feature_id = IFPGA_FME_FEATURE_ID_GLOBAL_ERR;
+ prop.prop_id = prop_id;
+
+ prop.data = val;
+
+ if (opae_manager_ifpga_set_prop(mgr, &prop))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+fme_err_read_seu_emr(struct opae_manager *mgr)
+{
+ u64 val;
+ int ret;
+
+ ret = ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_SEU_EMR_LOW, &val);
+ if (ret)
+ return -EINVAL;
+
+ IFPGA_RAWDEV_PMD_INFO("seu emr low: 0x%" PRIx64 "\n", val);
+
+ ret = ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_SEU_EMR_HIGH, &val);
+ if (ret)
+ return -EINVAL;
+
+ IFPGA_RAWDEV_PMD_INFO("seu emr high: 0x%" PRIx64 "\n", val);
+
+ return 0;
+}
+
+static int fme_clear_warning_intr(struct opae_manager *mgr)
+{
+ u64 val;
+
+ if (ifpga_set_fme_error_prop(mgr, FME_ERR_PROP_INJECT_ERRORS, 0))
+ return -EINVAL;
+
+ if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_NONFATAL_ERRORS, &val))
+ return -EINVAL;
+ if ((val & 0x40) != 0)
+ IFPGA_RAWDEV_PMD_INFO("clean not done\n");
+
+ return 0;
+}
+
+static int fme_clean_fme_error(struct opae_manager *mgr)
+{
+ u64 val;
+
+ if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_ERRORS, &val))
+ return -EINVAL;
+
+ IFPGA_RAWDEV_PMD_DEBUG("before clean 0x%" PRIx64 "\n", val);
+
+ ifpga_set_fme_error_prop(mgr, FME_ERR_PROP_CLEAR, val);
+
+ if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_ERRORS, &val))
+ return -EINVAL;
+
+ IFPGA_RAWDEV_PMD_DEBUG("after clean 0x%" PRIx64 "\n", val);
+
+ return 0;
+}
+
+static int
+fme_err_handle_error0(struct opae_manager *mgr)
+{
+ struct feature_fme_error0 fme_error0;
+ u64 val;
+
+ if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_ERRORS, &val))
+ return -EINVAL;
+
+ if (fme_clean_fme_error(mgr))
+ return -EINVAL;
+
+ fme_error0.csr = val;
+
+ if (fme_error0.fabric_err)
+ IFPGA_RAWDEV_PMD_ERR("Fabric error\n");
+ else if (fme_error0.fabfifo_overflow)
+ IFPGA_RAWDEV_PMD_ERR("Fabric fifo under/overflow error\n");
+ else if (fme_error0.afu_acc_mode_err)
+ IFPGA_RAWDEV_PMD_ERR("AFU PF/VF access mismatch detected\n");
+ else if (fme_error0.pcie0cdc_parity_err)
+ IFPGA_RAWDEV_PMD_ERR("PCIe0 CDC Parity Error\n");
+ else if (fme_error0.cvlcdc_parity_err)
+ IFPGA_RAWDEV_PMD_ERR("CVL CDC Parity Error\n");
+ else if (fme_error0.fpgaseuerr)
+ fme_err_read_seu_emr(mgr);
+
+ /* clean the errors */
+ if (ifpga_set_fme_error_prop(mgr, FME_ERR_PROP_ERRORS, val))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+fme_err_handle_catfatal_error(struct opae_manager *mgr)
+{
+ struct feature_fme_ras_catfaterror fme_catfatal;
+ u64 val;
+
+ if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_CATFATAL_ERRORS, &val))
+ return -EINVAL;
+
+ fme_catfatal.csr = val;
+
+ if (fme_catfatal.cci_fatal_err)
+ IFPGA_RAWDEV_PMD_ERR("CCI error detected\n");
+ else if (fme_catfatal.fabric_fatal_err)
+ IFPGA_RAWDEV_PMD_ERR("Fabric fatal error detected\n");
+ else if (fme_catfatal.pcie_poison_err)
+ IFPGA_RAWDEV_PMD_ERR("Poison error from PCIe ports\n");
+ else if (fme_catfatal.inject_fata_err)
+ IFPGA_RAWDEV_PMD_ERR("Injected Fatal Error\n");
+ else if (fme_catfatal.crc_catast_err)
+ IFPGA_RAWDEV_PMD_ERR("a catastrophic EDCRC error\n");
+ else if (fme_catfatal.injected_catast_err)
+ IFPGA_RAWDEV_PMD_ERR("Injected Catastrophic Error\n");
+ else if (fme_catfatal.bmc_seu_catast_err)
+ fme_err_read_seu_emr(mgr);
+
+ return 0;
+}
+
+static int
+fme_err_handle_nonfaterror(struct opae_manager *mgr)
+{
+ struct feature_fme_ras_nonfaterror nonfaterr;
+ u64 val;
+
+ if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_NONFATAL_ERRORS, &val))
+ return -EINVAL;
+
+ nonfaterr.csr = val;
+
+ if (nonfaterr.temp_thresh_ap1)
+ IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP1\n");
+ else if (nonfaterr.temp_thresh_ap2)
+ IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP2\n");
+ else if (nonfaterr.pcie_error)
+ IFPGA_RAWDEV_PMD_INFO("an error has occurred in pcie\n");
+ else if (nonfaterr.portfatal_error)
+ IFPGA_RAWDEV_PMD_INFO("fatal error occurred in AFU port.\n");
+ else if (nonfaterr.proc_hot)
+ IFPGA_RAWDEV_PMD_INFO("a ProcHot event\n");
+ else if (nonfaterr.afu_acc_mode_err)
+ IFPGA_RAWDEV_PMD_INFO("an AFU PF/VF access mismatch\n");
+ else if (nonfaterr.injected_nonfata_err) {
+ IFPGA_RAWDEV_PMD_INFO("Injected Warning Error\n");
+ fme_clear_warning_intr(mgr);
+ } else if (nonfaterr.temp_thresh_AP6)
+ IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP6\n");
+ else if (nonfaterr.power_thresh_AP1)
+ IFPGA_RAWDEV_PMD_INFO("Power threshold triggered AP1\n");
+ else if (nonfaterr.power_thresh_AP2)
+ IFPGA_RAWDEV_PMD_INFO("Power threshold triggered AP2\n");
+ else if (nonfaterr.mbp_err)
+ IFPGA_RAWDEV_PMD_INFO("an MBP event\n");
+
+ return 0;
+}
+
+static void
+fme_interrupt_handler(void *param)
+{
+ struct opae_manager *mgr = (struct opae_manager *)param;
+
+ IFPGA_RAWDEV_PMD_INFO("%s interrupt occurred\n", __func__);
+
+ fme_err_handle_error0(mgr);
+ fme_err_handle_nonfaterror(mgr);
+ fme_err_handle_catfatal_error(mgr);
+}
+
+int
+ifpga_unregister_msix_irq(enum ifpga_irq_type type,
+ int vec_start, rte_intr_callback_fn handler, void *arg)
+{
+ struct rte_intr_handle intr_handle;
+
+	if (type == IFPGA_FME_IRQ)
+		intr_handle = ifpga_irq_handle[0];
+	else if (type == IFPGA_AFU_IRQ)
+		intr_handle = ifpga_irq_handle[vec_start + 1];
+	else
+		return -EINVAL;
+
+ rte_intr_efd_disable(&intr_handle);
+
+ return rte_intr_callback_unregister(&intr_handle,
+ handler, arg);
+}
+
+int
+ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id,
+ enum ifpga_irq_type type, int vec_start, int count,
+ rte_intr_callback_fn handler, const char *name,
+ void *arg)
+{
+ int ret;
+ struct rte_intr_handle intr_handle;
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_accelerator *acc;
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr)
+ return -ENODEV;
+
+	if (type == IFPGA_FME_IRQ) {
+		intr_handle = ifpga_irq_handle[0];
+		count = 1;
+	} else if (type == IFPGA_AFU_IRQ) {
+		intr_handle = ifpga_irq_handle[vec_start + 1];
+	} else {
+		return -EINVAL;
+	}
+
+ intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
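+	/* back each MSI-X vector with an eventfd so it can be serviced from user space */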
+
+ ret = rte_intr_efd_enable(&intr_handle, count);
+ if (ret)
+ return -ENODEV;
+
+ intr_handle.fd = intr_handle.efds[0];
+
+ IFPGA_RAWDEV_PMD_DEBUG("register %s irq, vfio_fd=%d, fd=%d\n",
+ name, intr_handle.vfio_dev_fd,
+ intr_handle.fd);
+
+ if (type == IFPGA_FME_IRQ) {
+ struct fpga_fme_err_irq_set err_irq_set;
+ err_irq_set.evtfd = intr_handle.efds[0];
+
+ ret = opae_manager_ifpga_set_err_irq(mgr, &err_irq_set);
+ if (ret)
+ return -EINVAL;
+ } else if (type == IFPGA_AFU_IRQ) {
+ acc = opae_adapter_get_acc(adapter, port_id);
+ if (!acc)
+ return -EINVAL;
+
+ ret = opae_acc_set_irq(acc, vec_start, count, intr_handle.efds);
+ if (ret)
+ return -EINVAL;
+ }
+
+ /* register interrupt handler using DPDK API */
+ ret = rte_intr_callback_register(&intr_handle,
+ handler, (void *)arg);
+ if (ret)
+ return -EINVAL;
+
+ IFPGA_RAWDEV_PMD_INFO("success register %s interrupt\n", name);
+
+ return 0;
+}
+
+static int
+ifpga_rawdev_create(struct rte_pci_device *pci_dev,
+ int socket_id)
+{
+ int ret = 0;
+ struct rte_rawdev *rawdev = NULL;
+ struct ifpga_rawdev *dev = NULL;
+ struct opae_adapter *adapter = NULL;
+ struct opae_manager *mgr = NULL;
+ struct opae_adapter_data_pci *data = NULL;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ int i;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%02x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct opae_adapter),
+ socket_id);
+ if (rawdev == NULL) {
+ IFPGA_RAWDEV_PMD_ERR("Unable to allocate rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ipn3ke_bridge_func.get_ifpga_rawdev = ifpga_rawdev_get;
+ ipn3ke_bridge_func.set_i40e_sw_dev = rte_pmd_i40e_set_switch_dev;
+
+ dev = ifpga_rawdev_allocate(rawdev);
+ if (dev == NULL) {
+ IFPGA_RAWDEV_PMD_ERR("Unable to allocate ifpga_rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ dev->aer_enable = 0;
+
+ /* alloc OPAE_FPGA_PCI data to register to OPAE hardware level API */
+ data = opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ if (!data) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* init opae_adapter_data_pci for device specific information */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ data->region[i].phys_addr = pci_dev->mem_resource[i].phys_addr;
+ data->region[i].len = pci_dev->mem_resource[i].len;
+ data->region[i].addr = pci_dev->mem_resource[i].addr;
+ }
+ data->device_id = pci_dev->id.device_id;
+ data->vendor_id = pci_dev->id.vendor_id;
+ data->bus = pci_dev->addr.bus;
+ data->devid = pci_dev->addr.devid;
+ data->function = pci_dev->addr.function;
+ data->vfio_dev_fd = pci_dev->intr_handle.vfio_dev_fd;
+
+ adapter = rawdev->dev_private;
+	/* create an opae_adapter based on the device data above */
+ ret = opae_adapter_init(adapter, pci_dev->device.name, data);
+ if (ret) {
+ ret = -ENOMEM;
+ goto free_adapter_data;
+ }
+
+ rawdev->dev_ops = &ifpga_rawdev_ops;
+ rawdev->device = &pci_dev->device;
+ rawdev->driver_name = pci_dev->driver->driver.name;
+
+	/* the adapter must be enumerated before it can be used */
+ ret = opae_adapter_enumerate(adapter);
+ if (ret)
+ goto free_adapter_data;
+
+ /* get opae_manager to rawdev */
+ mgr = opae_adapter_get_mgr(adapter);
+ if (mgr) {
+ /* PF function */
+ IFPGA_RAWDEV_PMD_INFO("this is a PF function");
+ }
+
+ ret = ifpga_register_msix_irq(rawdev, 0, IFPGA_FME_IRQ, 0, 0,
+ fme_interrupt_handler, "fme_irq", mgr);
+ if (ret)
+ goto free_adapter_data;
+
+ return ret;
+
+free_adapter_data:
+ if (data)
+ opae_adapter_data_free(data);
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_destroy(struct rte_pci_device *pci_dev)
+{
+ int ret;
+ struct rte_rawdev *rawdev;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Closing %s on NUMA node %d",
+ name, rte_socket_id());
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rawdev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ adapter = ifpga_rawdev_get_priv(rawdev);
+ if (!adapter)
+ return -ENODEV;
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr)
+ return -ENODEV;
+
+ if (ifpga_unregister_msix_irq(IFPGA_FME_IRQ, 0,
+ fme_interrupt_handler, mgr))
+ return -EINVAL;
+
+ opae_adapter_data_free(adapter->data);
+ opae_adapter_free(adapter);
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ IFPGA_RAWDEV_PMD_DEBUG("Device cleanup failed");
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+ return ifpga_rawdev_create(pci_dev, rte_socket_id());
+}
+
+static int
+ifpga_rawdev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ ifpga_monitor_stop_func();
+ return ifpga_rawdev_destroy(pci_dev);
+}
+
+static struct rte_pci_driver rte_ifpga_rawdev_pmd = {
+ .id_table = pci_ifpga_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ifpga_rawdev_pci_probe,
+ .remove = ifpga_rawdev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(ifpga_rawdev_pci_driver, pci_ifpga_map);
+RTE_PMD_REGISTER_KMOD_DEP(ifpga_rawdev_pci_driver, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ifpga_rawdev_init_log)
+{
+ ifpga_rawdev_logtype = rte_log_register("driver.raw.init");
+ if (ifpga_rawdev_logtype >= 0)
+ rte_log_set_level(ifpga_rawdev_logtype, RTE_LOG_NOTICE);
+}
+
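+/*
+ * Devargs accepted by the ifpga_rawdev_cfg vdev,
+ * e.g. ifpga=<bdf>,port=<n>[,afu_bts=<path>]
+ */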
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
+static int ifpga_rawdev_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int size;
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ size = strlen(value) + 1;
+ *(char **)extra_args = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ strlcpy(*(char **)extra_args, value, size);
+
+ return 0;
+}
+
+static int
+ifpga_cfg_probe(struct rte_vdev_device *dev)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_rawdev *rawdev = NULL;
+ struct ifpga_rawdev *ifpga_dev;
+ int port;
+ char *name = NULL;
+ const char *bdf;
+ char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
+ int ret = -1;
+
+ devargs = dev->device.devargs;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+		IFPGA_RAWDEV_PMD_ERR("error when parsing devargs");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &ifpga_rawdev_get_string_arg,
+ &name) < 0) {
+			IFPGA_RAWDEV_PMD_ERR("failed to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist,
+ IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg,
+ &port) < 0) {
+				IFPGA_RAWDEV_PMD_ERR("failed to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", name);
+ rawdev = rte_rawdev_pmd_get_named_dev(dev_name);
+ if (!rawdev)
+ goto end;
+ ifpga_dev = ifpga_rawdev_get(rawdev);
+ if (!ifpga_dev)
+ goto end;
+ bdf = name;
+ ifpga_rawdev_fill_info(ifpga_dev, bdf);
+
+ ifpga_monitor_start_func();
+
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
+ port, name);
+
+ ret = rte_eal_hotplug_add(RTE_STR(IFPGA_BUS_NAME),
+ dev_name, devargs->args);
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return ret;
+}
+
+static int
+ifpga_cfg_remove(struct rte_vdev_device *vdev)
+{
+ IFPGA_RAWDEV_PMD_INFO("Remove ifpga_cfg %p",
+ vdev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver ifpga_cfg_driver = {
+ .probe = ifpga_cfg_probe,
+ .remove = ifpga_cfg_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(ifpga_rawdev_cfg, ifpga_cfg_driver);
+RTE_PMD_REGISTER_ALIAS(ifpga_rawdev_cfg, ifpga_cfg);
+RTE_PMD_REGISTER_PARAM_STRING(ifpga_rawdev_cfg,
+ "ifpga=<string> "
+ "port=<int> "
+ "afu_bts=<path>");
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/ifpga_rawdev.h b/src/spdk/dpdk/drivers/raw/ifpga/ifpga_rawdev.h
new file mode 100644
index 000000000..7754beb02
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/ifpga_rawdev.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_RAWDEV_H_
+#define _IFPGA_RAWDEV_H_
+
+extern int ifpga_rawdev_logtype;
+
+#define IFPGA_RAWDEV_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_rawdev_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_RAWDEV_PMD_FUNC_TRACE() IFPGA_RAWDEV_PMD_LOG(DEBUG, ">>")
+
+#define IFPGA_RAWDEV_PMD_DEBUG(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(DEBUG, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_INFO(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(INFO, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_ERR(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(ERR, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_WARN(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(WARNING, fmt, ## args)
+
+enum ifpga_rawdev_device_state {
+ IFPGA_IDLE,
+ IFPGA_READY,
+ IFPGA_ERROR
+};
+
+/** Set a bit in the uint64 variable */
+#define IFPGA_BIT_SET(var, pos) \
+ ((var) |= ((uint64_t)1 << ((pos))))
+
+/** Reset the bit in the variable */
+#define IFPGA_BIT_RESET(var, pos) \
+ ((var) &= ~((uint64_t)1 << ((pos))))
+
+/** Check the bit is set in the variable */
+#define IFPGA_BIT_ISSET(var, pos) \
+ (((var) & ((uint64_t)1 << ((pos)))) ? 1 : 0)
+
+static inline struct opae_adapter *
+ifpga_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+ return rawdev->dev_private;
+}
+
+#define IFPGA_RAWDEV_MSIX_IRQ_NUM 7
+#define IFPGA_RAWDEV_NUM 32
+
+struct ifpga_rawdev {
+ int dev_id;
+ struct rte_rawdev *rawdev;
+ int aer_enable;
+ int intr_fd[IFPGA_RAWDEV_MSIX_IRQ_NUM+1];
+ uint32_t aer_old[2];
+ char fvl_bdf[8][16];
+ char parent_bdf[16];
+};
+
+struct ifpga_rawdev *
+ifpga_rawdev_get(const struct rte_rawdev *rawdev);
+
+enum ifpga_irq_type {
+ IFPGA_FME_IRQ = 0,
+ IFPGA_AFU_IRQ = 1,
+};
+
+int
+ifpga_register_msix_irq(struct rte_rawdev *dev, int port_id,
+ enum ifpga_irq_type type, int vec_start, int count,
+ rte_intr_callback_fn handler, const char *name,
+ void *arg);
+int
+ifpga_unregister_msix_irq(enum ifpga_irq_type type,
+ int vec_start, rte_intr_callback_fn handler, void *arg);
+
+#endif /* _IFPGA_RAWDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/meson.build b/src/spdk/dpdk/drivers/raw/ifpga/meson.build
new file mode 100644
index 000000000..05a1711b5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+dep = dependency('libfdt', required: false)
+if not dep.found()
+ dep = cc.find_library('libfdt', required: false)
+endif
+if not dep.found()
+ build = false
+ reason = 'missing dependency, "libfdt"'
+ subdir_done()
+endif
+
+subdir('base')
+objs = [base_objs]
+
+deps += ['ethdev', 'rawdev', 'pci', 'bus_pci', 'kvargs',
+ 'bus_vdev', 'bus_ifpga', 'net', 'pmd_i40e', 'pmd_ipn3ke']
+ext_deps += dep
+
+sources = files('ifpga_rawdev.c')
+
+includes += include_directories('base')
+includes += include_directories('../../net/ipn3ke')
+includes += include_directories('../../net/i40e')
diff --git a/src/spdk/dpdk/drivers/raw/ifpga/rte_rawdev_ifpga_version.map b/src/spdk/dpdk/drivers/raw/ifpga/rte_rawdev_ifpga_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ifpga/rte_rawdev_ifpga_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/ioat/Makefile b/src/spdk/dpdk/drivers/raw/ioat/Makefile
new file mode 100644
index 000000000..1609fe5e6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_rawdev_ioat.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_rawdev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_mbuf -lrte_mempool
+
+# versioning export map
+EXPORT_MAP := rte_rawdev_ioat_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV) += ioat_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV) += ioat_rawdev_test.c
+
+# export include files
+SYMLINK-y-include += rte_ioat_rawdev.h
+SYMLINK-y-include += rte_ioat_spec.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev.c b/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev.c
new file mode 100644
index 000000000..a8a48f90a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev.c
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <rte_cycles.h>
+#include <rte_bus_pci.h>
+#include <rte_string_fns.h>
+#include <rte_rawdev_pmd.h>
+
+#include "rte_ioat_rawdev.h"
+
+/* Dynamic log type identifier */
+int ioat_pmd_logtype;
+
+static struct rte_pci_driver ioat_pmd_drv;
+
+#define IOAT_VENDOR_ID 0x8086
+#define IOAT_DEVICE_ID_SKX 0x2021
+#define IOAT_DEVICE_ID_BDX0 0x6f20
+#define IOAT_DEVICE_ID_BDX1 0x6f21
+#define IOAT_DEVICE_ID_BDX2 0x6f22
+#define IOAT_DEVICE_ID_BDX3 0x6f23
+#define IOAT_DEVICE_ID_BDX4 0x6f24
+#define IOAT_DEVICE_ID_BDX5 0x6f25
+#define IOAT_DEVICE_ID_BDX6 0x6f26
+#define IOAT_DEVICE_ID_BDX7 0x6f27
+#define IOAT_DEVICE_ID_BDXE 0x6f2E
+#define IOAT_DEVICE_ID_BDXF 0x6f2F
+#define IOAT_DEVICE_ID_ICX 0x0b00
+
+#define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \
+ ioat_pmd_logtype, "%s(): " fmt "\n", __func__, ##args)
+
+#define IOAT_PMD_DEBUG(fmt, args...) IOAT_PMD_LOG(DEBUG, fmt, ## args)
+#define IOAT_PMD_INFO(fmt, args...) IOAT_PMD_LOG(INFO, fmt, ## args)
+#define IOAT_PMD_ERR(fmt, args...) IOAT_PMD_LOG(ERR, fmt, ## args)
+#define IOAT_PMD_WARN(fmt, args...) IOAT_PMD_LOG(WARNING, fmt, ## args)
+
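+/* each ring slot pairs a hardware descriptor with a 16-byte completion
+ * record holding the caller's src/dst handles
+ */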
+#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
+#define COMPLETION_SZ sizeof(__m128i)
+
+static int
+ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
+{
+ struct rte_ioat_rawdev_config *params = config;
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ unsigned short i;
+
+ if (dev->started)
+ return -EBUSY;
+
+ if (params == NULL)
+ return -EINVAL;
+
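+	/* the ring must hold a power-of-two number of descriptors, between 64 and 4096 */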
+ if (params->ring_size > 4096 || params->ring_size < 64 ||
+ !rte_is_power_of_2(params->ring_size))
+ return -EINVAL;
+
+ ioat->ring_size = params->ring_size;
+ if (ioat->desc_ring != NULL) {
+ rte_memzone_free(ioat->desc_mz);
+ ioat->desc_ring = NULL;
+ ioat->desc_mz = NULL;
+ }
+
+ /* allocate one block of memory for both descriptors
+ * and completion handles.
+ */
+ snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
+ ioat->desc_mz = rte_memzone_reserve(mz_name,
+ (DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
+ dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
+ if (ioat->desc_mz == NULL)
+ return -ENOMEM;
+ ioat->desc_ring = ioat->desc_mz->addr;
+ ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];
+
+ ioat->ring_addr = ioat->desc_mz->iova;
+
+ /* configure descriptor ring - each one points to next */
+ for (i = 0; i < ioat->ring_size; i++) {
+ ioat->desc_ring[i].next = ioat->ring_addr +
+ (((i + 1) % ioat->ring_size) * DESC_SZ);
+ }
+
+ return 0;
+}
+
+static int
+ioat_dev_start(struct rte_rawdev *dev)
+{
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+
+ if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
+ return -EBUSY;
+
+ /* inform hardware of where the descriptor ring is */
+ ioat->regs->chainaddr = ioat->ring_addr;
+ /* inform hardware of where to write the status/completions */
+ ioat->regs->chancmp = ioat->status_addr;
+
+ /* prime the status register to be set to the last element */
+ ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
+ return 0;
+}
+
+static void
+ioat_dev_stop(struct rte_rawdev *dev)
+{
+ RTE_SET_USED(dev);
+}
+
+static void
+ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
+{
+ struct rte_ioat_rawdev_config *cfg = dev_info;
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+
+ if (cfg != NULL)
+ cfg->ring_size = ioat->ring_size;
+}
+
+static const char * const xstat_names[] = {
+ "failed_enqueues", "successful_enqueues",
+ "copies_started", "copies_completed"
+};
+
+static int
+ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ const struct rte_ioat_rawdev *ioat = dev->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ switch (ids[i]) {
+ case 0: values[i] = ioat->enqueue_failed; break;
+ case 1: values[i] = ioat->enqueued; break;
+ case 2: values[i] = ioat->started; break;
+ case 3: values[i] = ioat->completed; break;
+ default: values[i] = 0; break;
+ }
+ }
+ return n;
+}
+
+static int
+ioat_xstats_get_names(const struct rte_rawdev *dev,
+ struct rte_rawdev_xstats_name *names,
+ unsigned int size)
+{
+ unsigned int i;
+
+ RTE_SET_USED(dev);
+ if (size < RTE_DIM(xstat_names))
+ return RTE_DIM(xstat_names);
+
+ for (i = 0; i < RTE_DIM(xstat_names); i++)
+ strlcpy(names[i].name, xstat_names[i], sizeof(names[i]));
+
+ return RTE_DIM(xstat_names);
+}
+
+static int
+ioat_xstats_reset(struct rte_rawdev *dev, const uint32_t *ids, uint32_t nb_ids)
+{
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+ unsigned int i;
+
+ if (!ids) {
+ ioat->enqueue_failed = 0;
+ ioat->enqueued = 0;
+ ioat->started = 0;
+ ioat->completed = 0;
+ return 0;
+ }
+
+ for (i = 0; i < nb_ids; i++) {
+ switch (ids[i]) {
+ case 0:
+ ioat->enqueue_failed = 0;
+ break;
+ case 1:
+ ioat->enqueued = 0;
+ break;
+ case 2:
+ ioat->started = 0;
+ break;
+ case 3:
+ ioat->completed = 0;
+ break;
+ default:
+ IOAT_PMD_WARN("Invalid xstat id - cannot reset value");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+extern int ioat_rawdev_test(uint16_t dev_id);
+
+static int
+ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
+{
+ static const struct rte_rawdev_ops ioat_rawdev_ops = {
+ .dev_configure = ioat_dev_configure,
+ .dev_start = ioat_dev_start,
+ .dev_stop = ioat_dev_stop,
+ .dev_info_get = ioat_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
+ .dev_selftest = ioat_rawdev_test,
+ };
+
+ struct rte_rawdev *rawdev = NULL;
+ struct rte_ioat_rawdev *ioat = NULL;
+ const struct rte_memzone *mz = NULL;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int ret = 0;
+ int retry = 0;
+
+ if (!name) {
+ IOAT_PMD_ERR("Invalid name of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
+ dev->device.numa_node);
+ if (rawdev == NULL) {
+ IOAT_PMD_ERR("Unable to allocate raw device");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
+ mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
+ dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL) {
+ IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ rawdev->dev_private = mz->addr;
+ rawdev->dev_ops = &ioat_rawdev_ops;
+ rawdev->device = &dev->device;
+ rawdev->driver_name = dev->device.driver->name;
+
+ ioat = rawdev->dev_private;
+ ioat->rawdev = rawdev;
+ ioat->mz = mz;
+ ioat->regs = dev->mem_resource[0].addr;
+ ioat->ring_size = 0;
+ ioat->desc_ring = NULL;
+ ioat->status_addr = ioat->mz->iova +
+ offsetof(struct rte_ioat_rawdev, status);
+
+ /* do device initialization - reset and set error behaviour */
+ if (ioat->regs->chancnt != 1)
+ IOAT_PMD_ERR("%s: Channel count == %d\n", __func__,
+ ioat->regs->chancnt);
+
+ if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
+ IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
+ ioat->regs->chanctrl = 0;
+ }
+
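+	/* suspend, then reset the channel, polling until hardware clears the reset bit */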
+ ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
+ rte_delay_ms(1);
+ ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
+ rte_delay_ms(1);
+ while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
+ ioat->regs->chainaddr = 0;
+ rte_delay_ms(1);
+ if (++retry >= 200) {
+ IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32"\n",
+ __func__,
+ ioat->regs->chancmd,
+ ioat->regs->chansts,
+ ioat->regs->chanerr);
+			ret = -EIO;
+			goto cleanup;
+		}
+ }
+ ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
+ RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;
+
+ return 0;
+
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+ioat_rawdev_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ if (!name) {
+ IOAT_PMD_ERR("Invalid device name");
+ return -EINVAL;
+ }
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ IOAT_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ if (rdev->dev_private != NULL) {
+ struct rte_ioat_rawdev *ioat = rdev->dev_private;
+ rdev->dev_private = NULL;
+ rte_memzone_free(ioat->desc_mz);
+ rte_memzone_free(ioat->mz);
+ }
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ IOAT_PMD_DEBUG("Device cleanup failed");
+
+	return ret;
+}
+
+static int
+ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
+{
+ char name[32];
+ int ret = 0;
+
+
+ rte_pci_device_name(&dev->addr, name, sizeof(name));
+ IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
+
+ dev->device.driver = &drv->driver;
+ ret = ioat_rawdev_create(name, dev);
+ return ret;
+}
+
+static int
+ioat_rawdev_remove(struct rte_pci_device *dev)
+{
+ char name[32];
+ int ret;
+
+ rte_pci_device_name(&dev->addr, name, sizeof(name));
+
+ IOAT_PMD_INFO("Closing %s on NUMA node %d",
+ name, dev->device.numa_node);
+
+ ret = ioat_rawdev_destroy(name);
+ return ret;
+}
+
+static const struct rte_pci_id pci_id_ioat_map[] = {
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver ioat_pmd_drv = {
+ .id_table = pci_id_ioat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = ioat_rawdev_probe,
+ .remove = ioat_rawdev_remove,
+};
+
+RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
+RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
+RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");
+
+RTE_INIT(ioat_pmd_init_log)
+{
+ ioat_pmd_logtype = rte_log_register(IOAT_PMD_LOG_NAME);
+ if (ioat_pmd_logtype >= 0)
+ rte_log_set_level(ioat_pmd_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev_test.c b/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev_test.c
new file mode 100644
index 000000000..c37351af2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev_test.c
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <rte_mbuf.h>
+#include "rte_rawdev.h"
+#include "rte_ioat_rawdev.h"
+
+int ioat_rawdev_test(uint16_t dev_id); /* pre-define to keep compiler happy */
+
+static struct rte_mempool *pool;
+static unsigned short expected_ring_size;
+
+static int
+test_enqueue_copies(int dev_id)
+{
+ const unsigned int length = 1024;
+ unsigned int i;
+
+ do {
+ struct rte_mbuf *src, *dst;
+ char *src_data, *dst_data;
+ struct rte_mbuf *completed[2] = {0};
+
+ /* test doing a single copy */
+ src = rte_pktmbuf_alloc(pool);
+ dst = rte_pktmbuf_alloc(pool);
+ src->data_len = src->pkt_len = length;
+ dst->data_len = dst->pkt_len = length;
+ src_data = rte_pktmbuf_mtod(src, char *);
+ dst_data = rte_pktmbuf_mtod(dst, char *);
+
+ for (i = 0; i < length; i++)
+ src_data[i] = rand() & 0xFF;
+
+ if (rte_ioat_enqueue_copy(dev_id,
+ src->buf_iova + src->data_off,
+ dst->buf_iova + dst->data_off,
+ length,
+ (uintptr_t)src,
+ (uintptr_t)dst,
+ 0 /* no fence */) != 1) {
+ printf("Error with rte_ioat_enqueue_copy\n");
+ return -1;
+ }
+ rte_ioat_do_copies(dev_id);
+ usleep(10);
+
+ if (rte_ioat_completed_copies(dev_id, 1, (void *)&completed[0],
+ (void *)&completed[1]) != 1) {
+ printf("Error with rte_ioat_completed_copies\n");
+ return -1;
+ }
+ if (completed[0] != src || completed[1] != dst) {
+ printf("Error with completions: got (%p, %p), not (%p,%p)\n",
+ completed[0], completed[1], src, dst);
+ return -1;
+ }
+
+ for (i = 0; i < length; i++)
+ if (dst_data[i] != src_data[i]) {
+ printf("Data mismatch at char %u\n", i);
+ return -1;
+ }
+ rte_pktmbuf_free(src);
+ rte_pktmbuf_free(dst);
+ } while (0);
+
+ /* test doing multiple copies */
+ do {
+ struct rte_mbuf *srcs[32], *dsts[32];
+ struct rte_mbuf *completed_src[64];
+ struct rte_mbuf *completed_dst[64];
+ unsigned int j;
+
+ for (i = 0; i < RTE_DIM(srcs); i++) {
+ char *src_data;
+
+ srcs[i] = rte_pktmbuf_alloc(pool);
+ dsts[i] = rte_pktmbuf_alloc(pool);
+ srcs[i]->data_len = srcs[i]->pkt_len = length;
+ dsts[i]->data_len = dsts[i]->pkt_len = length;
+ src_data = rte_pktmbuf_mtod(srcs[i], char *);
+
+ for (j = 0; j < length; j++)
+ src_data[j] = rand() & 0xFF;
+
+ if (rte_ioat_enqueue_copy(dev_id,
+ srcs[i]->buf_iova + srcs[i]->data_off,
+ dsts[i]->buf_iova + dsts[i]->data_off,
+ length,
+ (uintptr_t)srcs[i],
+ (uintptr_t)dsts[i],
+ 0 /* no fence */) != 1) {
+ printf("Error with rte_ioat_enqueue_copy for buffer %u\n",
+ i);
+ return -1;
+ }
+ }
+ rte_ioat_do_copies(dev_id);
+ usleep(100);
+
+ if (rte_ioat_completed_copies(dev_id, 64, (void *)completed_src,
+ (void *)completed_dst) != RTE_DIM(srcs)) {
+ printf("Error with rte_ioat_completed_copies\n");
+ return -1;
+ }
+ for (i = 0; i < RTE_DIM(srcs); i++) {
+ char *src_data, *dst_data;
+
+ if (completed_src[i] != srcs[i]) {
+ printf("Error with source pointer %u\n", i);
+ return -1;
+ }
+ if (completed_dst[i] != dsts[i]) {
+ printf("Error with dest pointer %u\n", i);
+ return -1;
+ }
+
+ src_data = rte_pktmbuf_mtod(srcs[i], char *);
+ dst_data = rte_pktmbuf_mtod(dsts[i], char *);
+ for (j = 0; j < length; j++)
+ if (src_data[j] != dst_data[j]) {
+ printf("Error with copy of packet %u, byte %u\n",
+ i, j);
+ return -1;
+ }
+ rte_pktmbuf_free(srcs[i]);
+ rte_pktmbuf_free(dsts[i]);
+ }
+
+ } while (0);
+
+ return 0;
+}
+
+int
+ioat_rawdev_test(uint16_t dev_id)
+{
+#define IOAT_TEST_RINGSIZE 512
+ struct rte_ioat_rawdev_config p = { .ring_size = -1 };
+ struct rte_rawdev_info info = { .dev_private = &p };
+ struct rte_rawdev_xstats_name *snames = NULL;
+ uint64_t *stats = NULL;
+ unsigned int *ids = NULL;
+ unsigned int nb_xstats;
+ unsigned int i;
+
+ rte_rawdev_info_get(dev_id, &info);
+ if (p.ring_size != expected_ring_size) {
+ printf("Error, initial ring size is not as expected (Actual: %d, Expected: %d)\n",
+ (int)p.ring_size, expected_ring_size);
+ return -1;
+ }
+
+ p.ring_size = IOAT_TEST_RINGSIZE;
+ if (rte_rawdev_configure(dev_id, &info) != 0) {
+ printf("Error with rte_rawdev_configure()\n");
+ return -1;
+ }
+ rte_rawdev_info_get(dev_id, &info);
+ if (p.ring_size != IOAT_TEST_RINGSIZE) {
+ printf("Error, ring size is not %d (%d)\n",
+ IOAT_TEST_RINGSIZE, (int)p.ring_size);
+ return -1;
+ }
+ expected_ring_size = p.ring_size;
+
+ if (rte_rawdev_start(dev_id) != 0) {
+ printf("Error with rte_rawdev_start()\n");
+ return -1;
+ }
+
+ pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
+ 256, /* n == num elements */
+ 32, /* cache size */
+ 0, /* priv size */
+ 2048, /* data room size */
+ info.socket_id);
+ if (pool == NULL) {
+ printf("Error with mempool creation\n");
+ return -1;
+ }
+
+ /* allocate memory for xstats names and values */
+ nb_xstats = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
+
+ snames = malloc(sizeof(*snames) * nb_xstats);
+ if (snames == NULL) {
+ printf("Error allocating xstat names memory\n");
+ goto err;
+ }
+ rte_rawdev_xstats_names_get(dev_id, snames, nb_xstats);
+
+ ids = malloc(sizeof(*ids) * nb_xstats);
+ if (ids == NULL) {
+ printf("Error allocating xstat ids memory\n");
+ goto err;
+ }
+ for (i = 0; i < nb_xstats; i++)
+ ids[i] = i;
+
+ stats = malloc(sizeof(*stats) * nb_xstats);
+ if (stats == NULL) {
+ printf("Error allocating xstat memory\n");
+ goto err;
+ }
+
+ /* run the test cases */
+ for (i = 0; i < 100; i++) {
+ unsigned int j;
+
+ if (test_enqueue_copies(dev_id) != 0)
+ goto err;
+
+ rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
+ for (j = 0; j < nb_xstats; j++)
+ printf("%s: %"PRIu64" ", snames[j].name, stats[j]);
+ printf("\r");
+ }
+ printf("\n");
+
+ rte_rawdev_stop(dev_id);
+ if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
+ printf("Error resetting xstat values\n");
+ goto err;
+ }
+
+ rte_mempool_free(pool);
+ free(snames);
+ free(stats);
+ free(ids);
+ return 0;
+
+err:
+ rte_rawdev_stop(dev_id);
+ rte_rawdev_xstats_reset(dev_id, NULL, 0);
+ rte_mempool_free(pool);
+ free(snames);
+ free(stats);
+ free(ids);
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ioat/meson.build b/src/spdk/dpdk/drivers/raw/ioat/meson.build
new file mode 100644
index 000000000..0878418ae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2019 Intel Corporation
+
+build = dpdk_conf.has('RTE_ARCH_X86')
+reason = 'only supported on x86'
+sources = files('ioat_rawdev.c',
+ 'ioat_rawdev_test.c')
+deps += ['rawdev', 'bus_pci', 'mbuf']
+
+install_headers('rte_ioat_rawdev.h',
+ 'rte_ioat_spec.h')
diff --git a/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_rawdev.h b/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_rawdev.h
new file mode 100644
index 000000000..4b271f174
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_rawdev.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_IOAT_RAWDEV_H_
+#define _RTE_IOAT_RAWDEV_H_
+
+/**
+ * @file rte_ioat_rawdev.h
+ *
+ * Definitions for using the ioat rawdev device driver
+ *
+ * @warning
+ * @b EXPERIMENTAL: these structures and APIs may change without prior notice
+ */
+
+#include <x86intrin.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_prefetch.h>
+#include "rte_ioat_spec.h"
+
+/** Name of the device driver */
+#define IOAT_PMD_RAWDEV_NAME rawdev_ioat
+/** String reported as the device driver name by rte_rawdev_info_get() */
+#define IOAT_PMD_RAWDEV_NAME_STR "rawdev_ioat"
+/** Name used to adjust the log level for this driver */
+#define IOAT_PMD_LOG_NAME "rawdev.ioat"
+
+/**
+ * Configuration structure for an ioat rawdev instance
+ *
+ * This structure is to be passed as the ".dev_private" parameter when
+ * calling the rte_rawdev_info_get() and rte_rawdev_configure() APIs on
+ * an ioat rawdev instance.
+ */
+struct rte_ioat_rawdev_config {
+ unsigned short ring_size;
+};
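+
+/*
+ * A minimal configuration sketch (assumptions: "dev_id" is a valid ioat
+ * rawdev instance id; error handling is elided):
+ *
+ *	struct rte_ioat_rawdev_config cfg = { .ring_size = 512 };
+ *	struct rte_rawdev_info info = { .dev_private = &cfg };
+ *
+ *	rte_rawdev_configure(dev_id, &info);
+ *	rte_rawdev_info_get(dev_id, &info); // cfg.ring_size now reports the
+ *	                                    // ring size actually in use
+ */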
+
+/**
+ * @internal
+ * Structure representing a device instance
+ */
+struct rte_ioat_rawdev {
+ struct rte_rawdev *rawdev;
+ const struct rte_memzone *mz;
+ const struct rte_memzone *desc_mz;
+
+ volatile struct rte_ioat_registers *regs;
+ phys_addr_t status_addr;
+ phys_addr_t ring_addr;
+
+ unsigned short ring_size;
+ struct rte_ioat_generic_hw_desc *desc_ring;
+ __m128i *hdls; /* completion handles for returning to user */
+
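+ /* free-running ring indices; the inline datapath functions mask these
+ * with (ring_size - 1), which assumes ring_size is a power of two
+ */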
+ unsigned short next_read;
+ unsigned short next_write;
+
+ /* some statistics for tracking; if added/changed, update the xstats fns */
+ uint64_t enqueue_failed __rte_cache_aligned;
+ uint64_t enqueued;
+ uint64_t started;
+ uint64_t completed;
+
+ /* to report completions, the device will write status back here */
+ volatile uint64_t status __rte_cache_aligned;
+};
+
+/**
+ * Enqueue a copy operation onto the ioat device
+ *
+ * This queues up a copy operation to be performed by hardware, but does not
+ * trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ * The rawdev device id of the ioat instance
+ * @param src
+ * The physical address of the source buffer
+ * @param dst
+ * The physical address of the destination buffer
+ * @param length
+ * The length of the data to be copied
+ * @param src_hdl
+ * An opaque handle for the source data, to be returned when this operation
+ * has been completed and the user polls for the completion details
+ * @param dst_hdl
+ * An opaque handle for the destination data, to be returned when this
+ * operation has been completed and the user polls for the completion details
+ * @param fence
+ * A flag parameter indicating that hardware should not begin to perform any
+ * subsequently enqueued copy operations until after this operation has
+ * completed
+ * @return
+ * Number of operations enqueued, either 0 or 1
+ */
+static inline int
+rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+ unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
+ int fence)
+{
+ struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
+ unsigned short read = ioat->next_read;
+ unsigned short write = ioat->next_write;
+ unsigned short mask = ioat->ring_size - 1;
+ unsigned short space = mask + read - write;
+ struct rte_ioat_generic_hw_desc *desc;
+
+ if (space == 0) {
+ ioat->enqueue_failed++;
+ return 0;
+ }
+
+ ioat->next_write = write + 1;
+ write &= mask;
+
+ desc = &ioat->desc_ring[write];
+ desc->size = length;
+ /* set descriptor write-back every 16th descriptor */
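+ /* bit 3 is completion_update (status write-back), bit 4 is the fence
+ * flag; see the control bit-field in rte_ioat_spec.h
+ */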
+ desc->u.control_raw = (uint32_t)((!!fence << 4) | (!(write & 0xF)) << 3);
+ desc->src_addr = src;
+ desc->dest_addr = dst;
+
+ ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl, (int64_t)src_hdl);
+ rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
+
+ ioat->enqueued++;
+ return 1;
+}
+
+/**
+ * Trigger hardware to begin performing enqueued copy operations
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the copy operations previously enqueued by rte_ioat_enqueue_copy()
+ *
+ * @param dev_id
+ * The rawdev device id of the ioat instance
+ */
+static inline void
+rte_ioat_do_copies(int dev_id)
+{
+ struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
+ ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
+ .control.completion_update = 1;
+ rte_compiler_barrier();
+ ioat->regs->dmacount = ioat->next_write;
+ ioat->started = ioat->enqueued;
+}
+
+/**
+ * @internal
+ * Returns the index of the last completed operation.
+ */
+static inline int
+rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
+{
+ uint64_t status = ioat->status;
+
+ /* lower 3 bits indicate "transfer status" : active, idle, halted.
+ * We can ignore bit 0.
+ */
+ *error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
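+ /* the upper bits of the status register hold the bus address of the
+ * last completed 64-byte descriptor, hence the shift by 6 below
+ */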
+ return (status - ioat->ring_addr) >> 6;
+}
+
+/**
+ * Returns details of copy operations that have been completed
+ *
+ * Returns to the caller the user-provided "handles" for the copy operations
+ * which have been completed by the hardware, and not already returned by
+ * a previous call to this API.
+ *
+ * @param dev_id
+ * The rawdev device id of the ioat instance
+ * @param max_copies
+ * The number of entries which can fit in the src_hdls and dst_hdls
+ * arrays, i.e. max number of completed operations to report
+ * @param src_hdls
+ * Array to hold the source handle parameters of the completed copies
+ * @param dst_hdls
+ * Array to hold the destination handle parameters of the completed copies
+ * @return
+ * -1 on error, with rte_errno set appropriately.
+ * Otherwise number of completed operations i.e. number of entries written
+ * to the src_hdls and dst_hdls array parameters.
+ */
+static inline int
+rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+ uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+ struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
+ unsigned short mask = (ioat->ring_size - 1);
+ unsigned short read = ioat->next_read;
+ unsigned short end_read, count;
+ int error;
+ int i = 0;
+
+ end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
+ count = (end_read - (read & mask)) & mask;
+
+ if (error) {
+ rte_errno = EIO;
+ return -1;
+ }
+
+ if (count > max_copies)
+ count = max_copies;
+
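+ /* each hdls[] entry packs (src_hdl, dst_hdl) as set by
+ * rte_ioat_enqueue_copy(); unpacklo gathers the two source handles of
+ * a pair of entries, unpackhi the two destination handles
+ */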
+ for (; i < count - 1; i += 2, read += 2) {
+ __m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
+ __m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);
+
+ _mm_storeu_si128((void *)&src_hdls[i],
+ _mm_unpacklo_epi64(hdls0, hdls1));
+ _mm_storeu_si128((void *)&dst_hdls[i],
+ _mm_unpackhi_epi64(hdls0, hdls1));
+ }
+ for (; i < count; i++, read++) {
+ uintptr_t *hdls = (void *)&ioat->hdls[read & mask];
+ src_hdls[i] = hdls[0];
+ dst_hdls[i] = hdls[1];
+ }
+
+ ioat->next_read = read;
+ ioat->completed += count;
+ return count;
+}
+
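+/*
+ * Typical datapath flow, as exercised in ioat_rawdev_test.c (sketch only;
+ * "dev_id", the physical addresses and the handle pointers are assumed to
+ * be prepared by the caller):
+ *
+ *	uintptr_t srcs[32], dsts[32];
+ *	int n;
+ *
+ *	rte_ioat_enqueue_copy(dev_id, src_phys, dst_phys, len,
+ *			(uintptr_t)src_hdl, (uintptr_t)dst_hdl, 0);
+ *	rte_ioat_do_copies(dev_id); // ring the doorbell
+ *	do {
+ *		n = rte_ioat_completed_copies(dev_id, RTE_DIM(srcs),
+ *				srcs, dsts);
+ *	} while (n == 0); // n < 0 indicates a channel error (rte_errno set)
+ */
+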
+#endif /* _RTE_IOAT_RAWDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_spec.h b/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_spec.h
new file mode 100644
index 000000000..c6e7929b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_spec.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) Intel Corporation
+ */
+
+/**
+ * \file
+ * I/OAT specification definitions
+ *
+ * Taken from ioat_spec.h from SPDK project, with prefix renames and
+ * other minor changes.
+ */
+
+#ifndef RTE_IOAT_SPEC_H
+#define RTE_IOAT_SPEC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#define RTE_IOAT_PCI_CHANERR_INT_OFFSET 0x180
+
+#define RTE_IOAT_INTRCTRL_MASTER_INT_EN 0x01
+
+#define RTE_IOAT_VER_3_0 0x30
+#define RTE_IOAT_VER_3_3 0x33
+
+/* DMA Channel Registers */
+#define RTE_IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
+#define RTE_IOAT_CHANCTRL_COMPL_DCA_EN 0x0200
+#define RTE_IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
+#define RTE_IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
+#define RTE_IOAT_CHANCTRL_ERR_INT_EN 0x0010
+#define RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
+#define RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
+#define RTE_IOAT_CHANCTRL_INT_REARM 0x0001
+
+/* DMA Channel Capabilities */
+#define RTE_IOAT_DMACAP_PB (1 << 0)
+#define RTE_IOAT_DMACAP_DCA (1 << 4)
+#define RTE_IOAT_DMACAP_BFILL (1 << 6)
+#define RTE_IOAT_DMACAP_XOR (1 << 8)
+#define RTE_IOAT_DMACAP_PQ (1 << 9)
+#define RTE_IOAT_DMACAP_DMA_DIF (1 << 10)
+
+struct rte_ioat_registers {
+ uint8_t chancnt;
+ uint8_t xfercap;
+ uint8_t genctrl;
+ uint8_t intrctrl;
+ uint32_t attnstatus;
+ uint8_t cbver; /* 0x08 */
+ uint8_t reserved4[0x3]; /* 0x09 */
+ uint16_t intrdelay; /* 0x0C */
+ uint16_t cs_status; /* 0x0E */
+ uint32_t dmacapability; /* 0x10 */
+ uint8_t reserved5[0x6C]; /* 0x14 */
+ uint16_t chanctrl; /* 0x80 */
+ uint8_t reserved6[0x2]; /* 0x82 */
+ uint8_t chancmd; /* 0x84 */
+ uint8_t reserved3[1]; /* 0x85 */
+ uint16_t dmacount; /* 0x86 */
+ uint64_t chansts; /* 0x88 */
+ uint64_t chainaddr; /* 0x90 */
+ uint64_t chancmp; /* 0x98 */
+ uint8_t reserved2[0x8]; /* 0xA0 */
+ uint32_t chanerr; /* 0xA8 */
+ uint32_t chanerrmask; /* 0xAC */
+} __rte_packed;
+
+#define RTE_IOAT_CHANCMD_RESET 0x20
+#define RTE_IOAT_CHANCMD_SUSPEND 0x04
+
+#define RTE_IOAT_CHANSTS_STATUS 0x7ULL
+#define RTE_IOAT_CHANSTS_ACTIVE 0x0
+#define RTE_IOAT_CHANSTS_IDLE 0x1
+#define RTE_IOAT_CHANSTS_SUSPENDED 0x2
+#define RTE_IOAT_CHANSTS_HALTED 0x3
+#define RTE_IOAT_CHANSTS_ARMED 0x4
+
+#define RTE_IOAT_CHANSTS_UNAFFILIATED_ERROR 0x8ULL
+#define RTE_IOAT_CHANSTS_SOFT_ERROR 0x10ULL
+
+#define RTE_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK (~0x3FULL)
+
+#define RTE_IOAT_CHANCMP_ALIGN 8 /* CHANCMP address must be 64-bit aligned */
+
+struct rte_ioat_generic_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t reserved2: 1;
+ uint32_t src_page_break: 1;
+ uint32_t dest_page_break: 1;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t reserved: 13;
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t op_specific[4];
+};
+
+struct rte_ioat_dma_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t null: 1;
+ uint32_t src_page_break: 1;
+ uint32_t dest_page_break: 1;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t reserved: 13;
+#define RTE_IOAT_OP_COPY 0x00
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t reserved;
+ uint64_t reserved2;
+ uint64_t user1;
+ uint64_t user2;
+};
+
+struct rte_ioat_fill_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t reserved: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t reserved2: 2;
+ uint32_t dest_page_break: 1;
+ uint32_t bundle: 1;
+ uint32_t reserved3: 15;
+#define RTE_IOAT_OP_FILL 0x01
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_data;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t reserved;
+ uint64_t next_dest_addr;
+ uint64_t user1;
+ uint64_t user2;
+};
+
+struct rte_ioat_xor_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t src_count: 3;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t reserved: 13;
+#define RTE_IOAT_OP_XOR 0x87
+#define RTE_IOAT_OP_XOR_VAL 0x88
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t src_addr3;
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+};
+
+struct rte_ioat_xor_ext_hw_desc {
+ uint64_t src_addr6;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t next;
+ uint64_t reserved[4];
+};
+
+struct rte_ioat_pq_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t src_count: 3;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t p_disable: 1;
+ uint32_t q_disable: 1;
+ uint32_t reserved: 11;
+#define RTE_IOAT_OP_PQ 0x89
+#define RTE_IOAT_OP_PQ_VAL 0x8a
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t p_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t src_addr3;
+ uint8_t coef[8];
+ uint64_t q_addr;
+};
+
+struct rte_ioat_pq_ext_hw_desc {
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+ uint64_t src_addr6;
+ uint64_t next;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t reserved[2];
+};
+
+struct rte_ioat_pq_update_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t src_cnt: 3;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t p_disable: 1;
+ uint32_t q_disable: 1;
+ uint32_t reserved: 3;
+ uint32_t coef: 8;
+#define RTE_IOAT_OP_PQ_UP 0x8b
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t p_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t p_src;
+ uint64_t q_src;
+ uint64_t q_addr;
+};
+
+struct rte_ioat_raw_hw_desc {
+ uint64_t field[8];
+};
+
+union rte_ioat_hw_desc {
+ struct rte_ioat_raw_hw_desc raw;
+ struct rte_ioat_generic_hw_desc generic;
+ struct rte_ioat_dma_hw_desc dma;
+ struct rte_ioat_fill_hw_desc fill;
+ struct rte_ioat_xor_hw_desc xor_desc;
+ struct rte_ioat_xor_ext_hw_desc xor_ext;
+ struct rte_ioat_pq_hw_desc pq;
+ struct rte_ioat_pq_ext_hw_desc pq_ext;
+ struct rte_ioat_pq_update_hw_desc pq_update;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_IOAT_SPEC_H */
diff --git a/src/spdk/dpdk/drivers/raw/ioat/rte_rawdev_ioat_version.map b/src/spdk/dpdk/drivers/raw/ioat/rte_rawdev_ioat_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/rte_rawdev_ioat_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/meson.build b/src/spdk/dpdk/drivers/raw/meson.build
new file mode 100644
index 000000000..bb5797760
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+drivers = ['dpaa2_cmdif', 'dpaa2_qdma',
+ 'ifpga', 'ioat', 'ntb',
+ 'octeontx2_dma',
+ 'octeontx2_ep',
+ 'skeleton']
+std_deps = ['rawdev']
+config_flag_fmt = 'RTE_LIBRTE_PMD_@0@_RAWDEV'
+driver_name_fmt = 'rte_rawdev_@0@'
diff --git a/src/spdk/dpdk/drivers/raw/ntb/Makefile b/src/spdk/dpdk/drivers/raw/ntb/Makefile
new file mode 100644
index 000000000..8f0bff9b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ntb/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_rawdev_ntb.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_rawdev
+
+EXPORT_MAP := rte_rawdev_ntb_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV) += ntb.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV) += ntb_hw_intel.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV)-include := rte_pmd_ntb.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/ntb/meson.build b/src/spdk/dpdk/drivers/raw/ntb/meson.build
new file mode 100644
index 000000000..2926193a0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ntb/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation.
+
+deps += ['rawdev', 'mbuf', 'mempool',
+ 'pci', 'bus_pci']
+sources = files('ntb.c',
+ 'ntb_hw_intel.c')
+install_headers('rte_pmd_ntb.h')
diff --git a/src/spdk/dpdk/drivers/raw/ntb/ntb.c b/src/spdk/dpdk/drivers/raw/ntb/ntb.c
new file mode 100644
index 000000000..dd0b72f8c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ntb/ntb.c
@@ -0,0 +1,1513 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation.
+ */
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_mbuf.h>
+#include <rte_bus_pci.h>
+#include <rte_memzone.h>
+#include <rte_memcpy.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "ntb_hw_intel.h"
+#include "rte_pmd_ntb.h"
+#include "ntb.h"
+
+int ntb_logtype;
+
+static const struct rte_pci_id pci_id_ntb_map[] = {
+ { RTE_PCI_DEVICE(NTB_INTEL_VENDOR_ID, NTB_INTEL_DEV_ID_B2B_SKX) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+/* Align with enum ntb_xstats_idx */
+static struct rte_rawdev_xstats_name ntb_xstats_names[] = {
+ {"Tx-packets"},
+ {"Tx-bytes"},
+ {"Tx-errors"},
+ {"Rx-packets"},
+ {"Rx-bytes"},
+ {"Rx-missed"},
+};
+#define NTB_XSTATS_NUM RTE_DIM(ntb_xstats_names)
+
+static inline void
+ntb_link_cleanup(struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ int status, i;
+
+ if (hw->ntb_ops->spad_write == NULL ||
+ hw->ntb_ops->mw_set_trans == NULL) {
+ NTB_LOG(ERR, "Not supported to clean up link.");
+ return;
+ }
+
+ /* Clean spad registers. */
+ for (i = 0; i < hw->spad_cnt; i++) {
+ status = (*hw->ntb_ops->spad_write)(dev, i, 0, 0);
+ if (status)
+ NTB_LOG(ERR, "Failed to clean local spad.");
+ }
+
+ /* Clear mw so that peer cannot access local memory.*/
+ for (i = 0; i < hw->used_mw_num; i++) {
+ status = (*hw->ntb_ops->mw_set_trans)(dev, i, 0, 0);
+ if (status)
+ NTB_LOG(ERR, "Failed to clean mw.");
+ }
+}
+
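+/* Publish the local mw and queue configuration to the peer through the
+ * scratchpad registers, map the local memzones into the mw's, and ring
+ * doorbell 0 to tell the peer this side is ready.
+ */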
+static inline int
+ntb_handshake_work(const struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t val;
+ int ret, i;
+
+ if (hw->ntb_ops->spad_write == NULL ||
+ hw->ntb_ops->mw_set_trans == NULL) {
+ NTB_LOG(ERR, "Scratchpad/MW setting is not supported.");
+ return -ENOTSUP;
+ }
+
+ /* Tell peer the mw info of local side. */
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS, 1, hw->mw_cnt);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < hw->mw_cnt; i++) {
+ NTB_LOG(INFO, "Local %u mw size: 0x%"PRIx64"", i,
+ hw->mw_size[i]);
+ val = hw->mw_size[i] >> 32;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_H + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ val = hw->mw_size[i];
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_L + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Tell peer about the queue info and map memory to the peer. */
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_Q_SZ, 1, hw->queue_size);
+ if (ret < 0)
+ return ret;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_QPS, 1,
+ hw->queue_pairs);
+ if (ret < 0)
+ return ret;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_USED_MWS, 1,
+ hw->used_mw_num);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < hw->used_mw_num; i++) {
+ val = (uint64_t)(size_t)(hw->mz[i]->addr) >> 32;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_BA_H + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ val = (uint64_t)(size_t)(hw->mz[i]->addr);
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_BA_L + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < hw->used_mw_num; i++) {
+ ret = (*hw->ntb_ops->mw_set_trans)(dev, i, hw->mz[i]->iova,
+ hw->mz[i]->len);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Ring doorbell 0 to tell peer the device is ready. */
+ ret = (*hw->ntb_ops->peer_db_set)(dev, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+ntb_dev_intr_handler(void *param)
+{
+ struct rte_rawdev *dev = (struct rte_rawdev *)param;
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t val_h, val_l;
+ uint64_t peer_mw_size;
+ uint64_t db_bits = 0;
+ uint8_t peer_mw_cnt;
+ int i = 0;
+
+ if (hw->ntb_ops->db_read == NULL ||
+ hw->ntb_ops->db_clear == NULL ||
+ hw->ntb_ops->peer_db_set == NULL) {
+ NTB_LOG(ERR, "Doorbell is not supported.");
+ return;
+ }
+
+ db_bits = (*hw->ntb_ops->db_read)(dev);
+ if (!db_bits)
+ NTB_LOG(ERR, "No doorbells");
+
+ /* Doorbell 0 is for peer device ready. */
+ if (db_bits & 1) {
+ NTB_LOG(INFO, "DB0: Peer device is up.");
+ /* Clear received doorbell. */
+ (*hw->ntb_ops->db_clear)(dev, 1);
+
+ /**
+ * Peer dev is already up. All mw settings are already done.
+ * Skip them.
+ */
+ if (hw->peer_dev_up)
+ return;
+
+ if (hw->ntb_ops->spad_read == NULL) {
+ NTB_LOG(ERR, "Scratchpad read is not supported.");
+ return;
+ }
+
+ /* Check if mw setting on the peer is the same as local. */
+ peer_mw_cnt = (*hw->ntb_ops->spad_read)(dev, SPAD_NUM_MWS, 0);
+ if (peer_mw_cnt != hw->mw_cnt) {
+ NTB_LOG(ERR, "Both mw cnt must be the same.");
+ return;
+ }
+
+ for (i = 0; i < hw->mw_cnt; i++) {
+ val_h = (*hw->ntb_ops->spad_read)
+ (dev, SPAD_MW0_SZ_H + 2 * i, 0);
+ val_l = (*hw->ntb_ops->spad_read)
+ (dev, SPAD_MW0_SZ_L + 2 * i, 0);
+ peer_mw_size = ((uint64_t)val_h << 32) | val_l;
+ NTB_LOG(DEBUG, "Peer %u mw size: 0x%"PRIx64"", i,
+ peer_mw_size);
+ if (peer_mw_size != hw->mw_size[i]) {
+ NTB_LOG(ERR, "Mw config must be the same.");
+ return;
+ }
+ }
+
+ hw->peer_dev_up = 1;
+
+ /**
+ * Handshake with the peer. spad_write & mw_set_trans only work
+ * when both devices are up, so write the spads again now that
+ * the db has been received, and ring the db again for the later
+ * device, which may have missed the first one.
+ */
+ if (ntb_handshake_work(dev) < 0) {
+ NTB_LOG(ERR, "Handshake work failed.");
+ return;
+ }
+
+ /* To get the link info. */
+ if (hw->ntb_ops->get_link_status == NULL) {
+ NTB_LOG(ERR, "Not supported to get link status.");
+ return;
+ }
+ (*hw->ntb_ops->get_link_status)(dev);
+ NTB_LOG(INFO, "Link is up. Link speed: %u. Link width: %u",
+ hw->link_speed, hw->link_width);
+ return;
+ }
+
+ if (db_bits & (1 << 1)) {
+ NTB_LOG(INFO, "DB1: Peer device is down.");
+ /* Clear received doorbell. */
+ (*hw->ntb_ops->db_clear)(dev, 2);
+
+ /* The peer device will be down, so clean up the local side too. */
+ ntb_link_cleanup(dev);
+
+ hw->peer_dev_up = 0;
+ /* Respond to the peer's dev_stop request. */
+ (*hw->ntb_ops->peer_db_set)(dev, 2);
+ return;
+ }
+
+ if (db_bits & (1 << 2)) {
+ NTB_LOG(INFO, "DB2: Peer device agrees dev to be down.");
+ /* Clear received doorbell. */
+ (*hw->ntb_ops->db_clear)(dev, (1 << 2));
+ hw->peer_dev_up = 0;
+ return;
+ }
+}
+
+static void
+ntb_queue_conf_get(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct ntb_queue_conf *q_conf = queue_conf;
+ struct ntb_hw *hw = dev->dev_private;
+
+ q_conf->tx_free_thresh = hw->tx_queues[queue_id]->tx_free_thresh;
+ q_conf->nb_desc = hw->rx_queues[queue_id]->nb_rx_desc;
+ q_conf->rx_mp = hw->rx_queues[queue_id]->mpool;
+}
+
+static void
+ntb_rxq_release_mbufs(struct ntb_rx_queue *q)
+{
+ int i;
+
+ if (!q || !q->sw_ring) {
+ NTB_LOG(ERR, "Pointer to rxq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < q->nb_rx_desc; i++) {
+ if (q->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(q->sw_ring[i].mbuf);
+ q->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static void
+ntb_rxq_release(struct ntb_rx_queue *rxq)
+{
+ if (!rxq) {
+ NTB_LOG(ERR, "Pointer to rxq is NULL");
+ return;
+ }
+
+ ntb_rxq_release_mbufs(rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+static int
+ntb_rxq_setup(struct rte_rawdev *dev,
+ uint16_t qp_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct ntb_queue_conf *rxq_conf = queue_conf;
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_rx_queue *rxq;
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ntb rx queue",
+ sizeof(struct ntb_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!rxq) {
+ NTB_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure.");
+ return -ENOMEM;
+ }
+
+ if (rxq_conf->rx_mp == NULL) {
+ NTB_LOG(ERR, "Invalid null mempool pointer.");
+ rte_free(rxq); /* avoid leaking the queue structure */
+ return -EINVAL;
+ }
+ rxq->nb_rx_desc = rxq_conf->nb_desc;
+ rxq->mpool = rxq_conf->rx_mp;
+ rxq->port_id = dev->dev_id;
+ rxq->queue_id = qp_id;
+ rxq->hw = hw;
+
+ /* Allocate the software ring. */
+ rxq->sw_ring =
+ rte_zmalloc_socket("ntb rx sw ring",
+ sizeof(struct ntb_rx_entry) *
+ rxq->nb_rx_desc,
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!rxq->sw_ring) {
+ ntb_rxq_release(rxq);
+ rxq = NULL;
+ NTB_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+ hw->rx_queues[qp_id] = rxq;
+
+ return 0;
+}
+
+static void
+ntb_txq_release_mbufs(struct ntb_tx_queue *q)
+{
+ int i;
+
+ if (!q || !q->sw_ring) {
+ NTB_LOG(ERR, "Pointer to txq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < q->nb_tx_desc; i++) {
+ if (q->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(q->sw_ring[i].mbuf);
+ q->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static void
+ntb_txq_release(struct ntb_tx_queue *txq)
+{
+ if (!txq) {
+ NTB_LOG(ERR, "Pointer to txq is NULL");
+ return;
+ }
+
+ ntb_txq_release_mbufs(txq);
+
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+}
+
+static int
+ntb_txq_setup(struct rte_rawdev *dev,
+ uint16_t qp_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct ntb_queue_conf *txq_conf = queue_conf;
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_tx_queue *txq;
+ uint16_t i, prev;
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("ntb tx queue",
+ sizeof(struct ntb_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!txq) {
+ NTB_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = txq_conf->nb_desc;
+ txq->port_id = dev->dev_id;
+ txq->queue_id = qp_id;
+ txq->hw = hw;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("ntb tx sw ring",
+ sizeof(struct ntb_tx_entry) *
+ txq->nb_tx_desc,
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!txq->sw_ring) {
+ ntb_txq_release(txq);
+ txq = NULL;
+ NTB_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ return -ENOMEM;
+ }
+
+ prev = txq->nb_tx_desc - 1;
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->sw_ring[i].mbuf = NULL;
+ txq->sw_ring[i].last_id = i;
+ txq->sw_ring[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_free_thresh = txq_conf->tx_free_thresh ?
+ txq_conf->tx_free_thresh :
+ NTB_DFLT_TX_FREE_THRESH;
+ if (txq->tx_free_thresh >= txq->nb_tx_desc - 3) {
+ NTB_LOG(ERR, "tx_free_thresh must be less than nb_desc - 3. "
+ "(tx_free_thresh=%u qp_id=%u)", txq->tx_free_thresh,
+ qp_id);
+ ntb_txq_release(txq); /* avoid leaking the queue and sw ring */
+ return -EINVAL;
+ }
+
+ hw->tx_queues[qp_id] = txq;
+
+ return 0;
+}
+
+
+static int
+ntb_queue_setup(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ int ret;
+
+ if (queue_id >= hw->queue_pairs)
+ return -EINVAL;
+
+ ret = ntb_txq_setup(dev, queue_id, queue_conf);
+ if (ret < 0)
+ return ret;
+
+ ret = ntb_rxq_setup(dev, queue_id, queue_conf);
+
+ return ret;
+}
+
+static int
+ntb_queue_release(struct rte_rawdev *dev, uint16_t queue_id)
+{
+ struct ntb_hw *hw = dev->dev_private;
+
+ if (queue_id >= hw->queue_pairs)
+ return -EINVAL;
+
+ ntb_txq_release(hw->tx_queues[queue_id]);
+ hw->tx_queues[queue_id] = NULL;
+ ntb_rxq_release(hw->rx_queues[queue_id]);
+ hw->rx_queues[queue_id] = NULL;
+
+ return 0;
+}
+
+static uint16_t
+ntb_queue_count(struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ return hw->queue_pairs;
+}
+
+static int
+ntb_queue_init(struct rte_rawdev *dev, uint16_t qp_id)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_rx_queue *rxq = hw->rx_queues[qp_id];
+ struct ntb_tx_queue *txq = hw->tx_queues[qp_id];
+ volatile struct ntb_header *local_hdr;
+ struct ntb_header *remote_hdr;
+ uint16_t q_size = hw->queue_size;
+ uint32_t hdr_offset;
+ void *bar_addr;
+ uint16_t i;
+
+ if (hw->ntb_ops->get_peer_mw_addr == NULL) {
+ NTB_LOG(ERR, "Getting peer mw addr is not supported.");
+ return -EINVAL;
+ }
+
+ /* Put queue info into the start of shared memory. */
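+ /* Each side writes only the peer's copy of the rings (through the
+ * BAR) and reads only its local copy: rx posts descriptors remotely
+ * and consumes the local used ring, while tx reads local descriptors
+ * and posts used entries remotely.
+ */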
+ hdr_offset = hw->hdr_size_per_queue * qp_id;
+ local_hdr = (volatile struct ntb_header *)
+ ((size_t)hw->mz[0]->addr + hdr_offset);
+ bar_addr = (*hw->ntb_ops->get_peer_mw_addr)(dev, 0);
+ if (bar_addr == NULL)
+ return -EINVAL;
+ remote_hdr = (struct ntb_header *)
+ ((size_t)bar_addr + hdr_offset);
+
+ /* rxq init. */
+ rxq->rx_desc_ring = (struct ntb_desc *)
+ (&remote_hdr->desc_ring);
+ rxq->rx_used_ring = (volatile struct ntb_used *)
+ (&local_hdr->desc_ring[q_size]);
+ rxq->avail_cnt = &remote_hdr->avail_cnt;
+ rxq->used_cnt = &local_hdr->used_cnt;
+
+ for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mpool);
+ if (unlikely(!mbuf)) {
+ NTB_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+ mbuf->port = dev->dev_id;
+
+ rxq->sw_ring[i].mbuf = mbuf;
+
+ rxq->rx_desc_ring[i].addr = rte_pktmbuf_mtod(mbuf, size_t);
+ rxq->rx_desc_ring[i].len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+ }
+ rte_wmb();
+ *rxq->avail_cnt = rxq->nb_rx_desc - 1;
+ rxq->last_avail = rxq->nb_rx_desc - 1;
+ rxq->last_used = 0;
+
+ /* txq init */
+ txq->tx_desc_ring = (volatile struct ntb_desc *)
+ (&local_hdr->desc_ring);
+ txq->tx_used_ring = (struct ntb_used *)
+ (&remote_hdr->desc_ring[q_size]);
+ txq->avail_cnt = &local_hdr->avail_cnt;
+ txq->used_cnt = &remote_hdr->used_cnt;
+
+ rte_wmb();
+ *txq->used_cnt = 0;
+ txq->last_used = 0;
+ txq->last_avail = 0;
+ txq->nb_tx_free = txq->nb_tx_desc - 1;
+
+ /* Set per queue stats. */
+ for (i = 0; i < NTB_XSTATS_NUM; i++) {
+ hw->ntb_xstats[i + NTB_XSTATS_NUM * (qp_id + 1)] = 0;
+ hw->ntb_xstats_off[i + NTB_XSTATS_NUM * (qp_id + 1)] = 0;
+ }
+
+ return 0;
+}
+
+static inline void
+ntb_enqueue_cleanup(struct ntb_tx_queue *txq)
+{
+ struct ntb_tx_entry *sw_ring = txq->sw_ring;
+ uint16_t tx_free = txq->last_avail;
+ uint16_t nb_to_clean, i;
+
+ /* avail_cnt + 1 represents where to rx next in the peer. */
+ nb_to_clean = (*txq->avail_cnt - txq->last_avail + 1 +
+ txq->nb_tx_desc) & (txq->nb_tx_desc - 1);
+ nb_to_clean = RTE_MIN(nb_to_clean, txq->tx_free_thresh);
+ for (i = 0; i < nb_to_clean; i++) {
+ if (sw_ring[tx_free].mbuf)
+ rte_pktmbuf_free_seg(sw_ring[tx_free].mbuf);
+ tx_free = (tx_free + 1) & (txq->nb_tx_desc - 1);
+ }
+
+ txq->nb_tx_free += nb_to_clean;
+ txq->last_avail = tx_free;
+}
+
+static int
+ntb_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_tx_queue *txq = hw->tx_queues[(size_t)context];
+ struct ntb_tx_entry *sw_ring = txq->sw_ring;
+ struct rte_mbuf *txm;
+ struct ntb_used tx_used[NTB_MAX_DESC_SIZE];
+ volatile struct ntb_desc *tx_item;
+ uint16_t tx_last, nb_segs, off, last_used, avail_cnt;
+ uint16_t nb_mbufs = 0;
+ uint16_t nb_tx = 0;
+ uint64_t bytes = 0;
+ void *buf_addr;
+ int i;
+
+ if (unlikely(hw->ntb_ops->ioremap == NULL)) {
+ NTB_LOG(ERR, "Ioremap not supported.");
+ return nb_tx;
+ }
+
+ if (unlikely(dev->started == 0 || hw->peer_dev_up == 0)) {
+ NTB_LOG(DEBUG, "Link is not up.");
+ return nb_tx;
+ }
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ntb_enqueue_cleanup(txq);
+
+ off = NTB_XSTATS_NUM * ((size_t)context + 1);
+ last_used = txq->last_used;
+ avail_cnt = *txq->avail_cnt; /* Where to alloc next. */
+ for (nb_tx = 0; nb_tx < count; nb_tx++) {
+ txm = (struct rte_mbuf *)(buffers[nb_tx]->buf_addr);
+ if (txm == NULL || txq->nb_tx_free < txm->nb_segs)
+ break;
+
+ tx_last = (txq->last_used + txm->nb_segs - 1) &
+ (txq->nb_tx_desc - 1);
+ nb_segs = txm->nb_segs;
+ for (i = 0; i < nb_segs; i++) {
+ /* Not enough ring space for tx. */
+ if (txq->last_used == avail_cnt)
+ goto end_of_tx;
+ sw_ring[txq->last_used].mbuf = txm;
+ tx_item = txq->tx_desc_ring + txq->last_used;
+
+ if (!tx_item->len) {
+ (hw->ntb_xstats[NTB_TX_ERRS_ID + off])++;
+ goto end_of_tx;
+ }
+ if (txm->data_len > tx_item->len) {
+ NTB_LOG(ERR, "Data length exceeds buf length."
+ " Only %u data would be transmitted.",
+ tx_item->len);
+ txm->data_len = tx_item->len;
+ }
+
+ /* translate remote virtual addr to bar virtual addr */
+ buf_addr = (*hw->ntb_ops->ioremap)(dev, tx_item->addr);
+ if (buf_addr == NULL) {
+ (hw->ntb_xstats[NTB_TX_ERRS_ID + off])++;
+ NTB_LOG(ERR, "Null remap addr.");
+ goto end_of_tx;
+ }
+ rte_memcpy(buf_addr, rte_pktmbuf_mtod(txm, void *),
+ txm->data_len);
+
+ tx_used[nb_mbufs].len = txm->data_len;
+ tx_used[nb_mbufs++].flags = (txq->last_used ==
+ tx_last) ?
+ NTB_FLAG_EOP : 0;
+
+ /* update stats */
+ bytes += txm->data_len;
+
+ txm = txm->next;
+
+ sw_ring[txq->last_used].next_id = (txq->last_used + 1) &
+ (txq->nb_tx_desc - 1);
+ sw_ring[txq->last_used].last_id = tx_last;
+ txq->last_used = (txq->last_used + 1) &
+ (txq->nb_tx_desc - 1);
+ }
+ txq->nb_tx_free -= nb_segs;
+ }
+
+end_of_tx:
+ if (nb_tx) {
+ uint16_t nb1, nb2;
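+ /* Copy the new used entries into the peer's used ring, splitting
+ * the memcpy in two where the ring wraps around.
+ */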
+ if (nb_mbufs > txq->nb_tx_desc - last_used) {
+ nb1 = txq->nb_tx_desc - last_used;
+ nb2 = nb_mbufs - txq->nb_tx_desc + last_used;
+ } else {
+ nb1 = nb_mbufs;
+ nb2 = 0;
+ }
+ rte_memcpy(txq->tx_used_ring + last_used, tx_used,
+ sizeof(struct ntb_used) * nb1);
+ rte_memcpy(txq->tx_used_ring, tx_used + nb1,
+ sizeof(struct ntb_used) * nb2);
+ rte_wmb();
+ *txq->used_cnt = txq->last_used;
+
+ /* update queue stats */
+ hw->ntb_xstats[NTB_TX_BYTES_ID + off] += bytes;
+ hw->ntb_xstats[NTB_TX_PKTS_ID + off] += nb_tx;
+ }
+
+ return nb_tx;
+}
+
+static int
+ntb_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_rx_queue *rxq = hw->rx_queues[(size_t)context];
+ struct ntb_rx_entry *sw_ring = rxq->sw_ring;
+ struct ntb_desc rx_desc[NTB_MAX_DESC_SIZE];
+ struct rte_mbuf *first, *rxm_t;
+ struct rte_mbuf *prev = NULL;
+ volatile struct ntb_used *rx_item;
+ uint16_t nb_mbufs = 0;
+ uint16_t nb_rx = 0;
+ uint64_t bytes = 0;
+ uint16_t off, last_avail, used_cnt, used_nb;
+ int i;
+
+ if (unlikely(dev->started == 0 || hw->peer_dev_up == 0)) {
+ NTB_LOG(DEBUG, "Link is not up");
+ return nb_rx;
+ }
+
+ used_cnt = *rxq->used_cnt;
+
+ if (rxq->last_used == used_cnt)
+ return nb_rx;
+
+ last_avail = rxq->last_avail;
+ used_nb = (used_cnt - rxq->last_used) & (rxq->nb_rx_desc - 1);
+ count = RTE_MIN(count, used_nb);
+ for (nb_rx = 0; nb_rx < count; nb_rx++) {
+ i = 0;
+ while (true) {
+ rx_item = rxq->rx_used_ring + rxq->last_used;
+ rxm_t = sw_ring[rxq->last_used].mbuf;
+ rxm_t->data_len = rx_item->len;
+ rxm_t->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm_t->port = rxq->port_id;
+
+ if (!i) {
+ rxm_t->nb_segs = 1;
+ first = rxm_t;
+ first->pkt_len = 0;
+ buffers[nb_rx]->buf_addr = rxm_t;
+ } else {
+ prev->next = rxm_t;
+ first->nb_segs++;
+ }
+
+ prev = rxm_t;
+ first->pkt_len += prev->data_len;
+ rxq->last_used = (rxq->last_used + 1) &
+ (rxq->nb_rx_desc - 1);
+
+ /* alloc new mbuf */
+ rxm_t = rte_mbuf_raw_alloc(rxq->mpool);
+ if (unlikely(rxm_t == NULL)) {
+ NTB_LOG(ERR, "recv alloc mbuf failed.");
+ goto end_of_rx;
+ }
+ rxm_t->port = rxq->port_id;
+ sw_ring[rxq->last_avail].mbuf = rxm_t;
+ i++;
+
+ /* fill new desc */
+ rx_desc[nb_mbufs].addr =
+ rte_pktmbuf_mtod(rxm_t, size_t);
+ rx_desc[nb_mbufs++].len = rxm_t->buf_len -
+ RTE_PKTMBUF_HEADROOM;
+ rxq->last_avail = (rxq->last_avail + 1) &
+ (rxq->nb_rx_desc - 1);
+
+ if (rx_item->flags & NTB_FLAG_EOP)
+ break;
+ }
+ /* update stats */
+ bytes += first->pkt_len;
+ }
+
+end_of_rx:
+ if (nb_rx) {
+ uint16_t nb1, nb2;
+ if (nb_mbufs > rxq->nb_rx_desc - last_avail) {
+ nb1 = rxq->nb_rx_desc - last_avail;
+ nb2 = nb_mbufs - rxq->nb_rx_desc + last_avail;
+ } else {
+ nb1 = nb_mbufs;
+ nb2 = 0;
+ }
+ rte_memcpy(rxq->rx_desc_ring + last_avail, rx_desc,
+ sizeof(struct ntb_desc) * nb1);
+ rte_memcpy(rxq->rx_desc_ring, rx_desc + nb1,
+ sizeof(struct ntb_desc) * nb2);
+ rte_wmb();
+ *rxq->avail_cnt = rxq->last_avail;
+
+ /* update queue stats */
+ off = NTB_XSTATS_NUM * ((size_t)context + 1);
+ hw->ntb_xstats[NTB_RX_BYTES_ID + off] += bytes;
+ hw->ntb_xstats[NTB_RX_PKTS_ID + off] += nb_rx;
+ hw->ntb_xstats[NTB_RX_MISS_ID + off] += (count - nb_rx);
+ }
+
+ return nb_rx;
+}
+
+static void
+ntb_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_dev_info *info = dev_info;
+
+ info->mw_cnt = hw->mw_cnt;
+ info->mw_size = hw->mw_size;
+
+ /**
+ * Intel hardware requires that the mapped memory base address be
+ * aligned to EMBARSZ and backed by a contiguous memzone.
+ */
+ info->mw_size_align = (uint8_t)(hw->pci_dev->id.vendor_id ==
+ NTB_INTEL_VENDOR_ID);
+
+ if (!hw->queue_size || !hw->queue_pairs) {
+ NTB_LOG(ERR, "No queue size and queue num assigned.");
+ return;
+ }
+
+ hw->hdr_size_per_queue = RTE_ALIGN(sizeof(struct ntb_header) +
+ hw->queue_size * sizeof(struct ntb_desc) +
+ hw->queue_size * sizeof(struct ntb_used),
+ RTE_CACHE_LINE_SIZE);
+ info->ntb_hdr_size = hw->hdr_size_per_queue * hw->queue_pairs;
+}
+
+static int
+ntb_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
+{
+ struct ntb_dev_config *conf = config;
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t xstats_num;
+ int ret;
+
+ hw->queue_pairs = conf->num_queues;
+ hw->queue_size = conf->queue_size;
+ hw->used_mw_num = conf->mz_num;
+ hw->mz = conf->mz_list;
+ hw->rx_queues = rte_zmalloc("ntb_rx_queues",
+ sizeof(struct ntb_rx_queue *) * hw->queue_pairs, 0);
+ hw->tx_queues = rte_zmalloc("ntb_tx_queues",
+ sizeof(struct ntb_tx_queue *) * hw->queue_pairs, 0);
+ /* First total stats, then per queue stats. */
+ xstats_num = (hw->queue_pairs + 1) * NTB_XSTATS_NUM;
+ hw->ntb_xstats = rte_zmalloc("ntb_xstats", xstats_num *
+ sizeof(uint64_t), 0);
+ hw->ntb_xstats_off = rte_zmalloc("ntb_xstats_off", xstats_num *
+ sizeof(uint64_t), 0);
+
+ /* Start handshake with the peer. */
+ ret = ntb_handshake_work(dev);
+ if (ret < 0) {
+ rte_free(hw->rx_queues);
+ rte_free(hw->tx_queues);
+ hw->rx_queues = NULL;
+ hw->tx_queues = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ntb_dev_start(struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t peer_base_l, peer_val;
+ uint64_t peer_base_h;
+ uint32_t i;
+ int ret;
+
+ if (!hw->link_status || !hw->peer_dev_up)
+ return -EINVAL;
+
+ /* Set total stats. */
+ for (i = 0; i < NTB_XSTATS_NUM; i++) {
+ hw->ntb_xstats[i] = 0;
+ hw->ntb_xstats_off[i] = 0;
+ }
+
+ for (i = 0; i < hw->queue_pairs; i++) {
+ ret = ntb_queue_init(dev, i);
+ if (ret) {
+ NTB_LOG(ERR, "Failed to init queue.");
+ goto err_q_init;
+ }
+ }
+
+ hw->peer_mw_base = rte_zmalloc("ntb_peer_mw_base", hw->mw_cnt *
+ sizeof(uint64_t), 0);
+
+ if (hw->ntb_ops->spad_read == NULL) {
+ ret = -ENOTSUP;
+ goto err_up;
+ }
+
+ peer_val = (*hw->ntb_ops->spad_read)(dev, SPAD_Q_SZ, 0);
+ if (peer_val != hw->queue_size) {
+ NTB_LOG(ERR, "Inconsistent queue size! (local: %u peer: %u)",
+ hw->queue_size, peer_val);
+ ret = -EINVAL;
+ goto err_up;
+ }
+
+ peer_val = (*hw->ntb_ops->spad_read)(dev, SPAD_NUM_QPS, 0);
+ if (peer_val != hw->queue_pairs) {
+ NTB_LOG(ERR, "Inconsistent number of queues! (local: %u peer:"
+ " %u)", hw->queue_pairs, peer_val);
+ ret = -EINVAL;
+ goto err_up;
+ }
+
+ hw->peer_used_mws = (*hw->ntb_ops->spad_read)(dev, SPAD_USED_MWS, 0);
+
+ for (i = 0; i < hw->peer_used_mws; i++) {
+ peer_base_h = (*hw->ntb_ops->spad_read)(dev,
+ SPAD_MW0_BA_H + 2 * i, 0);
+ peer_base_l = (*hw->ntb_ops->spad_read)(dev,
+ SPAD_MW0_BA_L + 2 * i, 0);
+ hw->peer_mw_base[i] = (peer_base_h << 32) + peer_base_l;
+ }
+
+ dev->started = 1;
+
+ return 0;
+
+err_up:
+ rte_free(hw->peer_mw_base);
+err_q_init:
+ for (i = 0; i < hw->queue_pairs; i++) {
+ ntb_rxq_release_mbufs(hw->rx_queues[i]);
+ ntb_txq_release_mbufs(hw->tx_queues[i]);
+ }
+
+ return ret;
+}
+
+static void
+ntb_dev_stop(struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t time_out;
+ int status, i;
+
+ if (!hw->peer_dev_up)
+ goto clean;
+
+ ntb_link_cleanup(dev);
+
+ /* Notify the peer that device will be down. */
+ if (hw->ntb_ops->peer_db_set == NULL) {
+ NTB_LOG(ERR, "Peer doorbell setting is not supported.");
+ return;
+ }
+ status = (*hw->ntb_ops->peer_db_set)(dev, 1);
+ if (status) {
+ NTB_LOG(ERR, "Failed to tell peer device is down.");
+ return;
+ }
+
+ /*
+ * Set the timeout to 1s in case the peer is stopped accidentally
+ * without any notification.
+ */
+ time_out = 1000000;
+
+ /* Wait for the peer's cleanup work to be done before clearing the db mask. */
+ while (hw->peer_dev_up && time_out) {
+ time_out -= 10;
+ rte_delay_us(10);
+ }
+
+clean:
+ /* Clear doorbells mask. */
+ if (hw->ntb_ops->db_set_mask == NULL) {
+ NTB_LOG(ERR, "Doorbell mask setting is not supported.");
+ return;
+ }
+ status = (*hw->ntb_ops->db_set_mask)(dev,
+ (((uint64_t)1 << hw->db_cnt) - 1));
+ if (status)
+ NTB_LOG(ERR, "Failed to clear doorbells.");
+
+ for (i = 0; i < hw->queue_pairs; i++) {
+ ntb_rxq_release_mbufs(hw->rx_queues[i]);
+ ntb_txq_release_mbufs(hw->tx_queues[i]);
+ }
+
+ dev->started = 0;
+}
+
+static int
+ntb_dev_close(struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ struct rte_intr_handle *intr_handle;
+ int i;
+
+ if (dev->started)
+ ntb_dev_stop(dev);
+
+ /* free queues */
+ for (i = 0; i < hw->queue_pairs; i++)
+ ntb_queue_release(dev, i);
+ hw->queue_pairs = 0;
+
+ intr_handle = &hw->pci_dev->intr_handle;
+ /* Clean datapath event and vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ /* Disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ /* Unregister callback func to eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ ntb_dev_intr_handler, dev);
+
+ return 0;
+}
+
+static int
+ntb_dev_reset(struct rte_rawdev *rawdev __rte_unused)
+{
+ return 0;
+}
+
+static int
+ntb_attr_set(struct rte_rawdev *dev, const char *attr_name,
+ uint64_t attr_value)
+{
+ struct ntb_hw *hw;
+ int index;
+
+ if (dev == NULL || attr_name == NULL) {
+ NTB_LOG(ERR, "Invalid arguments for setting attributes");
+ return -EINVAL;
+ }
+
+ hw = dev->dev_private;
+
+ if (!strncmp(attr_name, NTB_SPAD_USER, NTB_SPAD_USER_LEN)) {
+ if (hw->ntb_ops->spad_write == NULL)
+ return -ENOTSUP;
+ index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
+ (*hw->ntb_ops->spad_write)(dev, hw->spad_user_list[index],
+ 1, attr_value);
+ NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
+ attr_name, attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_QUEUE_SZ_NAME, NTB_ATTR_NAME_LEN)) {
+ hw->queue_size = attr_value;
+ NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
+ attr_name, attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_QUEUE_NUM_NAME, NTB_ATTR_NAME_LEN)) {
+ hw->queue_pairs = attr_value;
+ NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
+ attr_name, attr_value);
+ return 0;
+ }
+
+ /* Attribute not found. */
+ NTB_LOG(ERR, "Attribute not found.");
+ return -EINVAL;
+}
+
+static int
+ntb_attr_get(struct rte_rawdev *dev, const char *attr_name,
+ uint64_t *attr_value)
+{
+ struct ntb_hw *hw;
+ int index;
+
+ if (dev == NULL || attr_name == NULL || attr_value == NULL) {
+ NTB_LOG(ERR, "Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+
+ hw = dev->dev_private;
+
+ if (!strncmp(attr_name, NTB_TOPO_NAME, NTB_ATTR_NAME_LEN)) {
+ *attr_value = hw->topo;
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_LINK_STATUS_NAME, NTB_ATTR_NAME_LEN)) {
+ /* hw->link_status only indicates hw link status. */
+ *attr_value = hw->link_status && hw->peer_dev_up;
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_SPEED_NAME, NTB_ATTR_NAME_LEN)) {
+ *attr_value = hw->link_speed;
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_WIDTH_NAME, NTB_ATTR_NAME_LEN)) {
+ *attr_value = hw->link_width;
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_MW_CNT_NAME, NTB_ATTR_NAME_LEN)) {
+ *attr_value = hw->mw_cnt;
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_DB_CNT_NAME, NTB_ATTR_NAME_LEN)) {
+ *attr_value = hw->db_cnt;
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_SPAD_CNT_NAME, NTB_ATTR_NAME_LEN)) {
+ *attr_value = hw->spad_cnt;
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_SPAD_USER, NTB_SPAD_USER_LEN)) {
+ if (hw->ntb_ops->spad_read == NULL)
+ return -ENOTSUP;
+ index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
+ *attr_value = (*hw->ntb_ops->spad_read)(dev,
+ hw->spad_user_list[index], 0);
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ return 0;
+ }
+
+ /* Attribute not found. */
+ NTB_LOG(ERR, "Attribute not found.");
+ return -EINVAL;
+}
+
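+/* Return (stat - offset) with uint64_t wraparound handled; "offset" is the
+ * snapshot taken at the last xstats reset.
+ */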
+static inline uint64_t
+ntb_stats_update(uint64_t offset, uint64_t stat)
+{
+ if (stat >= offset)
+ return (stat - offset);
+ else
+ return (uint64_t)(((uint64_t)-1) - offset + stat + 1);
+}
+
+static int
+ntb_xstats_get(const struct rte_rawdev *dev,
+ const unsigned int ids[],
+ uint64_t values[],
+ unsigned int n)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t i, j, off, xstats_num;
+
+ /* Calculate total stats of all queues. */
+ for (i = 0; i < NTB_XSTATS_NUM; i++) {
+ hw->ntb_xstats[i] = 0;
+ for (j = 0; j < hw->queue_pairs; j++) {
+ off = NTB_XSTATS_NUM * (j + 1) + i;
+ hw->ntb_xstats[i] +=
+ ntb_stats_update(hw->ntb_xstats_off[off],
+ hw->ntb_xstats[off]);
+ }
+ }
+
+ xstats_num = NTB_XSTATS_NUM * (hw->queue_pairs + 1);
+ for (i = 0; i < n && ids[i] < xstats_num; i++) {
+ if (ids[i] < NTB_XSTATS_NUM)
+ values[i] = hw->ntb_xstats[ids[i]];
+ else
+ values[i] =
+ ntb_stats_update(hw->ntb_xstats_off[ids[i]],
+ hw->ntb_xstats[ids[i]]);
+ }
+
+ return i;
+}
+
+static int
+ntb_xstats_get_names(const struct rte_rawdev *dev,
+ struct rte_rawdev_xstats_name *xstats_names,
+ unsigned int size)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t xstats_num, i, j, off;
+
+ xstats_num = NTB_XSTATS_NUM * (hw->queue_pairs + 1);
+ if (xstats_names == NULL || size < xstats_num)
+ return xstats_num;
+
+ /* Total stats names */
+ memcpy(xstats_names, ntb_xstats_names, sizeof(ntb_xstats_names));
+
+ /* Queue stats names */
+ for (i = 0; i < hw->queue_pairs; i++) {
+ for (j = 0; j < NTB_XSTATS_NUM; j++) {
+ off = j + (i + 1) * NTB_XSTATS_NUM;
+ snprintf(xstats_names[off].name,
+ sizeof(xstats_names[0].name),
+ "%s_q%u", ntb_xstats_names[j].name, i);
+ }
+ }
+
+ return xstats_num;
+}
+
+static uint64_t
+ntb_xstats_get_by_name(const struct rte_rawdev *dev,
+ const char *name, unsigned int *id)
+{
+ struct rte_rawdev_xstats_name *xstats_names;
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t xstats_num, i, j, off;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ xstats_num = NTB_XSTATS_NUM * (hw->queue_pairs + 1);
+ xstats_names = rte_zmalloc("ntb_stats_name",
+ sizeof(struct rte_rawdev_xstats_name) *
+ xstats_num, 0);
+ if (xstats_names == NULL)
+ return -EINVAL;
+ ntb_xstats_get_names(dev, xstats_names, xstats_num);
+
+ /* Calculate total stats of all queues. */
+ for (i = 0; i < NTB_XSTATS_NUM; i++) {
+ hw->ntb_xstats[i] = 0; /* recompute totals from scratch */
+ for (j = 0; j < hw->queue_pairs; j++) {
+ off = NTB_XSTATS_NUM * (j + 1) + i;
+ hw->ntb_xstats[i] +=
+ ntb_stats_update(hw->ntb_xstats_off[off],
+ hw->ntb_xstats[off]);
+ }
+ }
+
+ for (i = 0; i < xstats_num; i++) {
+ if (!strncmp(name, xstats_names[i].name,
+ RTE_RAW_DEV_XSTATS_NAME_SIZE)) {
+ *id = i;
+ rte_free(xstats_names);
+ if (i < NTB_XSTATS_NUM)
+ return hw->ntb_xstats[i];
+ else
+ return ntb_stats_update(hw->ntb_xstats_off[i],
+ hw->ntb_xstats[i]);
+ }
+ }
+
+ NTB_LOG(ERR, "Cannot find the xstats name.");
+
+ return -EINVAL;
+}
+
+static int
+ntb_xstats_reset(struct rte_rawdev *dev,
+ const uint32_t ids[],
+ uint32_t nb_ids)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t i, j, off, xstats_num;
+
+ xstats_num = NTB_XSTATS_NUM * (hw->queue_pairs + 1);
+ for (i = 0; i < nb_ids && ids[i] < xstats_num; i++) {
+ if (ids[i] < NTB_XSTATS_NUM) {
+ for (j = 0; j < hw->queue_pairs; j++) {
+ off = NTB_XSTATS_NUM * (j + 1) + ids[i];
+ hw->ntb_xstats_off[off] = hw->ntb_xstats[off];
+ }
+ } else {
+ hw->ntb_xstats_off[ids[i]] = hw->ntb_xstats[ids[i]];
+ }
+ }
+
+ return i;
+}
+
+static const struct rte_rawdev_ops ntb_ops = {
+ .dev_info_get = ntb_dev_info_get,
+ .dev_configure = ntb_dev_configure,
+ .dev_start = ntb_dev_start,
+ .dev_stop = ntb_dev_stop,
+ .dev_close = ntb_dev_close,
+ .dev_reset = ntb_dev_reset,
+
+ .queue_def_conf = ntb_queue_conf_get,
+ .queue_setup = ntb_queue_setup,
+ .queue_release = ntb_queue_release,
+ .queue_count = ntb_queue_count,
+
+ .enqueue_bufs = ntb_enqueue_bufs,
+ .dequeue_bufs = ntb_dequeue_bufs,
+
+ .attr_get = ntb_attr_get,
+ .attr_set = ntb_attr_set,
+
+ .xstats_get = ntb_xstats_get,
+ .xstats_get_names = ntb_xstats_get_names,
+ .xstats_get_by_name = ntb_xstats_get_by_name,
+ .xstats_reset = ntb_xstats_reset,
+};
+
+static int
+ntb_init_hw(struct rte_rawdev *dev, struct rte_pci_device *pci_dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ struct rte_intr_handle *intr_handle;
+ int ret, i;
+
+ hw->pci_dev = pci_dev;
+ hw->peer_dev_up = 0;
+ hw->link_status = NTB_LINK_DOWN;
+ hw->link_speed = NTB_SPEED_NONE;
+ hw->link_width = NTB_WIDTH_NONE;
+
+ switch (pci_dev->id.device_id) {
+ case NTB_INTEL_DEV_ID_B2B_SKX:
+ hw->ntb_ops = &intel_ntb_ops;
+ break;
+ default:
+ NTB_LOG(ERR, "Not supported device.");
+ return -EINVAL;
+ }
+
+ if (hw->ntb_ops->ntb_dev_init == NULL)
+ return -ENOTSUP;
+ ret = (*hw->ntb_ops->ntb_dev_init)(dev);
+ if (ret) {
+ NTB_LOG(ERR, "Unable to init ntb dev.");
+ return ret;
+ }
+
+ if (hw->ntb_ops->set_link == NULL)
+ return -ENOTSUP;
+ ret = (*hw->ntb_ops->set_link)(dev, 1);
+ if (ret)
+ return ret;
+
+ /* Init doorbell. */
+ hw->db_valid_mask = RTE_LEN2MASK(hw->db_cnt, uint64_t);
+
+ intr_handle = &pci_dev->intr_handle;
+ /* Register callback func to eal lib */
+ rte_intr_callback_register(intr_handle,
+ ntb_dev_intr_handler, dev);
+
+ ret = rte_intr_efd_enable(intr_handle, hw->db_cnt);
+ if (ret)
+ return ret;
+
+ /* To clarify, the interrupt for each doorbell is already mapped
+ * by default for intel gen3. They are mapped to msix vec 1-32,
+ * and hardware intr is mapped to 0. Map all to 0 for uio.
+ */
+ if (!rte_intr_cap_multiple(intr_handle)) {
+ for (i = 0; i < hw->db_cnt; i++) {
+ if (hw->ntb_ops->vector_bind == NULL)
+ return -ENOTSUP;
+ ret = (*hw->ntb_ops->vector_bind)(dev, i, 0);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (hw->ntb_ops->db_set_mask == NULL ||
+ hw->ntb_ops->peer_db_set == NULL) {
+ NTB_LOG(ERR, "Doorbell is not supported.");
+ return -ENOTSUP;
+ }
+ hw->db_mask = 0;
+ ret = (*hw->ntb_ops->db_set_mask)(dev, hw->db_mask);
+ if (ret) {
+ NTB_LOG(ERR, "Unable to enable intr for all dbs.");
+ return ret;
+ }
+
+	/* Enable UIO interrupt after callback registration. */
+ rte_intr_enable(intr_handle);
+
+ return ret;
+}
+
+static int
+ntb_create(struct rte_pci_device *pci_dev, int socket_id)
+{
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct rte_rawdev *rawdev = NULL;
+ int ret;
+
+ if (pci_dev == NULL) {
+ NTB_LOG(ERR, "Invalid pci_dev.");
+ return -EINVAL;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "NTB:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ NTB_LOG(INFO, "Init %s on NUMA node %d", name, socket_id);
+
+ /* Allocate device structure. */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct ntb_hw),
+ socket_id);
+ if (rawdev == NULL) {
+ NTB_LOG(ERR, "Unable to allocate rawdev.");
+ return -EINVAL;
+ }
+
+ rawdev->dev_ops = &ntb_ops;
+ rawdev->device = &pci_dev->device;
+ rawdev->driver_name = pci_dev->driver->driver.name;
+
+ ret = ntb_init_hw(rawdev, pci_dev);
+ if (ret < 0) {
+ NTB_LOG(ERR, "Unable to init ntb hw.");
+ goto fail;
+ }
+
+ return ret;
+
+fail:
+ if (rawdev != NULL)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+ntb_destroy(struct rte_pci_device *pci_dev)
+{
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct rte_rawdev *rawdev;
+ int ret;
+
+ if (pci_dev == NULL) {
+ NTB_LOG(ERR, "Invalid pci_dev.");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "NTB:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ NTB_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name);
+ if (rawdev == NULL) {
+ NTB_LOG(ERR, "Invalid device name (%s)", name);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ NTB_LOG(ERR, "Failed to destroy ntb rawdev.");
+
+ return ret;
+}
+
+static int
+ntb_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return ntb_create(pci_dev, rte_socket_id());
+}
+
+static int
+ntb_remove(struct rte_pci_device *pci_dev)
+{
+ return ntb_destroy(pci_dev);
+}
+
+static struct rte_pci_driver rte_ntb_pmd = {
+ .id_table = pci_id_ntb_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_WC_ACTIVATE,
+ .probe = ntb_probe,
+ .remove = ntb_remove,
+};
+
+RTE_PMD_REGISTER_PCI(raw_ntb, rte_ntb_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(raw_ntb, pci_id_ntb_map);
+RTE_PMD_REGISTER_KMOD_DEP(raw_ntb, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ntb_init_log)
+{
+ ntb_logtype = rte_log_register("pmd.raw.ntb");
+ if (ntb_logtype >= 0)
+ rte_log_set_level(ntb_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/raw/ntb/ntb.h b/src/spdk/dpdk/drivers/raw/ntb/ntb.h
new file mode 100644
index 000000000..a561c42d1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ntb/ntb.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef _NTB_H_
+#define _NTB_H_
+
+#include <stdbool.h>
+
+extern int ntb_logtype;
+
+#define NTB_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ntb_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+/* Vendor ID */
+#define NTB_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define NTB_INTEL_DEV_ID_B2B_SKX 0x201C
+
+/* Reserved for the application to use. */
+#define NTB_SPAD_USER "spad_user_"
+#define NTB_SPAD_USER_LEN (sizeof(NTB_SPAD_USER) - 1)
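+/* NTB_SPAD_USER_LEN is the prefix length (sizeof minus the trailing NUL),
+ * e.g. for matching attribute names such as "spad_user_0" by prefix.
+ */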
+#define NTB_SPAD_USER_MAX_NUM 4
+#define NTB_ATTR_NAME_LEN 30
+
+#define NTB_DFLT_TX_FREE_THRESH 256
+
+enum ntb_xstats_idx {
+ NTB_TX_PKTS_ID = 0,
+ NTB_TX_BYTES_ID,
+ NTB_TX_ERRS_ID,
+ NTB_RX_PKTS_ID,
+ NTB_RX_BYTES_ID,
+ NTB_RX_MISS_ID,
+};
+
+enum ntb_topo {
+ NTB_TOPO_NONE = 0,
+ NTB_TOPO_B2B_USD,
+ NTB_TOPO_B2B_DSD,
+};
+
+enum ntb_link {
+ NTB_LINK_DOWN = 0,
+ NTB_LINK_UP,
+};
+
+enum ntb_speed {
+ NTB_SPEED_NONE = 0,
+ NTB_SPEED_GEN1 = 1,
+ NTB_SPEED_GEN2 = 2,
+ NTB_SPEED_GEN3 = 3,
+ NTB_SPEED_GEN4 = 4,
+};
+
+enum ntb_width {
+ NTB_WIDTH_NONE = 0,
+ NTB_WIDTH_1 = 1,
+ NTB_WIDTH_2 = 2,
+ NTB_WIDTH_4 = 4,
+ NTB_WIDTH_8 = 8,
+ NTB_WIDTH_12 = 12,
+ NTB_WIDTH_16 = 16,
+ NTB_WIDTH_32 = 32,
+};
+
+/* Define spad registers usage. 0 is reserved. */
+enum ntb_spad_idx {
+ SPAD_NUM_MWS = 1,
+ SPAD_NUM_QPS,
+ SPAD_Q_SZ,
+ SPAD_USED_MWS,
+ SPAD_MW0_SZ_H,
+ SPAD_MW0_SZ_L,
+ SPAD_MW1_SZ_H,
+ SPAD_MW1_SZ_L,
+ SPAD_MW0_BA_H,
+ SPAD_MW0_BA_L,
+ SPAD_MW1_BA_H,
+ SPAD_MW1_BA_L,
+};
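+
+/* Each MW size/base is published as two 32-bit spad halves. A peer would
+ * reassemble them roughly as follows (illustrative sketch only):
+ *
+ *	mw0_sz = ((uint64_t)spad_read(SPAD_MW0_SZ_H) << 32) |
+ *		 spad_read(SPAD_MW0_SZ_L);
+ */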
+
+/**
+ * NTB device operations
+ * @ntb_dev_init: Init ntb dev.
+ * @get_peer_mw_addr: Get the address of peer mw[mw_idx].
+ * @mw_set_trans: Set translation of internal memory that the remote can access.
+ * @ioremap: Translate the remote host address to a bar address.
+ * @get_link_status: Get link status, link speed and link width.
+ * @set_link: Set local side up/down.
+ * @spad_read: Read local/peer spad register val.
+ * @spad_write: Write val to local/peer spad register.
+ * @db_read: Read doorbells status.
+ * @db_clear: Clear local doorbells.
+ * @db_set_mask: Set bits in the db mask, preventing db interrupts from
+ *               being generated for those db bits.
+ * @peer_db_set: Set doorbell bit to generate peer interrupt for that bit.
+ * @vector_bind: Bind vector source [intr] to msix vector [msix].
+ */
+struct ntb_dev_ops {
+ int (*ntb_dev_init)(const struct rte_rawdev *dev);
+ void *(*get_peer_mw_addr)(const struct rte_rawdev *dev, int mw_idx);
+ int (*mw_set_trans)(const struct rte_rawdev *dev, int mw_idx,
+ uint64_t addr, uint64_t size);
+ void *(*ioremap)(const struct rte_rawdev *dev, uint64_t addr);
+ int (*get_link_status)(const struct rte_rawdev *dev);
+ int (*set_link)(const struct rte_rawdev *dev, bool up);
+ uint32_t (*spad_read)(const struct rte_rawdev *dev, int spad,
+ bool peer);
+ int (*spad_write)(const struct rte_rawdev *dev, int spad,
+ bool peer, uint32_t spad_v);
+ uint64_t (*db_read)(const struct rte_rawdev *dev);
+ int (*db_clear)(const struct rte_rawdev *dev, uint64_t db_bits);
+ int (*db_set_mask)(const struct rte_rawdev *dev, uint64_t db_mask);
+ int (*peer_db_set)(const struct rte_rawdev *dev, uint8_t db_bit);
+ int (*vector_bind)(const struct rte_rawdev *dev, uint8_t intr,
+ uint8_t msix);
+};
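+
+/* The PMD dispatches through these ops indirectly, as done elsewhere in
+ * this file, e.g.:
+ *
+ *	if (hw->ntb_ops->spad_read == NULL)
+ *		return -ENOTSUP;
+ *	val = (*hw->ntb_ops->spad_read)(dev, spad, false);
+ */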
+
+struct ntb_desc {
+ uint64_t addr; /* buffer addr */
+ uint16_t len; /* buffer length */
+ uint16_t rsv1;
+ uint32_t rsv2;
+};
+
+#define NTB_FLAG_EOP 1 /* end of packet */
+struct ntb_used {
+ uint16_t len; /* buffer length */
+ uint16_t flags; /* flags */
+};
+
+struct ntb_rx_entry {
+ struct rte_mbuf *mbuf;
+};
+
+struct ntb_rx_queue {
+ struct ntb_desc *rx_desc_ring;
+ volatile struct ntb_used *rx_used_ring;
+ uint16_t *avail_cnt;
+ volatile uint16_t *used_cnt;
+ uint16_t last_avail;
+ uint16_t last_used;
+ uint16_t nb_rx_desc;
+
+ uint16_t rx_free_thresh;
+
+ struct rte_mempool *mpool; /* mempool for mbuf allocation */
+ struct ntb_rx_entry *sw_ring;
+
+ uint16_t queue_id; /* DPDK queue index. */
+ uint16_t port_id; /* Device port identifier. */
+
+ struct ntb_hw *hw;
+};
+
+struct ntb_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+struct ntb_tx_queue {
+ volatile struct ntb_desc *tx_desc_ring;
+ struct ntb_used *tx_used_ring;
+ volatile uint16_t *avail_cnt;
+ uint16_t *used_cnt;
+	uint16_t last_avail;             /* Next descriptor to be freed. */
+	uint16_t last_used;              /* Next descriptor to be sent. */
+ uint16_t nb_tx_desc;
+
+	/* Total number of TX descriptors available for use. */
+ uint16_t nb_tx_free;
+ uint16_t tx_free_thresh;
+
+ struct ntb_tx_entry *sw_ring;
+
+ uint16_t queue_id; /* DPDK queue index. */
+ uint16_t port_id; /* Device port identifier. */
+
+ struct ntb_hw *hw;
+};
+
+struct ntb_header {
+ uint16_t avail_cnt __rte_cache_aligned;
+ uint16_t used_cnt __rte_cache_aligned;
+ struct ntb_desc desc_ring[] __rte_cache_aligned;
+};
+
+/* ntb private data. */
+struct ntb_hw {
+ uint8_t mw_cnt;
+ uint8_t db_cnt;
+ uint8_t spad_cnt;
+
+ uint64_t db_valid_mask;
+ uint64_t db_mask;
+
+ enum ntb_topo topo;
+
+ enum ntb_link link_status;
+ enum ntb_speed link_speed;
+ enum ntb_width link_width;
+
+ const struct ntb_dev_ops *ntb_ops;
+
+ struct rte_pci_device *pci_dev;
+ char *hw_addr;
+
+ uint8_t peer_dev_up;
+ uint64_t *mw_size;
+ /* remote mem base addr */
+ uint64_t *peer_mw_base;
+
+ uint16_t queue_pairs;
+ uint16_t queue_size;
+ uint32_t hdr_size_per_queue;
+
+ struct ntb_rx_queue **rx_queues;
+ struct ntb_tx_queue **tx_queues;
+
+ /* memzone to populate RX ring. */
+ const struct rte_memzone **mz;
+ uint8_t used_mw_num;
+
+ uint8_t peer_used_mws;
+
+ uint64_t *ntb_xstats;
+ uint64_t *ntb_xstats_off;
+
+ /* Reserve several spad for app to use. */
+ int spad_user_list[NTB_SPAD_USER_MAX_NUM];
+};
+
+#endif /* _NTB_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ntb/ntb_hw_intel.c b/src/spdk/dpdk/drivers/raw/ntb/ntb_hw_intel.c
new file mode 100644
index 000000000..e7f8667cd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ntb/ntb_hw_intel.c
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation.
+ */
+#include <stdint.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include <rte_io.h>
+#include <rte_eal.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "ntb.h"
+#include "ntb_hw_intel.h"
+
+enum xeon_ntb_bar {
+ XEON_NTB_BAR23 = 2,
+ XEON_NTB_BAR45 = 4,
+};
+
+static enum xeon_ntb_bar intel_ntb_bar[] = {
+ XEON_NTB_BAR23,
+ XEON_NTB_BAR45,
+};
+
+static int
+intel_ntb_dev_init(const struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint8_t reg_val, bar;
+ int ret, i;
+
+ if (hw == NULL) {
+ NTB_LOG(ERR, "Invalid device.");
+ return -EINVAL;
+ }
+
+ ret = rte_pci_read_config(hw->pci_dev, &reg_val,
+ sizeof(reg_val), XEON_PPD_OFFSET);
+ if (ret < 0) {
+ NTB_LOG(ERR, "Cannot get NTB PPD (PCIe port definition).");
+ return -EIO;
+ }
+
+ /* Check connection topo type. Only support B2B. */
+ switch (reg_val & XEON_PPD_CONN_MASK) {
+ case XEON_PPD_CONN_B2B:
+ NTB_LOG(INFO, "Topo B2B (back to back) is using.");
+ break;
+ case XEON_PPD_CONN_TRANSPARENT:
+ case XEON_PPD_CONN_RP:
+ default:
+ NTB_LOG(ERR, "Not supported conn topo. Please use B2B.");
+ return -EINVAL;
+ }
+
+ /* Check device type. */
+ if (reg_val & XEON_PPD_DEV_DSD) {
+ NTB_LOG(INFO, "DSD, Downstream Device.");
+ hw->topo = NTB_TOPO_B2B_DSD;
+ } else {
+ NTB_LOG(INFO, "USD, Upstream device.");
+ hw->topo = NTB_TOPO_B2B_USD;
+ }
+
+	/* Check if BAR4 is split. A split BAR is not supported. */
+	if (reg_val & XEON_PPD_SPLIT_BAR_MASK) {
+		NTB_LOG(ERR, "Split BAR is not supported.");
+ return -EINVAL;
+ }
+
+ hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;
+
+ hw->mw_cnt = XEON_MW_COUNT;
+ hw->db_cnt = XEON_DB_COUNT;
+ hw->spad_cnt = XEON_SPAD_COUNT;
+
+ hw->mw_size = rte_zmalloc("ntb_mw_size",
+ hw->mw_cnt * sizeof(uint64_t), 0);
+ for (i = 0; i < hw->mw_cnt; i++) {
+ bar = intel_ntb_bar[i];
+ hw->mw_size[i] = hw->pci_dev->mem_resource[bar].len;
+ }
+
+ /* Reserve the last 2 spad registers for users. */
+ for (i = 0; i < NTB_SPAD_USER_MAX_NUM; i++)
+ hw->spad_user_list[i] = hw->spad_cnt;
+ hw->spad_user_list[0] = hw->spad_cnt - 2;
+ hw->spad_user_list[1] = hw->spad_cnt - 1;
+
+ return 0;
+}
+
+static void *
+intel_ntb_get_peer_mw_addr(const struct rte_rawdev *dev, int mw_idx)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint8_t bar;
+
+ if (hw == NULL) {
+ NTB_LOG(ERR, "Invalid device.");
+ return 0;
+ }
+
+ if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
+ NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
+ hw->mw_cnt - 1);
+ return 0;
+ }
+
+ bar = intel_ntb_bar[mw_idx];
+
+ return hw->pci_dev->mem_resource[bar].addr;
+}
+
+static int
+intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,
+ uint64_t addr, uint64_t size)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ void *xlat_addr, *limit_addr;
+ uint64_t xlat_off, limit_off;
+ uint64_t base, limit;
+ uint8_t bar;
+
+ if (hw == NULL) {
+ NTB_LOG(ERR, "Invalid device.");
+ return -EINVAL;
+ }
+
+ if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
+ NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
+ hw->mw_cnt - 1);
+ return -EINVAL;
+ }
+
+ bar = intel_ntb_bar[mw_idx];
+
+ xlat_off = XEON_IMBAR1XBASE_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
+ limit_off = XEON_IMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
+ xlat_addr = hw->hw_addr + xlat_off;
+ limit_addr = hw->hw_addr + limit_off;
+
+ /* Limit reg val should be EMBAR base address plus MW size. */
+ base = addr;
+ limit = hw->pci_dev->mem_resource[bar].phys_addr + size;
+ rte_write64(base, xlat_addr);
+ rte_write64(limit, limit_addr);
+
+	/* Set up the external pointer so that the remote can access. */
+ xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
+ xlat_addr = hw->hw_addr + xlat_off;
+ limit_off = XEON_EMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
+ limit_addr = hw->hw_addr + limit_off;
+ base = rte_read64(xlat_addr);
+ base &= ~0xf;
+ limit = base + size;
+ rte_write64(limit, limit_addr);
+
+ return 0;
+}
+
+static void *
+intel_ntb_ioremap(const struct rte_rawdev *dev, uint64_t addr)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ void *mapped = NULL;
+ void *base;
+ int i;
+
+ for (i = 0; i < hw->peer_used_mws; i++) {
+ if (addr >= hw->peer_mw_base[i] &&
+ addr <= hw->peer_mw_base[i] + hw->mw_size[i]) {
+ base = intel_ntb_get_peer_mw_addr(dev, i);
+ mapped = (void *)(size_t)(addr - hw->peer_mw_base[i] +
+ (size_t)base);
+ break;
+ }
+ }
+
+ return mapped;
+}
+
+static int
+intel_ntb_get_link_status(const struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint16_t reg_val;
+ int ret;
+
+ if (hw == NULL) {
+ NTB_LOG(ERR, "Invalid device.");
+ return -EINVAL;
+ }
+
+ ret = rte_pci_read_config(hw->pci_dev, &reg_val,
+ sizeof(reg_val), XEON_LINK_STATUS_OFFSET);
+ if (ret < 0) {
+ NTB_LOG(ERR, "Unable to get link status.");
+ return -EIO;
+ }
+
+ hw->link_status = NTB_LNK_STA_ACTIVE(reg_val);
+
+ if (hw->link_status) {
+ hw->link_speed = NTB_LNK_STA_SPEED(reg_val);
+ hw->link_width = NTB_LNK_STA_WIDTH(reg_val);
+ } else {
+ hw->link_speed = NTB_SPEED_NONE;
+ hw->link_width = NTB_WIDTH_NONE;
+ }
+
+ return 0;
+}
+
+static int
+intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t ntb_ctrl, reg_off;
+ void *reg_addr;
+
+ reg_off = XEON_NTBCNTL_OFFSET;
+ reg_addr = hw->hw_addr + reg_off;
+ ntb_ctrl = rte_read32(reg_addr);
+
+ if (up) {
+ ntb_ctrl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
+ ntb_ctrl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
+ ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
+ } else {
+ ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
+ ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
+ ntb_ctrl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
+ }
+
+ rte_write32(ntb_ctrl, reg_addr);
+
+ return 0;
+}
+
+static uint32_t
+intel_ntb_spad_read(const struct rte_rawdev *dev, int spad, bool peer)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t spad_v, reg_off;
+ void *reg_addr;
+
+ if (spad < 0 || spad >= hw->spad_cnt) {
+ NTB_LOG(ERR, "Invalid spad reg index.");
+ return 0;
+ }
+
+ /* When peer is true, read peer spad reg */
+ reg_off = peer ? XEON_B2B_SPAD_OFFSET : XEON_IM_SPAD_OFFSET;
+ reg_addr = hw->hw_addr + reg_off + (spad << 2);
+ spad_v = rte_read32(reg_addr);
+
+ return spad_v;
+}
+
+static int
+intel_ntb_spad_write(const struct rte_rawdev *dev, int spad,
+ bool peer, uint32_t spad_v)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t reg_off;
+ void *reg_addr;
+
+ if (spad < 0 || spad >= hw->spad_cnt) {
+ NTB_LOG(ERR, "Invalid spad reg index.");
+ return -EINVAL;
+ }
+
+ /* When peer is true, write peer spad reg */
+ reg_off = peer ? XEON_B2B_SPAD_OFFSET : XEON_IM_SPAD_OFFSET;
+ reg_addr = hw->hw_addr + reg_off + (spad << 2);
+
+ rte_write32(spad_v, reg_addr);
+
+ return 0;
+}
+
+static uint64_t
+intel_ntb_db_read(const struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint64_t db_off, db_bits;
+ void *db_addr;
+
+ db_off = XEON_IM_INT_STATUS_OFFSET;
+ db_addr = hw->hw_addr + db_off;
+
+ db_bits = rte_read64(db_addr);
+
+ return db_bits;
+}
+
+static int
+intel_ntb_db_clear(const struct rte_rawdev *dev, uint64_t db_bits)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint64_t db_off;
+ void *db_addr;
+
+ db_off = XEON_IM_INT_STATUS_OFFSET;
+ db_addr = hw->hw_addr + db_off;
+
+ rte_write64(db_bits, db_addr);
+
+ return 0;
+}
+
+static int
+intel_ntb_db_set_mask(const struct rte_rawdev *dev, uint64_t db_mask)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint64_t db_m_off;
+ void *db_m_addr;
+
+ db_m_off = XEON_IM_INT_DISABLE_OFFSET;
+ db_m_addr = hw->hw_addr + db_m_off;
+
+ db_mask |= hw->db_mask;
+
+ rte_write64(db_mask, db_m_addr);
+
+ hw->db_mask = db_mask;
+
+ return 0;
+}
+
+static int
+intel_ntb_peer_db_set(const struct rte_rawdev *dev, uint8_t db_idx)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t db_off;
+ void *db_addr;
+
+ if (((uint64_t)1 << db_idx) & ~hw->db_valid_mask) {
+ NTB_LOG(ERR, "Invalid doorbell.");
+ return -EINVAL;
+ }
+
+ db_off = XEON_IM_DOORBELL_OFFSET + db_idx * 4;
+ db_addr = hw->hw_addr + db_off;
+
+ rte_write32(1, db_addr);
+
+ return 0;
+}
+
+static int
+intel_ntb_vector_bind(const struct rte_rawdev *dev, uint8_t intr, uint8_t msix)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint8_t reg_off;
+ void *reg_addr;
+
+ if (intr >= hw->db_cnt) {
+ NTB_LOG(ERR, "Invalid intr source.");
+ return -EINVAL;
+ }
+
+ /* Bind intr source to msix vector */
+ reg_off = XEON_INTVEC_OFFSET;
+ reg_addr = hw->hw_addr + reg_off + intr;
+
+ rte_write8(msix, reg_addr);
+
+ return 0;
+}
+
+/* operations for primary side of local ntb */
+const struct ntb_dev_ops intel_ntb_ops = {
+ .ntb_dev_init = intel_ntb_dev_init,
+ .get_peer_mw_addr = intel_ntb_get_peer_mw_addr,
+ .mw_set_trans = intel_ntb_mw_set_trans,
+ .ioremap = intel_ntb_ioremap,
+ .get_link_status = intel_ntb_get_link_status,
+ .set_link = intel_ntb_set_link,
+ .spad_read = intel_ntb_spad_read,
+ .spad_write = intel_ntb_spad_write,
+ .db_read = intel_ntb_db_read,
+ .db_clear = intel_ntb_db_clear,
+ .db_set_mask = intel_ntb_db_set_mask,
+ .peer_db_set = intel_ntb_peer_db_set,
+ .vector_bind = intel_ntb_vector_bind,
+};
diff --git a/src/spdk/dpdk/drivers/raw/ntb/ntb_hw_intel.h b/src/spdk/dpdk/drivers/raw/ntb/ntb_hw_intel.h
new file mode 100644
index 000000000..4d1e64504
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ntb/ntb_hw_intel.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef _NTB_HW_INTEL_H_
+#define _NTB_HW_INTEL_H_
+
+/* Ntb control and link status */
+#define NTB_CTL_CFG_LOCK 1
+#define NTB_CTL_DISABLE 2
+#define NTB_CTL_S2P_BAR2_SNOOP (1 << 2)
+#define NTB_CTL_P2S_BAR2_SNOOP (1 << 4)
+#define NTB_CTL_S2P_BAR4_SNOOP (1 << 6)
+#define NTB_CTL_P2S_BAR4_SNOOP (1 << 8)
+#define NTB_CTL_S2P_BAR5_SNOOP (1 << 12)
+#define NTB_CTL_P2S_BAR5_SNOOP (1 << 14)
+
+#define NTB_LNK_STA_ACTIVE_BIT 0x2000
+#define NTB_LNK_STA_SPEED_MASK 0x000f
+#define NTB_LNK_STA_WIDTH_MASK 0x03f0
+#define NTB_LNK_STA_ACTIVE(x) (!!((x) & NTB_LNK_STA_ACTIVE_BIT))
+#define NTB_LNK_STA_SPEED(x) ((x) & NTB_LNK_STA_SPEED_MASK)
+#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 4)
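+
+/* Worked example (for illustration): a status value of 0x2042 decodes to
+ * NTB_LNK_STA_ACTIVE = 1, NTB_LNK_STA_SPEED = 2 (gen2) and
+ * NTB_LNK_STA_WIDTH = (0x040 >> 4) = 4 (x4).
+ */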
+
+/* Intel Skylake Xeon hardware */
+#define XEON_IMBAR1SZ_OFFSET 0x00d0
+#define XEON_IMBAR2SZ_OFFSET 0x00d1
+#define XEON_EMBAR1SZ_OFFSET 0x00d2
+#define XEON_EMBAR2SZ_OFFSET 0x00d3
+#define XEON_DEVCTRL_OFFSET 0x0098
+#define XEON_DEVSTS_OFFSET 0x009a
+#define XEON_UNCERRSTS_OFFSET 0x014c
+#define XEON_CORERRSTS_OFFSET 0x0158
+#define XEON_LINK_STATUS_OFFSET 0x01a2
+
+#define XEON_NTBCNTL_OFFSET 0x0000
+#define XEON_BAR_INTERVAL_OFFSET 0x0010
+#define XEON_IMBAR1XBASE_OFFSET 0x0010 /* SBAR2XLAT */
+#define XEON_IMBAR1XLMT_OFFSET 0x0018 /* SBAR2LMT */
+#define XEON_IMBAR2XBASE_OFFSET 0x0020 /* SBAR4XLAT */
+#define XEON_IMBAR2XLMT_OFFSET 0x0028 /* SBAR4LMT */
+#define XEON_IM_INT_STATUS_OFFSET 0x0040
+#define XEON_IM_INT_DISABLE_OFFSET 0x0048
+#define XEON_IM_SPAD_OFFSET 0x0080 /* SPAD */
+#define XEON_USMEMMISS_OFFSET 0x0070
+#define XEON_INTVEC_OFFSET 0x00d0
+#define XEON_IM_DOORBELL_OFFSET 0x0100 /* SDOORBELL0 */
+#define XEON_B2B_SPAD_OFFSET 0x0180 /* B2B SPAD */
+#define XEON_EMBAR0XBASE_OFFSET 0x4008 /* B2B_XLAT */
+#define XEON_EMBAR1XBASE_OFFSET 0x4010 /* PBAR2XLAT */
+#define XEON_EMBAR1XLMT_OFFSET 0x4018 /* PBAR2LMT */
+#define XEON_EMBAR2XBASE_OFFSET 0x4020 /* PBAR4XLAT */
+#define XEON_EMBAR2XLMT_OFFSET 0x4028 /* PBAR4LMT */
+#define XEON_EM_INT_STATUS_OFFSET 0x4040
+#define XEON_EM_INT_DISABLE_OFFSET 0x4048
+#define XEON_EM_SPAD_OFFSET 0x4080 /* remote SPAD */
+#define XEON_EM_DOORBELL_OFFSET 0x4100 /* PDOORBELL0 */
+#define XEON_SPCICMD_OFFSET 0x4504 /* SPCICMD */
+#define XEON_EMBAR0_OFFSET 0x4510 /* SBAR0BASE */
+#define XEON_EMBAR1_OFFSET 0x4518 /* SBAR23BASE */
+#define XEON_EMBAR2_OFFSET 0x4520 /* SBAR45BASE */
+
+#define XEON_PPD_OFFSET 0x00d4
+#define XEON_PPD_CONN_MASK 0x03
+#define XEON_PPD_CONN_TRANSPARENT 0x00
+#define XEON_PPD_CONN_B2B 0x01
+#define XEON_PPD_CONN_RP 0x02
+#define XEON_PPD_DEV_MASK 0x10
+#define XEON_PPD_DEV_USD 0x00
+#define XEON_PPD_DEV_DSD 0x10
+#define XEON_PPD_SPLIT_BAR_MASK 0x40
+
+
+#define XEON_MW_COUNT 2
+
+#define XEON_DB_COUNT 32
+#define XEON_DB_LINK 32
+#define XEON_DB_LINK_BIT (1ULL << XEON_DB_LINK)
+#define XEON_DB_MSIX_VECTOR_COUNT 33
+#define XEON_DB_MSIX_VECTOR_SHIFT 1
+#define XEON_DB_TOTAL_SHIFT 33
+#define XEON_SPAD_COUNT 16
+
+extern const struct ntb_dev_ops intel_ntb_ops;
+
+#endif /* _NTB_HW_INTEL_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ntb/rte_pmd_ntb.h b/src/spdk/dpdk/drivers/raw/ntb/rte_pmd_ntb.h
new file mode 100644
index 000000000..6591ce793
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ntb/rte_pmd_ntb.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef _RTE_PMD_NTB_H_
+#define _RTE_PMD_NTB_H_
+
+/* App needs to set/get these attrs */
+#define NTB_QUEUE_SZ_NAME "queue_size"
+#define NTB_QUEUE_NUM_NAME "queue_num"
+#define NTB_TOPO_NAME "topo"
+#define NTB_LINK_STATUS_NAME "link_status"
+#define NTB_SPEED_NAME "speed"
+#define NTB_WIDTH_NAME "width"
+#define NTB_MW_CNT_NAME "mw_count"
+#define NTB_DB_CNT_NAME "db_count"
+#define NTB_SPAD_CNT_NAME "spad_count"
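+
+/* A minimal usage sketch (assuming dev_id refers to a configured NTB
+ * rawdev; the value 512 is only an example):
+ *
+ *	uint64_t qsize;
+ *	rte_rawdev_get_attr(dev_id, NTB_QUEUE_SZ_NAME, &qsize);
+ *	rte_rawdev_set_attr(dev_id, NTB_QUEUE_SZ_NAME, 512);
+ */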
+
+#define NTB_MAX_DESC_SIZE 1024
+#define NTB_MIN_DESC_SIZE 64
+
+struct ntb_dev_info {
+ uint32_t ntb_hdr_size;
+ /**< memzone needs to be mw size align or not. */
+ uint8_t mw_size_align;
+ uint8_t mw_cnt;
+ uint64_t *mw_size;
+};
+
+struct ntb_dev_config {
+ uint16_t num_queues;
+ uint16_t queue_size;
+ uint8_t mz_num;
+ const struct rte_memzone **mz_list;
+};
+
+struct ntb_queue_conf {
+ uint16_t nb_desc;
+ uint16_t tx_free_thresh;
+ struct rte_mempool *rx_mp;
+};
+
+#endif /* _RTE_PMD_NTB_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ntb/rte_rawdev_ntb_version.map b/src/spdk/dpdk/drivers/raw/ntb/rte_rawdev_ntb_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ntb/rte_rawdev_ntb_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_dma/Makefile b/src/spdk/dpdk/drivers/raw/octeontx2_dma/Makefile
new file mode 100644
index 000000000..c64ca3497
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_dma/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_rawdev_octeontx2_dma.a
+
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2/
+CFLAGS += -I$(RTE_SDK)/drivers/raw/octeontx2_dma/
+LDLIBS += -lrte_eal -lrte_rawdev -lrte_bus_pci -lrte_mbuf
+LDLIBS += -lrte_common_octeontx2 -lrte_mempool
+
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -diag-disable 2259
+endif
+endif
+
+EXPORT_MAP := rte_rawdev_octeontx2_dma_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += otx2_dpi_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += otx2_dpi_msg.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += otx2_dpi_test.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_dma/meson.build b/src/spdk/dpdk/drivers/raw/octeontx2_dma/meson.build
new file mode 100644
index 000000000..11f74680a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_dma/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+deps += ['bus_pci', 'common_octeontx2', 'rawdev']
+sources = files('otx2_dpi_rawdev.c', 'otx2_dpi_msg.c', 'otx2_dpi_test.c')
+
+extra_flags = []
+# This integrated controller runs only on an arm64 machine; silence 32-bit warnings
+if not dpdk_conf.get('RTE_ARCH_64')
+ extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
+endif
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_msg.c b/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_msg.c
new file mode 100644
index 000000000..aa361cb8a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_msg.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _DPI_MSG_H_
+#define _DPI_MSG_H_
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "otx2_dpi_rawdev.h"
+
+/* DPI PF DBDF information macros */
+#define DPI_PF_DBDF_DOMAIN 0
+#define DPI_PF_DBDF_BUS 5
+#define DPI_PF_DBDF_DEVICE 0
+#define DPI_PF_DBDF_FUNCTION 0
+
+#define DPI_PF_MBOX_SYSFS_ENTRY "dpi_device_config"
+
+union dpi_mbox_message_u {
+ uint64_t u[2];
+ struct dpi_mbox_message_s {
+ /* VF ID to configure */
+ uint64_t vfid :4;
+ /* Command code */
+ uint64_t cmd :4;
+ /* Command buffer size in 8-byte words */
+ uint64_t csize :14;
+ /* aura of the command buffer */
+ uint64_t aura :20;
+ /* SSO PF function */
+ uint64_t sso_pf_func :16;
+ /* NPA PF function */
+ uint64_t npa_pf_func :16;
+ } s;
+};
+
+static inline int
+send_msg_to_pf(const char *value, int size)
+{
+ char buff[255] = { 0 };
+ int res, fd;
+
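+	/* Builds the PF mbox sysfs path; with the defaults above this is
+	 * typically "/sys/bus/pci/devices/0000:05:00.0/dpi_device_config".
+	 */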
+ res = snprintf(buff, sizeof(buff), "%s/" PCI_PRI_FMT "/%s",
+ rte_pci_get_sysfs_path(), DPI_PF_DBDF_DOMAIN,
+ DPI_PF_DBDF_BUS, DPI_PF_DBDF_DEVICE & 0x7,
+ DPI_PF_DBDF_FUNCTION & 0x7, DPI_PF_MBOX_SYSFS_ENTRY);
+ if ((res < 0) || ((size_t)res > sizeof(buff)))
+ return -ERANGE;
+
+ fd = open(buff, O_WRONLY);
+ if (fd < 0)
+ return -EACCES;
+ res = write(fd, value, size);
+ close(fd);
+ if (res < 0)
+ return -EACCES;
+
+ return 0;
+}
+
+int
+otx2_dpi_queue_open(uint16_t vf_id, uint32_t size, uint32_t gaura)
+{
+ union dpi_mbox_message_u mbox_msg;
+ int ret = 0;
+
+	/* The DPI PF driver expects vfid to start from index 0 */
+ mbox_msg.s.vfid = vf_id;
+ mbox_msg.s.cmd = DPI_QUEUE_OPEN;
+ mbox_msg.s.csize = size;
+ mbox_msg.s.aura = gaura;
+ mbox_msg.s.sso_pf_func = otx2_sso_pf_func_get();
+ mbox_msg.s.npa_pf_func = otx2_npa_pf_func_get();
+
+ ret = send_msg_to_pf((const char *)&mbox_msg,
+ sizeof(mbox_msg));
+ if (ret < 0)
+ otx2_dpi_dbg("Failed to send mbox message to dpi pf");
+
+ return ret;
+}
+
+int
+otx2_dpi_queue_close(uint16_t vf_id)
+{
+ union dpi_mbox_message_u mbox_msg;
+ int ret = 0;
+
+	/* The DPI PF driver expects vfid to start from index 0 */
+ mbox_msg.s.vfid = vf_id;
+ mbox_msg.s.cmd = DPI_QUEUE_CLOSE;
+
+ ret = send_msg_to_pf((const char *)&mbox_msg,
+ sizeof(mbox_msg));
+ if (ret < 0)
+ otx2_dpi_dbg("Failed to send mbox message to dpi pf");
+
+ return ret;
+}
+
+#endif /* _DPI_MSG_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c b/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c
new file mode 100644
index 000000000..e398abb75
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.c
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_bus.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_mempool.h>
+#include <rte_pci.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include <otx2_common.h>
+
+#include "otx2_dpi_rawdev.h"
+
+static const struct rte_pci_id pci_dma_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_DPI_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+/* Enable/Disable DMA queue */
+static inline int
+dma_engine_enb_dis(struct dpi_vf_s *dpivf, const bool enb)
+{
+ if (enb)
+ otx2_write64(0x1, dpivf->vf_bar0 + DPI_VDMA_EN);
+ else
+ otx2_write64(0x0, dpivf->vf_bar0 + DPI_VDMA_EN);
+
+ return DPI_DMA_QUEUE_SUCCESS;
+}
+
+/* Free DMA Queue instruction buffers, and send close notification to PF */
+static inline int
+dma_queue_finish(struct dpi_vf_s *dpivf)
+{
+ uint32_t timeout = 0, sleep = 1;
+ uint64_t reg = 0ULL;
+
+ /* Wait for SADDR to become idle */
+ reg = otx2_read64(dpivf->vf_bar0 + DPI_VDMA_SADDR);
+ while (!(reg & BIT_ULL(DPI_VDMA_SADDR_REQ_IDLE))) {
+ rte_delay_ms(sleep);
+ timeout++;
+ if (timeout >= DPI_QFINISH_TIMEOUT) {
+ otx2_dpi_dbg("Timeout!!! Closing Forcibly");
+ break;
+ }
+ reg = otx2_read64(dpivf->vf_bar0 + DPI_VDMA_SADDR);
+ }
+
+ if (otx2_dpi_queue_close(dpivf->vf_id) < 0)
+ return -EACCES;
+
+ rte_mempool_put(dpivf->chunk_pool, dpivf->base_ptr);
+ dpivf->vf_bar0 = (uintptr_t)NULL;
+
+ return DPI_DMA_QUEUE_SUCCESS;
+}
+
+/* Write an arbitrary number of command words to a command queue */
+static __rte_always_inline enum dpi_dma_queue_result_e
+dma_queue_write(struct dpi_vf_s *dpi, uint16_t cmd_count, uint64_t *cmds)
+{
+ if ((cmd_count < 1) || (cmd_count > 64))
+ return DPI_DMA_QUEUE_INVALID_PARAM;
+
+ if (cmds == NULL)
+ return DPI_DMA_QUEUE_INVALID_PARAM;
+
+ /* Room available in the current buffer for the command */
+ if (dpi->index + cmd_count < dpi->pool_size_m1) {
+ uint64_t *ptr = dpi->base_ptr;
+
+ ptr += dpi->index;
+ dpi->index += cmd_count;
+ while (cmd_count--)
+ *ptr++ = *cmds++;
+ } else {
+ void *new_buffer;
+ uint64_t *ptr;
+ int count;
+
+ /* Allocate new command buffer, return if failed */
+ if (rte_mempool_get(dpi->chunk_pool, &new_buffer) ||
+ new_buffer == NULL) {
+ return DPI_DMA_QUEUE_NO_MEMORY;
+ }
+ ptr = dpi->base_ptr;
+		/* Figure out how many command words will fit in this buffer.
+		 * One location will be needed for the next buffer pointer.
+		 */
+ count = dpi->pool_size_m1 - dpi->index;
+ ptr += dpi->index;
+ cmd_count -= count;
+ while (count--)
+ *ptr++ = *cmds++;
+		/* The chunk next pointer is 2 DWORDs; the second DWORD is reserved. */
+ *ptr++ = (uint64_t)new_buffer;
+ *ptr = 0;
+		/* The current buffer is full and has a link to the next buffer.
+		 * Time to write the rest of the commands into the new buffer.
+		 */
+ dpi->base_ptr = new_buffer;
+ dpi->index = cmd_count;
+ ptr = new_buffer;
+ while (cmd_count--)
+ *ptr++ = *cmds++;
+		/* The queue index may be greater than the pool size. */
+ if (dpi->index >= dpi->pool_size_m1) {
+ if (rte_mempool_get(dpi->chunk_pool, &new_buffer) ||
+ new_buffer == NULL) {
+ return DPI_DMA_QUEUE_NO_MEMORY;
+ }
+ /* Write next buffer address */
+ *ptr = (uint64_t)new_buffer;
+ dpi->base_ptr = new_buffer;
+ dpi->index = 0;
+ }
+ }
+ return DPI_DMA_QUEUE_SUCCESS;
+}
+
+/* Submit a DMA command to the DMA queues. */
+static __rte_always_inline int
+dma_queue_submit(struct rte_rawdev *dev, uint16_t cmd_count, uint64_t *cmds)
+{
+ struct dpi_vf_s *dpivf = dev->dev_private;
+ enum dpi_dma_queue_result_e result;
+
+ result = dma_queue_write(dpivf, cmd_count, cmds);
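+	/* Ensure the command words are globally visible before the doorbell
+	 * write below triggers the DMA engine to fetch them.
+	 */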
+ rte_wmb();
+ if (likely(result == DPI_DMA_QUEUE_SUCCESS))
+ otx2_write64((uint64_t)cmd_count,
+ dpivf->vf_bar0 + DPI_VDMA_DBELL);
+
+ return result;
+}
+
+/* Enqueue buffers to DMA queue
+ * returns number of buffers enqueued successfully
+ */
+static int
+otx2_dpi_rawdev_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count, rte_rawdev_obj_t context)
+{
+ struct dpi_dma_queue_ctx_s *ctx = (struct dpi_dma_queue_ctx_s *)context;
+ struct dpi_dma_buf_ptr_s *cmd;
+ uint32_t c = 0;
+
+ for (c = 0; c < count; c++) {
+ uint64_t dpi_cmd[DPI_DMA_CMD_SIZE] = {0};
+ union dpi_dma_instr_hdr_u *hdr;
+ uint16_t index = 0, i;
+
+ hdr = (union dpi_dma_instr_hdr_u *)&dpi_cmd[0];
+ cmd = (struct dpi_dma_buf_ptr_s *)buffers[c]->buf_addr;
+
+ hdr->s.xtype = ctx->xtype & DPI_XTYPE_MASK;
+ hdr->s.pt = ctx->pt & DPI_HDR_PT_MASK;
+ /* Request initiated with byte write completion, but completion
+ * pointer not provided
+ */
+ if ((hdr->s.pt == DPI_HDR_PT_ZBW_CA ||
+ hdr->s.pt == DPI_HDR_PT_ZBW_NC) && cmd->comp_ptr == NULL)
+ return c;
+
+ cmd->comp_ptr->cdata = DPI_REQ_CDATA;
+ hdr->s.ptr = (uint64_t)cmd->comp_ptr;
+ hdr->s.deallocv = ctx->deallocv;
+ hdr->s.tt = ctx->tt & DPI_W0_TT_MASK;
+ hdr->s.grp = ctx->grp & DPI_W0_GRP_MASK;
+
+ /* If caller provides completion ring details, then only queue
+ * completion address for later polling.
+ */
+ if (ctx->c_ring) {
+ ctx->c_ring->compl_data[ctx->c_ring->tail] =
+ cmd->comp_ptr;
+ STRM_INC(ctx->c_ring);
+ }
+
+ if (hdr->s.deallocv)
+ hdr->s.pvfe = 1;
+
+ if (hdr->s.pt == DPI_HDR_PT_WQP)
+ hdr->s.ptr = hdr->s.ptr | DPI_HDR_PT_WQP_STATUSNC;
+
+ index += 4;
+ hdr->s.fport = 0;
+ hdr->s.lport = 0;
+
+ /* For inbound case, src pointers are last pointers.
+ * For all other cases, src pointers are first pointers.
+ */
+ if (ctx->xtype == DPI_XTYPE_INBOUND) {
+ hdr->s.nfst = cmd->wptr_cnt & DPI_MAX_POINTER;
+ hdr->s.nlst = cmd->rptr_cnt & DPI_MAX_POINTER;
+ for (i = 0; i < hdr->s.nfst; i++) {
+ dpi_cmd[index++] = cmd->wptr[i]->u[0];
+ dpi_cmd[index++] = cmd->wptr[i]->u[1];
+ }
+ for (i = 0; i < hdr->s.nlst; i++) {
+ dpi_cmd[index++] = cmd->rptr[i]->u[0];
+ dpi_cmd[index++] = cmd->rptr[i]->u[1];
+ }
+ } else {
+ hdr->s.nfst = cmd->rptr_cnt & DPI_MAX_POINTER;
+ hdr->s.nlst = cmd->wptr_cnt & DPI_MAX_POINTER;
+ for (i = 0; i < hdr->s.nfst; i++) {
+ dpi_cmd[index++] = cmd->rptr[i]->u[0];
+ dpi_cmd[index++] = cmd->rptr[i]->u[1];
+ }
+ for (i = 0; i < hdr->s.nlst; i++) {
+ dpi_cmd[index++] = cmd->wptr[i]->u[0];
+ dpi_cmd[index++] = cmd->wptr[i]->u[1];
+ }
+ }
+ if (dma_queue_submit(dev, index, dpi_cmd))
+ return c;
+ }
+ return c;
+}
+
+/* Check for command completion, returns number of commands completed */
+static int
+otx2_dpi_rawdev_dequeue_bufs(struct rte_rawdev *dev __rte_unused,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count, rte_rawdev_obj_t context)
+{
+ struct dpi_dma_queue_ctx_s *ctx = (struct dpi_dma_queue_ctx_s *)context;
+ unsigned int i = 0, headp;
+
+ /* No completion ring to poll */
+ if (ctx->c_ring == NULL)
+ return 0;
+
+ headp = ctx->c_ring->head;
+ for (i = 0; i < count && (headp != ctx->c_ring->tail); i++) {
+ struct dpi_dma_req_compl_s *comp_ptr =
+ ctx->c_ring->compl_data[headp];
+
+ if (comp_ptr->cdata)
+ break;
+
+ /* Request Completed */
+ buffers[i] = (void *)comp_ptr;
+ headp = (headp + 1) % ctx->c_ring->max_cnt;
+ }
+ ctx->c_ring->head = headp;
+
+ return i;
+}
+
+static int
+otx2_dpi_rawdev_start(struct rte_rawdev *dev)
+{
+ dev->started = DPI_QUEUE_START;
+
+ return DPI_DMA_QUEUE_SUCCESS;
+}
+
+static void
+otx2_dpi_rawdev_stop(struct rte_rawdev *dev)
+{
+ dev->started = DPI_QUEUE_STOP;
+}
+
+static int
+otx2_dpi_rawdev_close(struct rte_rawdev *dev)
+{
+ dma_engine_enb_dis(dev->dev_private, false);
+ dma_queue_finish(dev->dev_private);
+
+ return DPI_DMA_QUEUE_SUCCESS;
+}
+
+static int
+otx2_dpi_rawdev_reset(struct rte_rawdev *dev)
+{
+ return dev ? DPI_QUEUE_STOP : DPI_QUEUE_START;
+}
+
+static int
+otx2_dpi_rawdev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
+{
+ struct dpi_rawdev_conf_s *conf = config;
+ struct dpi_vf_s *dpivf = NULL;
+ void *buf = NULL;
+ uintptr_t pool;
+ uint32_t gaura;
+
+ if (conf == NULL) {
+ otx2_dpi_dbg("NULL configuration");
+ return -EINVAL;
+ }
+ dpivf = (struct dpi_vf_s *)dev->dev_private;
+ dpivf->chunk_pool = conf->chunk_pool;
+ if (rte_mempool_get(conf->chunk_pool, &buf) || (buf == NULL)) {
+ otx2_err("Unable allocate buffer");
+ return -ENODEV;
+ }
+ dpivf->base_ptr = buf;
+ otx2_write64(0x0, dpivf->vf_bar0 + DPI_VDMA_EN);
+ dpivf->pool_size_m1 = (DPI_CHUNK_SIZE >> 3) - 2;
+ pool = (uintptr_t)((struct rte_mempool *)conf->chunk_pool)->pool_id;
+ gaura = npa_lf_aura_handle_to_aura(pool);
+ otx2_write64(0, dpivf->vf_bar0 + DPI_VDMA_REQQ_CTL);
+ otx2_write64(((uint64_t)buf >> 7) << 7,
+ dpivf->vf_bar0 + DPI_VDMA_SADDR);
+ if (otx2_dpi_queue_open(dpivf->vf_id, DPI_CHUNK_SIZE, gaura) < 0) {
+ otx2_err("Unable to open DPI VF %d", dpivf->vf_id);
+ rte_mempool_put(conf->chunk_pool, buf);
+ return -EACCES;
+ }
+ dma_engine_enb_dis(dpivf, true);
+
+ return DPI_DMA_QUEUE_SUCCESS;
+}
+
+static const struct rte_rawdev_ops dpi_rawdev_ops = {
+ .dev_configure = otx2_dpi_rawdev_configure,
+ .dev_start = otx2_dpi_rawdev_start,
+ .dev_stop = otx2_dpi_rawdev_stop,
+ .dev_close = otx2_dpi_rawdev_close,
+ .dev_reset = otx2_dpi_rawdev_reset,
+ .enqueue_bufs = otx2_dpi_rawdev_enqueue_bufs,
+ .dequeue_bufs = otx2_dpi_rawdev_dequeue_bufs,
+ .dev_selftest = test_otx2_dma_rawdev,
+};
+
+static int
+otx2_dpi_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct dpi_vf_s *dpivf = NULL;
+ struct rte_rawdev *rawdev;
+ uint16_t vf_id;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return DPI_DMA_QUEUE_SUCCESS;
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ otx2_dpi_dbg("Empty bars %p %p", pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "DPI:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpi_vf_s),
+ rte_socket_id());
+ if (rawdev == NULL) {
+ otx2_err("Rawdev allocation failed");
+ return -EINVAL;
+ }
+
+ rawdev->dev_ops = &dpi_rawdev_ops;
+ rawdev->device = &pci_dev->device;
+ rawdev->driver_name = pci_dev->driver->driver.name;
+
+ dpivf = rawdev->dev_private;
+ if (dpivf->state != DPI_QUEUE_STOP) {
+ otx2_dpi_dbg("Device already started!!!");
+ return -ENODEV;
+ }
+
+ vf_id = ((pci_dev->addr.devid & 0x1F) << 3) |
+ (pci_dev->addr.function & 0x7);
+ vf_id -= 1;
+ dpivf->state = DPI_QUEUE_START;
+ dpivf->vf_id = vf_id;
+ dpivf->vf_bar0 = (uintptr_t)pci_dev->mem_resource[0].addr;
+ dpivf->vf_bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
+
+ return DPI_DMA_QUEUE_SUCCESS;
+}
+
+static int
+otx2_dpi_rawdev_remove(struct rte_pci_device *pci_dev)
+{
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct rte_rawdev *rawdev;
+ struct dpi_vf_s *dpivf;
+
+ if (pci_dev == NULL) {
+ otx2_dpi_dbg("Invalid pci_dev of the device!");
+ return -EINVAL;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "DPI:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name);
+ if (rawdev == NULL) {
+ otx2_dpi_dbg("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ dpivf = (struct dpi_vf_s *)rawdev->dev_private;
+ dma_engine_enb_dis(dpivf, false);
+ dma_queue_finish(dpivf);
+
+ /* rte_rawdev_close is called by pmd_release */
+ return rte_rawdev_pmd_release(rawdev);
+}
+
+static struct rte_pci_driver rte_dpi_rawdev_pmd = {
+ .id_table = pci_dma_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+ .probe = otx2_dpi_rawdev_probe,
+ .remove = otx2_dpi_rawdev_remove,
+};
+
+RTE_PMD_REGISTER_PCI(dpi_rawdev_pci_driver, rte_dpi_rawdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(dpi_rawdev_pci_driver, pci_dma_map);
+RTE_PMD_REGISTER_KMOD_DEP(dpi_rawdev_pci_driver, "vfio-pci");
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.h b/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.h
new file mode 100644
index 000000000..9ebb25988
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_rawdev.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _DPI_RAWDEV_H_
+#define _DPI_RAWDEV_H_
+
+#include "otx2_common.h"
+#include "otx2_mempool.h"
+
+#define DPI_QUEUE_OPEN 0x1
+#define DPI_QUEUE_CLOSE 0x2
+
+/* DPI VF register offsets from VF_BAR0 */
+#define DPI_VDMA_EN (0x0)
+#define DPI_VDMA_REQQ_CTL (0x8)
+#define DPI_VDMA_DBELL (0x10)
+#define DPI_VDMA_SADDR (0x18)
+#define DPI_VDMA_COUNTS (0x20)
+#define DPI_VDMA_NADDR (0x28)
+#define DPI_VDMA_IWBUSY (0x30)
+#define DPI_VDMA_CNT (0x38)
+#define DPI_VF_INT (0x100)
+#define DPI_VF_INT_W1S (0x108)
+#define DPI_VF_INT_ENA_W1C (0x110)
+#define DPI_VF_INT_ENA_W1S (0x118)
+
+#define DPI_MAX_VFS 8
+#define DPI_DMA_CMD_SIZE 64
+#define DPI_CHUNK_SIZE 1024
+#define DPI_QUEUE_STOP 0x0
+#define DPI_QUEUE_START 0x1
+
+#define DPI_VDMA_SADDR_REQ_IDLE 63
+#define DPI_MAX_POINTER 15
+#define STRM_INC(s) ((s)->tail = ((s)->tail + 1) % (s)->max_cnt)
+#define DPI_QFINISH_TIMEOUT (10 * 1000)
+
+/* DPI Transfer Type, pointer type in DPI_DMA_INSTR_HDR_S[XTYPE] */
+#define DPI_XTYPE_OUTBOUND (0)
+#define DPI_XTYPE_INBOUND (1)
+#define DPI_XTYPE_INTERNAL_ONLY (2)
+#define DPI_XTYPE_EXTERNAL_ONLY (3)
+#define DPI_XTYPE_MASK 0x3
+#define DPI_HDR_PT_ZBW_CA 0x0
+#define DPI_HDR_PT_ZBW_NC 0x1
+#define DPI_HDR_PT_WQP 0x2
+#define DPI_HDR_PT_WQP_NOSTATUS 0x0
+#define DPI_HDR_PT_WQP_STATUSCA 0x1
+#define DPI_HDR_PT_WQP_STATUSNC 0x3
+#define DPI_HDR_PT_CNT 0x3
+#define DPI_HDR_PT_MASK 0x3
+#define DPI_W0_TT_MASK 0x3
+#define DPI_W0_GRP_MASK 0x3FF
+/* Completion data is set to 0xFF when a request is submitted; upon
+ * successful completion the engine resets it to the completion status.
+ */
+#define DPI_REQ_CDATA 0xFF
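+
+/* The dequeue path polls this field: cdata stays at DPI_REQ_CDATA (0xFF)
+ * while a request is pending and is overwritten by the engine on
+ * completion (0 on success, per the check in
+ * otx2_dpi_rawdev_dequeue_bufs()).
+ */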
+
+struct dpi_vf_s {
+ struct rte_pci_device *dev;
+ uint8_t state;
+ uint16_t vf_id;
+ uint8_t domain;
+ uintptr_t vf_bar0;
+ uintptr_t vf_bar2;
+
+ uint16_t pool_size_m1;
+ uint16_t index;
+ uint64_t *base_ptr;
+ void *chunk_pool;
+ struct otx2_mbox *mbox;
+};
+
+struct dpi_rawdev_conf_s {
+ void *chunk_pool;
+};
+
+enum dpi_dma_queue_result_e {
+ DPI_DMA_QUEUE_SUCCESS = 0,
+ DPI_DMA_QUEUE_NO_MEMORY = -1,
+ DPI_DMA_QUEUE_INVALID_PARAM = -2,
+};
+
+struct dpi_dma_req_compl_s {
+ uint64_t cdata;
+ void (*compl_cb)(void *dev, void *arg);
+ void *cb_data;
+};
+
+union dpi_dma_ptr_u {
+ uint64_t u[2];
+ struct dpi_dma_s {
+ uint64_t length:16;
+ uint64_t reserved:44;
+ uint64_t bed:1; /* Big-Endian */
+ uint64_t alloc_l2:1;
+ uint64_t full_write:1;
+ uint64_t invert:1;
+ uint64_t ptr;
+ } s;
+};
+
+struct dpi_dma_buf_ptr_s {
+ union dpi_dma_ptr_u *rptr[DPI_MAX_POINTER]; /* Read From pointer list */
+ union dpi_dma_ptr_u *wptr[DPI_MAX_POINTER]; /* Write to pointer list */
+ uint8_t rptr_cnt;
+ uint8_t wptr_cnt;
+ struct dpi_dma_req_compl_s *comp_ptr;
+};
+
+struct dpi_cring_data_s {
+ struct dpi_dma_req_compl_s **compl_data;
+ uint16_t max_cnt;
+ uint16_t head;
+ uint16_t tail;
+};
+
+struct dpi_dma_queue_ctx_s {
+ uint16_t xtype:2;
+
+ /* Completion pointer type */
+ uint16_t pt:2;
+
+ /* Completion updated using WQE */
+ uint16_t tt:2;
+ uint16_t grp:10;
+ uint32_t tag;
+
+ /* Valid only for Outbound only mode */
+ uint16_t aura:12;
+ uint16_t csel:1;
+ uint16_t ca:1;
+ uint16_t fi:1;
+ uint16_t ii:1;
+ uint16_t fl:1;
+
+ uint16_t pvfe:1;
+ uint16_t dealloce:1;
+ uint16_t req_type:2;
+ uint16_t use_lock:1;
+ uint16_t deallocv;
+
+ struct dpi_cring_data_s *c_ring;
+};
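+
+/* A minimal context setup for an internal-only copy (sketch only; see the
+ * fuller example in otx2_dpi_test.c):
+ *
+ *	struct dpi_dma_queue_ctx_s ctx = {0};
+ *	ctx.xtype = DPI_XTYPE_INTERNAL_ONLY;
+ *	ctx.pt = DPI_HDR_PT_ZBW_CA;
+ *	ctx.c_ring = &cring;
+ */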
+
+/* DPI DMA Instruction Header Format */
+union dpi_dma_instr_hdr_u {
+ uint64_t u[4];
+
+ struct dpi_dma_instr_hdr_s_s {
+ uint64_t tag:32;
+ uint64_t tt:2;
+ uint64_t grp:10;
+ uint64_t reserved_44_47:4;
+ uint64_t nfst:4;
+ uint64_t reserved_52_53:2;
+ uint64_t nlst:4;
+ uint64_t reserved_58_63:6;
+ /* Word 0 - End */
+
+ uint64_t aura:12;
+ uint64_t reserved_76_79:4;
+ uint64_t deallocv:16;
+ uint64_t dealloce:1;
+ uint64_t pvfe:1;
+ uint64_t reserved_98_99:2;
+ uint64_t pt:2;
+ uint64_t reserved_102_103:2;
+ uint64_t fl:1;
+ uint64_t ii:1;
+ uint64_t fi:1;
+ uint64_t ca:1;
+ uint64_t csel:1;
+ uint64_t reserved_109_111:3;
+ uint64_t xtype:2;
+ uint64_t reserved_114_119:6;
+ uint64_t fport:2;
+ uint64_t reserved_122_123:2;
+ uint64_t lport:2;
+ uint64_t reserved_126_127:2;
+ /* Word 1 - End */
+
+ uint64_t ptr:64;
+ /* Word 2 - End */
+
+ uint64_t reserved_192_255:64;
+ /* Word 3 - End */
+ } s;
+};
+
+int otx2_dpi_queue_open(uint16_t vf_id, uint32_t size, uint32_t gaura);
+int otx2_dpi_queue_close(uint16_t vf_id);
+int test_otx2_dma_rawdev(uint16_t val);
+
+#endif /* _DPI_RAWDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_test.c b/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_test.c
new file mode 100644
index 000000000..276658af0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_dma/otx2_dpi_test.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_per_lcore.h>
+#include <rte_rawdev.h>
+
+#include "otx2_dpi_rawdev.h"
+
+static struct dpi_cring_data_s cring;
+
+static uint8_t
+buffer_fill(uint8_t *addr, int len, uint8_t val)
+{
+ int j = 0;
+
+ memset(addr, 0, len);
+ for (j = 0; j < len; j++)
+ *(addr + j) = val++;
+
+ return val;
+}
+
+static int
+validate_buffer(uint8_t *saddr, uint8_t *daddr, int len)
+{
+ int j = 0, ret = 0;
+
+ for (j = 0; j < len; j++) {
+ if (*(saddr + j) != *(daddr + j)) {
+ otx2_dpi_dbg("FAIL: Data Integrity failed");
+ otx2_dpi_dbg("index: %d, Expected: 0x%x, Actual: 0x%x",
+ j, *(saddr + j), *(daddr + j));
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static inline int
+dma_test_internal(int dma_port, int buf_size)
+{
+ struct dpi_dma_req_compl_s *comp_data;
+ struct dpi_dma_queue_ctx_s ctx = {0};
+ struct rte_rawdev_buf buf = {0};
+ struct rte_rawdev_buf *d_buf[1];
+ struct rte_rawdev_buf *bufp[1];
+ struct dpi_dma_buf_ptr_s cmd;
+ union dpi_dma_ptr_u rptr = { {0} };
+ union dpi_dma_ptr_u wptr = { {0} };
+ uint8_t *fptr, *lptr;
+ int ret;
+
+ fptr = (uint8_t *)rte_malloc("dummy", buf_size, 128);
+ lptr = (uint8_t *)rte_malloc("dummy", buf_size, 128);
+ comp_data = rte_malloc("dummy", buf_size, 128);
+ if (fptr == NULL || lptr == NULL || comp_data == NULL) {
+ otx2_dpi_dbg("Unable to allocate internal memory");
+ return -ENOMEM;
+ }
+
+ buffer_fill(fptr, buf_size, 0);
+ memset(&cmd, 0, sizeof(struct dpi_dma_buf_ptr_s));
+ memset(lptr, 0, buf_size);
+ memset(comp_data, 0, buf_size);
+ rptr.s.ptr = (uint64_t)fptr;
+ rptr.s.length = buf_size;
+ wptr.s.ptr = (uint64_t)lptr;
+ wptr.s.length = buf_size;
+ cmd.rptr[0] = &rptr;
+ cmd.wptr[0] = &wptr;
+ cmd.rptr_cnt = 1;
+ cmd.wptr_cnt = 1;
+ cmd.comp_ptr = comp_data;
+ buf.buf_addr = (void *)&cmd;
+ bufp[0] = &buf;
+
+ ctx.xtype = DPI_XTYPE_INTERNAL_ONLY;
+ ctx.pt = 0;
+ ctx.c_ring = &cring;
+
+ ret = rte_rawdev_enqueue_buffers(dma_port,
+ (struct rte_rawdev_buf **)bufp, 1,
+ &ctx);
+ if (ret < 0) {
+ otx2_dpi_dbg("Enqueue request failed");
+ return 0;
+ }
+
+ /* Wait and dequeue completion */
+ do {
+ sleep(1);
+ ret = rte_rawdev_dequeue_buffers(dma_port, &d_buf[0], 1, &ctx);
+ if (ret)
+ break;
+
+ otx2_dpi_dbg("Dequeue request not completed");
+ } while (1);
+
+ if (validate_buffer(fptr, lptr, buf_size)) {
+ otx2_dpi_dbg("DMA transfer failed\n");
+ return -EAGAIN;
+ }
+ otx2_dpi_dbg("Internal Only DMA transfer successfully completed");
+
+ if (lptr)
+ rte_free(lptr);
+ if (fptr)
+ rte_free(fptr);
+ if (comp_data)
+ rte_free(comp_data);
+
+ return 0;
+}
+
+static void *
+dpi_create_mempool(void)
+{
+ void *chunk_pool = NULL;
+ char pool_name[25];
+ int ret;
+
+ snprintf(pool_name, sizeof(pool_name), "dpi_chunk_pool");
+
+ chunk_pool = (void *)rte_mempool_create_empty(pool_name, 1024, 1024,
+ 0, 0, rte_socket_id(), 0);
+ if (chunk_pool == NULL) {
+ otx2_dpi_dbg("Unable to create memory pool.");
+ return NULL;
+ }
+
+ ret = rte_mempool_set_ops_byname(chunk_pool,
+ rte_mbuf_platform_mempool_ops(), NULL);
+ if (ret < 0) {
+ otx2_dpi_dbg("Unable to set pool ops");
+ rte_mempool_free(chunk_pool);
+ return NULL;
+ }
+
+	ret = rte_mempool_populate_default(chunk_pool);
+	if (ret < 0) {
+		otx2_dpi_dbg("Unable to populate pool");
+		rte_mempool_free(chunk_pool);
+		return NULL;
+	}
+
+ return chunk_pool;
+}
+
+int
+test_otx2_dma_rawdev(uint16_t val)
+{
+ struct rte_rawdev_info rdev_info = {0};
+ struct dpi_rawdev_conf_s conf = {0};
+ int ret, i, size = 1024;
+ int nb_ports;
+
+ RTE_SET_USED(val);
+ nb_ports = rte_rawdev_count();
+ if (nb_ports == 0) {
+ otx2_dpi_dbg("No Rawdev ports - bye");
+ return -ENODEV;
+ }
+
+ i = rte_rawdev_get_dev_id("DPI:5:00.1");
+ /* Configure rawdev ports */
+ conf.chunk_pool = dpi_create_mempool();
+ rdev_info.dev_private = &conf;
+ ret = rte_rawdev_configure(i, (rte_rawdev_obj_t)&rdev_info);
+ if (ret) {
+ otx2_dpi_dbg("Unable to configure DPIVF %d", i);
+ return -ENODEV;
+ }
+ otx2_dpi_dbg("rawdev %d configured successfully", i);
+
+	/* Each stream allocates its own completion ring data and stores it
+	 * in the application context. Each stream needs to use the same
+	 * application context for enqueue/dequeue.
+	 */
+ cring.compl_data = rte_malloc("dummy", sizeof(void *) * 1024, 128);
+ if (!cring.compl_data) {
+ otx2_dpi_dbg("Completion allocation failed");
+ return -ENOMEM;
+ }
+
+ cring.max_cnt = 1024;
+ cring.head = 0;
+ cring.tail = 0;
+
+ ret = dma_test_internal(i, size);
+ if (ret)
+ otx2_dpi_dbg("DMA transfer failed for queue %d", i);
+
+ if (rte_rawdev_close(i))
+ otx2_dpi_dbg("Dev close failed for port %d", i);
+
+ if (conf.chunk_pool)
+ rte_mempool_free(conf.chunk_pool);
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_dma/rte_rawdev_octeontx2_dma_version.map b/src/spdk/dpdk/drivers/raw/octeontx2_dma/rte_rawdev_octeontx2_dma_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_dma/rte_rawdev_octeontx2_dma_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/Makefile b/src/spdk/dpdk/drivers/raw/octeontx2_ep/Makefile
new file mode 100644
index 000000000..1a54bf56f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Library name
+LIB = librte_rawdev_octeontx2_ep.a
+
+# Build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2/
+CFLAGS += -I$(RTE_SDK)/drivers/raw/octeontx2_ep/
+
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_mempool
+LDLIBS += -lrte_common_octeontx2
+
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -diag-disable 2259
+endif
+endif
+
+EXPORT_MAP := rte_rawdev_octeontx2_ep_version.map
+
+#
+# All source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_enqdeq.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_test.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_vf.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/meson.build b/src/spdk/dpdk/drivers/raw/octeontx2_ep/meson.build
new file mode 100644
index 000000000..0e6338f76
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+deps += ['bus_pci', 'common_octeontx2', 'rawdev']
+sources = files('otx2_ep_rawdev.c',
+ 'otx2_ep_enqdeq.c',
+ 'otx2_ep_test.c',
+ 'otx2_ep_vf.c')
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c
new file mode 100644
index 000000000..9f1e5eda6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c
@@ -0,0 +1,846 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+
+#include <rte_bus.h>
+#include <rte_bus_pci.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_mempool.h>
+#include <rte_pci.h>
+
+#include <rte_common.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "otx2_common.h"
+#include "otx2_ep_enqdeq.h"
+
+static void
+sdp_dmazone_free(const struct rte_memzone *mz)
+{
+ const struct rte_memzone *mz_tmp;
+ int ret = 0;
+
+	if (mz == NULL) {
+		otx2_err("Memzone: NULL");
+		return;
+	}
+
+ mz_tmp = rte_memzone_lookup(mz->name);
+ if (mz_tmp == NULL) {
+ otx2_err("Memzone %s Not Found", mz->name);
+ return;
+ }
+
+ ret = rte_memzone_free(mz);
+ if (ret)
+ otx2_err("Memzone free failed : ret = %d", ret);
+}
+
+/* Free IQ resources */
+int
+sdp_delete_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
+{
+ struct sdp_instr_queue *iq;
+
+ iq = sdpvf->instr_queue[iq_no];
+ if (iq == NULL) {
+ otx2_err("Invalid IQ[%d]\n", iq_no);
+ return -ENOMEM;
+ }
+
+ rte_free(iq->req_list);
+ iq->req_list = NULL;
+
+ if (iq->iq_mz) {
+ sdp_dmazone_free(iq->iq_mz);
+ iq->iq_mz = NULL;
+ }
+
+ rte_free(sdpvf->instr_queue[iq_no]);
+ sdpvf->instr_queue[iq_no] = NULL;
+
+ sdpvf->num_iqs--;
+
+ otx2_info("IQ[%d] is deleted", iq_no);
+
+ return 0;
+}
+
+/* IQ initialization */
+static int
+sdp_init_instr_queue(struct sdp_device *sdpvf, int iq_no)
+{
+ const struct sdp_config *conf;
+ struct sdp_instr_queue *iq;
+ uint32_t q_size;
+
+ conf = sdpvf->conf;
+ iq = sdpvf->instr_queue[iq_no];
+ q_size = conf->iq.instr_type * conf->num_iqdef_descs;
+
+ /* IQ memory creation for Instruction submission to OCTEON TX2 */
+ iq->iq_mz = rte_memzone_reserve_aligned("iqmz",
+ q_size,
+ rte_socket_id(),
+ RTE_MEMZONE_IOVA_CONTIG,
+ RTE_CACHE_LINE_SIZE);
+ if (iq->iq_mz == NULL) {
+ otx2_err("IQ[%d] memzone alloc failed", iq_no);
+ goto iq_init_fail;
+ }
+
+ iq->base_addr_dma = iq->iq_mz->iova;
+ iq->base_addr = (uint8_t *)iq->iq_mz->addr;
+
+ if (conf->num_iqdef_descs & (conf->num_iqdef_descs - 1)) {
+ otx2_err("IQ[%d] descs not in power of 2", iq_no);
+ goto iq_init_fail;
+ }
+
+ iq->nb_desc = conf->num_iqdef_descs;
+
+	/* Create an IQ request list to hold requests that have been
+	 * posted to OCTEON TX2. This list will be used for freeing the IQ
+	 * data buffer(s) later once OCTEON TX2 has fetched the requests.
+	 */
+ iq->req_list = rte_zmalloc_socket("request_list",
+ (iq->nb_desc * SDP_IQREQ_LIST_SIZE),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+ if (iq->req_list == NULL) {
+ otx2_err("IQ[%d] req_list alloc failed", iq_no);
+ goto iq_init_fail;
+ }
+
+ otx2_info("IQ[%d]: base: %p basedma: %lx count: %d",
+ iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
+ iq->nb_desc);
+
+ iq->sdp_dev = sdpvf;
+ iq->q_no = iq_no;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->otx_read_index = 0;
+ iq->flush_index = 0;
+
+ /* Initialize the spinlock for this instruction queue */
+ rte_spinlock_init(&iq->lock);
+ rte_spinlock_init(&iq->post_lock);
+
+ rte_atomic64_clear(&iq->iq_flush_running);
+
+ sdpvf->io_qmask.iq |= (1ull << iq_no);
+
+ /* Set 32B/64B mode for each input queue */
+ if (conf->iq.instr_type == 64)
+ sdpvf->io_qmask.iq64B |= (1ull << iq_no);
+
+ iq->iqcmd_64B = (conf->iq.instr_type == 64);
+
+ /* Set up IQ registers */
+ sdpvf->fn_list.setup_iq_regs(sdpvf, iq_no);
+
+ return 0;
+
+iq_init_fail:
+ return -ENOMEM;
+}
+
+int
+sdp_setup_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
+{
+ struct sdp_instr_queue *iq;
+
+ iq = (struct sdp_instr_queue *)rte_zmalloc("sdp_IQ", sizeof(*iq),
+ RTE_CACHE_LINE_SIZE);
+ if (iq == NULL)
+ return -ENOMEM;
+
+ sdpvf->instr_queue[iq_no] = iq;
+
+ if (sdp_init_instr_queue(sdpvf, iq_no)) {
+ otx2_err("IQ init is failed");
+ goto delete_IQ;
+ }
+ otx2_info("IQ[%d] is created.", sdpvf->num_iqs);
+
+ sdpvf->num_iqs++;
+
+ return 0;
+
+delete_IQ:
+ sdp_delete_iqs(sdpvf, iq_no);
+ return -ENOMEM;
+}
+
+static void
+sdp_droq_reset_indices(struct sdp_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ rte_atomic64_set(&droq->pkts_pending, 0);
+}
+
+static void
+sdp_droq_destroy_ring_buffers(struct sdp_device *sdpvf,
+ struct sdp_droq *droq)
+{
+ uint32_t idx;
+
+ for (idx = 0; idx < droq->nb_desc; idx++) {
+ if (droq->recv_buf_list[idx].buffer) {
+ rte_mempool_put(sdpvf->enqdeq_mpool,
+ droq->recv_buf_list[idx].buffer);
+
+ droq->recv_buf_list[idx].buffer = NULL;
+ }
+ }
+
+ sdp_droq_reset_indices(droq);
+}
+
+/* Free OQs resources */
+int
+sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
+{
+ struct sdp_droq *droq;
+
+ droq = sdpvf->droq[oq_no];
+ if (droq == NULL) {
+ otx2_err("Invalid droq[%d]", oq_no);
+ return -ENOMEM;
+ }
+
+ sdp_droq_destroy_ring_buffers(sdpvf, droq);
+ rte_free(droq->recv_buf_list);
+ droq->recv_buf_list = NULL;
+
+ if (droq->info_mz) {
+ sdp_dmazone_free(droq->info_mz);
+ droq->info_mz = NULL;
+ }
+
+ if (droq->desc_ring_mz) {
+ sdp_dmazone_free(droq->desc_ring_mz);
+ droq->desc_ring_mz = NULL;
+ }
+
+ memset(droq, 0, SDP_DROQ_SIZE);
+
+ rte_free(sdpvf->droq[oq_no]);
+ sdpvf->droq[oq_no] = NULL;
+
+ sdpvf->num_oqs--;
+
+ otx2_info("OQ[%d] is deleted", oq_no);
+ return 0;
+}
+
+static int
+sdp_droq_setup_ring_buffers(struct sdp_device *sdpvf,
+ struct sdp_droq *droq)
+{
+ struct sdp_droq_desc *desc_ring = droq->desc_ring;
+ uint32_t idx;
+ void *buf;
+
+ for (idx = 0; idx < droq->nb_desc; idx++) {
+ if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf) ||
+ (buf == NULL)) {
+ otx2_err("OQ buffer alloc failed");
+ droq->stats.rx_alloc_failure++;
+ /* sdp_droq_destroy_ring_buffers(droq);*/
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[idx].buffer = buf;
+ droq->info_list[idx].length = 0;
+
+ /* Map ring buffers into memory */
+ desc_ring[idx].info_ptr = (uint64_t)(droq->info_list_dma +
+ (idx * SDP_DROQ_INFO_SIZE));
+
+ desc_ring[idx].buffer_ptr = rte_mem_virt2iova(buf);
+ }
+
+ sdp_droq_reset_indices(droq);
+
+ return 0;
+}
+
+static void *
+sdp_alloc_info_buffer(struct sdp_device *sdpvf __rte_unused,
+ struct sdp_droq *droq)
+{
+ char name[RTE_MEMZONE_NAMESIZE];
+
+ /* A per-queue memzone name avoids collisions across OQs */
+ snprintf(name, sizeof(name), "OQ_info_list_%u", droq->q_no);
+ droq->info_mz = rte_memzone_reserve_aligned(name,
+ (droq->nb_desc * SDP_DROQ_INFO_SIZE),
+ rte_socket_id(),
+ RTE_MEMZONE_IOVA_CONTIG,
+ RTE_CACHE_LINE_SIZE);
+
+ if (droq->info_mz == NULL)
+ return NULL;
+
+ droq->info_list_dma = droq->info_mz->iova;
+ droq->info_alloc_size = droq->info_mz->len;
+ droq->info_base_addr = (size_t)droq->info_mz->addr;
+
+ return droq->info_mz->addr;
+}
+
+/* OQ initialization */
+static int
+sdp_init_droq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+ const struct sdp_config *conf = sdpvf->conf;
+ uint32_t c_refill_threshold;
+ char name[RTE_MEMZONE_NAMESIZE];
+ uint32_t desc_ring_size;
+ struct sdp_droq *droq;
+
+ otx2_info("OQ[%d] Init start", q_no);
+
+ droq = sdpvf->droq[q_no];
+ droq->sdp_dev = sdpvf;
+ droq->q_no = q_no;
+
+ c_refill_threshold = conf->oq.refill_threshold;
+ droq->nb_desc = conf->num_oqdef_descs;
+ droq->buffer_size = conf->oqdef_buf_size;
+
+ /* OQ desc_ring set up */
+ desc_ring_size = droq->nb_desc * SDP_DROQ_DESC_SIZE;
+ snprintf(name, sizeof(name), "sdp_oqmz_%u", q_no);
+ droq->desc_ring_mz = rte_memzone_reserve_aligned(name,
+ desc_ring_size,
+ rte_socket_id(),
+ RTE_MEMZONE_IOVA_CONTIG,
+ RTE_CACHE_LINE_SIZE);
+
+ if (droq->desc_ring_mz == NULL) {
+ otx2_err("OQ:%d desc_ring allocation failed", q_no);
+ goto init_droq_fail;
+ }
+
+ droq->desc_ring_dma = droq->desc_ring_mz->iova;
+ droq->desc_ring = (struct sdp_droq_desc *)droq->desc_ring_mz->addr;
+
+ otx2_sdp_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx",
+ q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
+ otx2_sdp_dbg("OQ[%d]: num_desc: %d", q_no, droq->nb_desc);
+
+ /* OQ info_list set up */
+ droq->info_list = sdp_alloc_info_buffer(sdpvf, droq);
+ if (droq->info_list == NULL) {
+ otx2_err("memory allocation failed for OQ[%d] info_list", q_no);
+ goto init_droq_fail;
+ }
+
+ /* OQ buf_list set up */
+ droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
+ (droq->nb_desc * SDP_DROQ_RECVBUF_SIZE),
+ RTE_CACHE_LINE_SIZE, rte_socket_id());
+ if (droq->recv_buf_list == NULL) {
+ otx2_err("OQ recv_buf_list alloc failed");
+ goto init_droq_fail;
+ }
+
+ if (sdp_droq_setup_ring_buffers(sdpvf, droq))
+ goto init_droq_fail;
+
+ droq->refill_threshold = c_refill_threshold;
+ rte_spinlock_init(&droq->lock);
+
+ /* Set up OQ registers */
+ sdpvf->fn_list.setup_oq_regs(sdpvf, q_no);
+
+ sdpvf->io_qmask.oq |= (1ull << q_no);
+
+ return 0;
+
+init_droq_fail:
+ return -ENOMEM;
+}
+
+/* OQ configuration and setup */
+int
+sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
+{
+ struct sdp_droq *droq;
+
+ /* Allocate new droq. */
+ droq = (struct sdp_droq *)rte_zmalloc("sdp_OQ",
+ sizeof(*droq), RTE_CACHE_LINE_SIZE);
+ if (droq == NULL) {
+ otx2_err("Droq[%d] Creation Failed", oq_no);
+ return -ENOMEM;
+ }
+ sdpvf->droq[oq_no] = droq;
+
+ if (sdp_init_droq(sdpvf, oq_no)) {
+ otx2_err("Droq[%d] Initialization failed", oq_no);
+ goto delete_OQ;
+ }
+ otx2_info("OQ[%d] is created.", oq_no);
+
+ sdpvf->num_oqs++;
+
+ return 0;
+
+delete_OQ:
+ sdp_delete_oqs(sdpvf, oq_no);
+ return -ENOMEM;
+}
+
+static inline void
+sdp_iqreq_delete(struct sdp_device *sdpvf,
+ struct sdp_instr_queue *iq, uint32_t idx)
+{
+ uint32_t reqtype;
+ void *buf;
+
+ buf = iq->req_list[idx].buf;
+ reqtype = iq->req_list[idx].reqtype;
+
+ switch (reqtype) {
+ case SDP_REQTYPE_NORESP:
+ rte_mempool_put(sdpvf->enqdeq_mpool, buf);
+ otx2_sdp_dbg("IQ buffer freed at idx[%d]", idx);
+ break;
+
+ case SDP_REQTYPE_NORESP_GATHER:
+ case SDP_REQTYPE_NONE:
+ default:
+ otx2_info("This iqreq mode is not supported:%d", reqtype);
+
+ }
+
+ /* Reset the request list at this index */
+ iq->req_list[idx].buf = NULL;
+ iq->req_list[idx].reqtype = 0;
+}
+
+static inline void
+sdp_iqreq_add(struct sdp_instr_queue *iq, void *buf,
+ uint32_t reqtype)
+{
+ iq->req_list[iq->host_write_index].buf = buf;
+ iq->req_list[iq->host_write_index].reqtype = reqtype;
+
+ otx2_sdp_dbg("IQ buffer added at idx[%d]", iq->host_write_index);
+
+}
+
+static void
+sdp_flush_iq(struct sdp_device *sdpvf,
+ struct sdp_instr_queue *iq,
+ uint32_t pending_thresh __rte_unused)
+{
+ uint32_t instr_processed = 0;
+
+ rte_spinlock_lock(&iq->lock);
+
+ iq->otx_read_index = sdpvf->fn_list.update_iq_read_idx(iq);
+ while (iq->flush_index != iq->otx_read_index) {
+ /* Free the IQ data buffer to the pool */
+ sdp_iqreq_delete(sdpvf, iq, iq->flush_index);
+ iq->flush_index =
+ sdp_incr_index(iq->flush_index, 1, iq->nb_desc);
+
+ instr_processed++;
+ }
+
+ iq->stats.instr_processed = instr_processed;
+ rte_atomic64_sub(&iq->instr_pending, instr_processed);
+
+ rte_spinlock_unlock(&iq->lock);
+}
+
+static inline void
+sdp_ring_doorbell(struct sdp_device *sdpvf __rte_unused,
+ struct sdp_instr_queue *iq)
+{
+ otx2_write64(iq->fill_cnt, iq->doorbell_reg);
+
+ /* Make sure doorbell writes observed by HW */
+ rte_cio_wmb();
+ iq->fill_cnt = 0;
+
+}
+
+static inline int
+post_iqcmd(struct sdp_instr_queue *iq, uint8_t *iqcmd)
+{
+ uint8_t *iqptr, cmdsize;
+
+ /* This ensures that the read index does not wrap around to
+ * the same position if the queue gets full before OCTEON TX2 could
+ * fetch any instr.
+ */
+ if (rte_atomic64_read(&iq->instr_pending) >=
+ (int32_t)(iq->nb_desc - 1)) {
+ otx2_err("IQ is full, pending:%ld",
+ (long)rte_atomic64_read(&iq->instr_pending));
+
+ return SDP_IQ_SEND_FAILED;
+ }
+
+ /* Copy cmd into iq */
+ cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+ iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+ rte_memcpy(iqptr, iqcmd, cmdsize);
+
+ otx2_sdp_dbg("IQ cmd posted @ index:%d", iq->host_write_index);
+
+ /* Increment the host write index */
+ iq->host_write_index =
+ sdp_incr_index(iq->host_write_index, 1, iq->nb_desc);
+
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data
+ * is in memory before indicating that the instruction is
+ * pending.
+ */
+ rte_smp_wmb();
+ rte_atomic64_inc(&iq->instr_pending);
+
+ /* SDP_IQ_SEND_SUCCESS */
+ return 0;
+}
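+
+/* Capacity note (illustrative, not from the original sources): the pending
+ * check above admits at most nb_desc - 1 outstanding instructions, e.g. a
+ * 1024-entry ring never carries more than 1023 in flight, so the write
+ * index cannot collide with the flush/read index on a full queue.
+ */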
+
+
+static int
+sdp_send_data(struct sdp_device *sdpvf,
+ struct sdp_instr_queue *iq, void *cmd)
+{
+ int ret;
+
+ /* Lock this IQ command queue before posting instruction */
+ rte_spinlock_lock(&iq->post_lock);
+
+ /* Submit IQ command */
+ ret = post_iqcmd(iq, cmd);
+
+ if (ret == SDP_IQ_SEND_SUCCESS) {
+ sdp_ring_doorbell(sdpvf, iq);
+
+ iq->stats.instr_posted++;
+ otx2_sdp_dbg("Instr submit success posted: %ld\n",
+ (long)iq->stats.instr_posted);
+
+ } else {
+ iq->stats.instr_dropped++;
+ otx2_err("Instr submit failed, dropped: %ld\n",
+ (long)iq->stats.instr_dropped);
+
+ }
+
+ rte_spinlock_unlock(&iq->post_lock);
+
+ return ret;
+}
+
+
+/* Enqueue requests/packets to the SDP IQ.
+ * Returns the number of requests enqueued successfully.
+ */
+int
+sdp_rawdev_enqueue(struct rte_rawdev *rawdev,
+ struct rte_rawdev_buf **buffers __rte_unused,
+ unsigned int count, rte_rawdev_obj_t context)
+{
+ struct sdp_instr_64B *iqcmd;
+ struct sdp_instr_queue *iq;
+ struct sdp_soft_instr *si;
+ struct sdp_device *sdpvf;
+
+ struct sdp_instr_ih ihx;
+
+ sdpvf = (struct sdp_device *)rawdev->dev_private;
+ si = (struct sdp_soft_instr *)context;
+
+ iq = sdpvf->instr_queue[si->q_no];
+
+ if (count != 1) {
+ otx2_err("Only a single request is supported: req[%d]", count);
+ goto enq_fail;
+ }
+
+ memset(&ihx, 0, sizeof(struct sdp_instr_ih));
+
+ iqcmd = &si->command;
+ memset(iqcmd, 0, sizeof(struct sdp_instr_64B));
+
+ iqcmd->dptr = (uint64_t)si->dptr;
+
+ /* Populate SDP IH */
+ ihx.pkind = sdpvf->pkind;
+ ihx.fsz = si->ih.fsz + 8; /* 8B for NIX IH */
+ ihx.gather = si->ih.gather;
+
+
+ switch (ihx.gather) {
+ case 0: /* Direct data instr */
+ ihx.tlen = si->ih.tlen + ihx.fsz;
+ break;
+
+ default: /* Gather */
+ switch (si->ih.gsz) {
+ case 0: /* Direct gather instr */
+ otx2_err("Direct Gather instr : not supported");
+ goto enq_fail;
+
+ default: /* Indirect gather instr */
+ otx2_err("Indirect Gather instr : not supported");
+ goto enq_fail;
+ }
+ }
+
+ rte_memcpy(&iqcmd->ih, &ihx, sizeof(uint64_t));
+ iqcmd->rptr = (uint64_t)si->rptr;
+ rte_memcpy(&iqcmd->irh, &si->irh, sizeof(uint64_t));
+
+ /* Byte-swap rptr and irh here, to avoid swapping on the OCTEON TX2 side */
+ sdp_swap_8B_data(&iqcmd->rptr, 1);
+ sdp_swap_8B_data(&iqcmd->irh, 1);
+
+ otx2_sdp_dbg("After swapping");
+ otx2_sdp_dbg("Word0 [dptr]: 0x%016lx", (unsigned long)iqcmd->dptr);
+ otx2_sdp_dbg("Word1 [ihtx]: 0x%016lx", (unsigned long)iqcmd->ih);
+ otx2_sdp_dbg("Word2 [rptr]: 0x%016lx", (unsigned long)iqcmd->rptr);
+ otx2_sdp_dbg("Word3 [irh]: 0x%016lx", (unsigned long)iqcmd->irh);
+ otx2_sdp_dbg("Word4 [exhdr[0]]: 0x%016lx",
+ (unsigned long)iqcmd->exhdr[0]);
+
+ sdp_iqreq_add(iq, si->dptr, si->reqtype);
+
+ if (sdp_send_data(sdpvf, iq, iqcmd)) {
+ otx2_err("Data send failed :");
+ sdp_iqreq_delete(sdpvf, iq, iq->host_write_index);
+ goto enq_fail;
+ }
+
+ if (rte_atomic64_read(&iq->instr_pending) >= 1)
+ sdp_flush_iq(sdpvf, iq, 1 /*(iq->nb_desc / 2)*/);
+
+ /* Return the number of instructions posted successfully. */
+ return count;
+
+enq_fail:
+ return SDP_IQ_SEND_FAILED;
+}
+
+static uint32_t
+sdp_droq_refill(struct sdp_device *sdpvf, struct sdp_droq *droq)
+{
+ struct sdp_droq_desc *desc_ring;
+ uint32_t desc_refilled = 0;
+ void *buf = NULL;
+
+ desc_ring = droq->desc_ring;
+
+ while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
+ /* If the descriptor still holds a valid buffer (i.e. it has
+ * not been consumed yet), there is nothing to refill here.
+ */
+ if (droq->recv_buf_list[droq->refill_idx].buffer != NULL)
+ break;
+
+ if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf) ||
+ (buf == NULL)) {
+ /* If a buffer could not be allocated, no point in
+ * continuing
+ */
+ droq->stats.rx_alloc_failure++;
+ break;
+ }
+
+ droq->recv_buf_list[droq->refill_idx].buffer = buf;
+ desc_ring[droq->refill_idx].buffer_ptr = rte_mem_virt2iova(buf);
+
+ /* Reset any previous values in the length field. */
+ droq->info_list[droq->refill_idx].length = 0;
+
+ droq->refill_idx = sdp_incr_index(droq->refill_idx, 1,
+ droq->nb_desc);
+
+ desc_refilled++;
+ droq->refill_count--;
+
+ }
+
+ return desc_refilled;
+}
+
+static int
+sdp_droq_read_packet(struct sdp_device *sdpvf __rte_unused,
+ struct sdp_droq *droq,
+ struct sdp_droq_pkt *droq_pkt)
+{
+ struct sdp_droq_info *info;
+ uint32_t pkt_len = 0;
+
+ info = &droq->info_list[droq->read_idx];
+ sdp_swap_8B_data((uint64_t *)&info->length, 1);
+ if (!info->length) {
+ otx2_err("OQ info_list->length[%ld]", (long)info->length);
+ goto oq_read_fail;
+ }
+
+ /* Deduce the actual data size */
+ info->length -= SDP_RH_SIZE;
+
+ otx2_sdp_dbg("OQ: pkt_len[%ld], buffer_size %d",
+ (long)info->length, droq->buffer_size);
+ if (info->length > droq->buffer_size) {
+ otx2_err("This mode is not supported: pkt_len > buffer_size");
+ goto oq_read_fail;
+ }
+
+ /* The check above guarantees length <= buffer_size here */
+ pkt_len = (uint32_t)info->length;
+ droq_pkt->data = droq->recv_buf_list[droq->read_idx].buffer;
+ droq_pkt->len = pkt_len;
+
+ droq->recv_buf_list[droq->read_idx].buffer = NULL;
+ droq->read_idx = sdp_incr_index(droq->read_idx, 1, /* count */
+ droq->nb_desc /* max rd idx */);
+ droq->refill_count++;
+
+ info->length = 0;
+
+ return SDP_OQ_RECV_SUCCESS;
+
+oq_read_fail:
+ return SDP_OQ_RECV_FAILED;
+}
+
+static inline uint32_t
+sdp_check_droq_pkts(struct sdp_droq *droq, uint32_t burst_size)
+{
+ uint32_t min_pkts = 0;
+ uint32_t new_pkts;
+ uint32_t pkt_count;
+
+ /* Latest available OQ packets */
+ pkt_count = rte_read32(droq->pkts_sent_reg);
+
+ /* Newly arrived packets */
+ new_pkts = pkt_count - droq->last_pkt_count;
+ otx2_sdp_dbg("Recvd [%d] new OQ pkts", new_pkts);
+
+ min_pkts = (new_pkts > burst_size) ? burst_size : new_pkts;
+ if (min_pkts) {
+ rte_atomic64_add(&droq->pkts_pending, min_pkts);
+ /* Back up the aggregated packet count so far */
+ droq->last_pkt_count += min_pkts;
+ }
+
+ return min_pkts;
+}
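+
+/* Illustrative note: pkt_count and last_pkt_count are unsigned 32-bit, so
+ * the subtraction above remains correct across a hardware counter
+ * rollover, e.g. last_pkt_count = 0xfffffffe with a fresh reading of
+ * 0x00000001 gives new_pkts = 3.
+ */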
+
+/* Check for response arrival from OCTEON TX2
+ * returns number of requests completed
+ */
+int
+sdp_rawdev_dequeue(struct rte_rawdev *rawdev,
+ struct rte_rawdev_buf **buffers, unsigned int count,
+ rte_rawdev_obj_t context __rte_unused)
+{
+ struct sdp_droq_pkt *oq_pkt;
+ struct sdp_device *sdpvf;
+ struct sdp_droq *droq;
+
+ uint32_t q_no = 0, pkts;
+ uint32_t new_pkts;
+ uint32_t ret;
+
+ sdpvf = (struct sdp_device *)rawdev->dev_private;
+
+ droq = sdpvf->droq[q_no];
+ if (!droq) {
+ otx2_err("Invalid droq[%d]", q_no);
+ goto droq_err;
+ }
+
+ /* Grab the lock */
+ rte_spinlock_lock(&droq->lock);
+
+ new_pkts = sdp_check_droq_pkts(droq, count);
+ if (!new_pkts) {
+ otx2_sdp_dbg("Zero new_pkts:%d", new_pkts);
+ goto deq_fail; /* No pkts at this moment */
+ }
+
+ otx2_sdp_dbg("Received new_pkts = %d", new_pkts);
+
+ for (pkts = 0; pkts < new_pkts; pkts++) {
+
+ /* Push the received pkt to application */
+ oq_pkt = (struct sdp_droq_pkt *)buffers[pkts];
+
+ ret = sdp_droq_read_packet(sdpvf, droq, oq_pkt);
+ if (ret) {
+ otx2_err("DROQ read pakt failed.");
+ goto deq_fail;
+ }
+
+ /* Stats */
+ droq->stats.pkts_received++;
+ droq->stats.bytes_received += oq_pkt->len;
+ }
+
+ /* Ack the h/w with no# of pkts read by Host */
+ rte_write32(pkts, droq->pkts_sent_reg);
+ rte_cio_wmb();
+
+ droq->last_pkt_count -= pkts;
+
+ otx2_sdp_dbg("DROQ pkts[%d] pushed to application", pkts);
+
+ /* Refill DROQ buffers */
+ if (droq->refill_count >= 2 /* droq->refill_threshold */) {
+ int desc_refilled = sdp_droq_refill(sdpvf, droq);
+
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory is
+ * accurate.
+ */
+ rte_write32(desc_refilled, droq->pkts_credit_reg);
+
+ /* Ensure mmio write completes */
+ rte_wmb();
+ otx2_sdp_dbg("Refilled count = %d", desc_refilled);
+ }
+
+ /* Release the spin lock */
+ rte_spinlock_unlock(&droq->lock);
+
+ return pkts;
+
+deq_fail:
+ rte_spinlock_unlock(&droq->lock);
+
+droq_err:
+ return SDP_OQ_RECV_FAILED;
+}
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h
new file mode 100644
index 000000000..172fdc556
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_EP_ENQDEQ_H_
+#define _OTX2_EP_ENQDEQ_H_
+
+#include <rte_byteorder.h>
+#include "otx2_ep_rawdev.h"
+
+#define SDP_IQ_SEND_FAILED (-1)
+#define SDP_IQ_SEND_SUCCESS (0)
+
+#define SDP_OQ_RECV_FAILED (-1)
+#define SDP_OQ_RECV_SUCCESS (0)
+
+static inline uint64_t
+sdp_endian_swap_8B(uint64_t _d)
+{
+ return ((((((uint64_t)(_d)) >> 0) & (uint64_t)0xff) << 56) |
+ (((((uint64_t)(_d)) >> 8) & (uint64_t)0xff) << 48) |
+ (((((uint64_t)(_d)) >> 16) & (uint64_t)0xff) << 40) |
+ (((((uint64_t)(_d)) >> 24) & (uint64_t)0xff) << 32) |
+ (((((uint64_t)(_d)) >> 32) & (uint64_t)0xff) << 24) |
+ (((((uint64_t)(_d)) >> 40) & (uint64_t)0xff) << 16) |
+ (((((uint64_t)(_d)) >> 48) & (uint64_t)0xff) << 8) |
+ (((((uint64_t)(_d)) >> 56) & (uint64_t)0xff) << 0));
+}
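+
+/* Illustrative note: this open-coded byte reversal is equivalent to a
+ * 64-bit byte swap (rte_bswap64() from rte_byteorder.h would do the
+ * same), e.g. 0x0123456789abcdef becomes 0xefcdab8967452301.
+ */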
+
+static inline void
+sdp_swap_8B_data(uint64_t *data, uint32_t blocks)
+{
+ /* Swap 8B blocks */
+ while (blocks) {
+ *data = sdp_endian_swap_8B(*data);
+ blocks--;
+ data++;
+ }
+}
+
+static inline uint32_t
+sdp_incr_index(uint32_t index, uint32_t count, uint32_t max)
+{
+ if ((index + count) >= max)
+ index = index + count - max;
+ else
+ index += count;
+
+ return index;
+}
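+
+/* Illustrative note (not in the original sources): with max = 4 ring
+ * descriptors, sdp_incr_index(3, 1, 4) wraps to 0 and
+ * sdp_incr_index(2, 3, 4) yields 1; the single subtraction is enough
+ * because callers only advance by count <= max.
+ */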
+
+#endif /* _OTX2_EP_ENQDEQ_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c
new file mode 100644
index 000000000..0778603d5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_bus.h>
+#include <rte_bus_pci.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_mempool.h>
+#include <rte_pci.h>
+
+#include <rte_common.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "otx2_common.h"
+#include "otx2_ep_rawdev.h"
+#include "otx2_ep_vf.h"
+
+static const struct rte_pci_id pci_sdp_vf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_EP_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+/* SDP_VF default configuration */
+const struct sdp_config default_sdp_conf = {
+ /* IQ attributes */
+ .iq = {
+ .max_iqs = SDP_VF_CFG_IO_QUEUES,
+ .instr_type = SDP_VF_64BYTE_INSTR,
+ .pending_list_size = (SDP_VF_MAX_IQ_DESCRIPTORS *
+ SDP_VF_CFG_IO_QUEUES),
+ },
+
+ /* OQ attributes */
+ .oq = {
+ .max_oqs = SDP_VF_CFG_IO_QUEUES,
+ .info_ptr = SDP_VF_OQ_INFOPTR_MODE,
+ .refill_threshold = SDP_VF_OQ_REFIL_THRESHOLD,
+ },
+
+ .num_iqdef_descs = SDP_VF_MAX_IQ_DESCRIPTORS,
+ .num_oqdef_descs = SDP_VF_MAX_OQ_DESCRIPTORS,
+ .oqdef_buf_size = SDP_VF_OQ_BUF_SIZE,
+
+};
+
+const struct sdp_config *
+sdp_get_defconf(struct sdp_device *sdp_dev __rte_unused)
+{
+ return &default_sdp_conf;
+}
+
+static int
+sdp_vfdev_exit(struct rte_rawdev *rawdev)
+{
+ struct sdp_device *sdpvf;
+ uint32_t rawdev_queues, q;
+
+ otx2_info("%s:", __func__);
+
+ sdpvf = (struct sdp_device *)rawdev->dev_private;
+
+ sdpvf->fn_list.disable_io_queues(sdpvf);
+
+ rawdev_queues = sdpvf->num_oqs;
+ for (q = 0; q < rawdev_queues; q++) {
+ if (sdp_delete_oqs(sdpvf, q)) {
+ otx2_err("Failed to delete OQ:%d", q);
+ return -ENOMEM;
+ }
+ }
+ otx2_info("Num OQs:%d freed", sdpvf->num_oqs);
+
+ /* Free the oqbuf_pool */
+ rte_mempool_free(sdpvf->enqdeq_mpool);
+ sdpvf->enqdeq_mpool = NULL;
+
+ otx2_info("Enqdeq_mpool free done");
+
+ rawdev_queues = sdpvf->num_iqs;
+ for (q = 0; q < rawdev_queues; q++) {
+ if (sdp_delete_iqs(sdpvf, q)) {
+ otx2_err("Failed to delete IQ:%d", q);
+ return -ENOMEM;
+ }
+ }
+ otx2_sdp_dbg("Num IQs:%d freed", sdpvf->num_iqs);
+
+ return 0;
+}
+
+static int
+sdp_chip_specific_setup(struct sdp_device *sdpvf)
+{
+ struct rte_pci_device *pdev = sdpvf->pci_dev;
+ uint32_t dev_id = pdev->id.device_id;
+ int ret;
+
+ switch (dev_id) {
+ case PCI_DEVID_OCTEONTX2_EP_VF:
+ sdpvf->chip_id = PCI_DEVID_OCTEONTX2_EP_VF;
+ ret = sdp_vf_setup_device(sdpvf);
+
+ break;
+ default:
+ otx2_err("Unsupported device");
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ otx2_info("SDP dev_id[%d]", dev_id);
+
+ return ret;
+}
+
+/* SDP VF device initialization */
+static int
+sdp_vfdev_init(struct sdp_device *sdpvf)
+{
+ uint32_t rawdev_queues, q;
+
+ if (sdp_chip_specific_setup(sdpvf)) {
+ otx2_err("Chip specific setup failed");
+ goto setup_fail;
+ }
+
+ if (sdpvf->fn_list.setup_device_regs(sdpvf)) {
+ otx2_err("Failed to configure device registers");
+ goto setup_fail;
+ }
+
+ rawdev_queues = (uint32_t)(sdpvf->sriov_info.rings_per_vf);
+
+ /* Rawdev queues setup for enqueue/dequeue */
+ for (q = 0; q < rawdev_queues; q++) {
+ if (sdp_setup_iqs(sdpvf, q)) {
+ otx2_err("Failed to setup IQs");
+ goto iq_fail;
+ }
+ }
+ otx2_info("Total[%d] IQs setup", sdpvf->num_iqs);
+
+ for (q = 0; q < rawdev_queues; q++) {
+ if (sdp_setup_oqs(sdpvf, q)) {
+ otx2_err("Failed to setup OQs");
+ goto oq_fail;
+ }
+ }
+ otx2_info("Total [%d] OQs setup", sdpvf->num_oqs);
+
+ /* Enable IQ/OQ for this device */
+ sdpvf->fn_list.enable_io_queues(sdpvf);
+
+ /* Send OQ desc credits for OQs, credits are always
+ * sent after the OQs are enabled.
+ */
+ for (q = 0; q < rawdev_queues; q++) {
+ rte_write32(sdpvf->droq[q]->nb_desc,
+ sdpvf->droq[q]->pkts_credit_reg);
+
+ rte_io_mb();
+ otx2_info("OQ[%d] dbells [%d]", q,
+ rte_read32(sdpvf->droq[q]->pkts_credit_reg));
+ }
+
+ rte_wmb();
+
+ otx2_info("SDP Device is Ready");
+
+ return 0;
+
+/* Error handling */
+oq_fail:
+ /* Free the allocated OQs */
+ for (q = 0; q < sdpvf->num_oqs; q++)
+ sdp_delete_oqs(sdpvf, q);
+
+iq_fail:
+ /* Free the allocated IQs */
+ for (q = 0; q < sdpvf->num_iqs; q++)
+ sdp_delete_iqs(sdpvf, q);
+
+setup_fail:
+ return -ENOMEM;
+}
+
+static int
+sdp_rawdev_start(struct rte_rawdev *dev)
+{
+ dev->started = 1;
+
+ return 0;
+}
+
+static void
+sdp_rawdev_stop(struct rte_rawdev *dev)
+{
+ dev->started = 0;
+}
+
+static int
+sdp_rawdev_close(struct rte_rawdev *dev)
+{
+ int ret;
+ ret = sdp_vfdev_exit(dev);
+ if (ret) {
+ otx2_err(" SDP_EP rawdev exit error");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+sdp_rawdev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
+{
+ struct sdp_rawdev_info *app_info = (struct sdp_rawdev_info *)config;
+ struct sdp_device *sdpvf;
+
+ if (app_info == NULL) {
+ otx2_err("Application config info [NULL]");
+ return -EINVAL;
+ }
+
+ sdpvf = (struct sdp_device *)dev->dev_private;
+
+ sdpvf->conf = app_info->app_conf;
+ sdpvf->enqdeq_mpool = app_info->enqdeq_mpool;
+
+ return sdp_vfdev_init(sdpvf);
+}
+
+/* SDP VF endpoint rawdev ops */
+static const struct rte_rawdev_ops sdp_rawdev_ops = {
+ .dev_configure = sdp_rawdev_configure,
+ .dev_start = sdp_rawdev_start,
+ .dev_stop = sdp_rawdev_stop,
+ .dev_close = sdp_rawdev_close,
+ .enqueue_bufs = sdp_rawdev_enqueue,
+ .dequeue_bufs = sdp_rawdev_dequeue,
+ .dev_selftest = sdp_rawdev_selftest,
+};
+
+static int
+otx2_sdp_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct sdp_device *sdpvf = NULL;
+ struct rte_rawdev *sdp_rawdev;
+ uint16_t vf_id;
+
+ /* Single process support */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr) {
+ otx2_info("SDP_EP BAR0 is mapped");
+ } else {
+ otx2_err("SDP_EP: Failed to map device BARs");
+ otx2_err("BAR0 %p BAR2 %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "SDPEP:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ /* Allocate rawdev pmd */
+ sdp_rawdev = rte_rawdev_pmd_allocate(name,
+ sizeof(struct sdp_device),
+ rte_socket_id());
+
+ if (sdp_rawdev == NULL) {
+ otx2_err("SDP_EP VF rawdev allocation failed");
+ return -ENOMEM;
+ }
+
+ sdp_rawdev->dev_ops = &sdp_rawdev_ops;
+ sdp_rawdev->device = &pci_dev->device;
+ sdp_rawdev->driver_name = pci_dev->driver->driver.name;
+
+ sdpvf = (struct sdp_device *)sdp_rawdev->dev_private;
+ sdpvf->hw_addr = pci_dev->mem_resource[0].addr;
+ sdpvf->pci_dev = pci_dev;
+
+ /* Discover the VF number being probed */
+ vf_id = ((pci_dev->addr.devid & 0x1F) << 3) |
+ (pci_dev->addr.function & 0x7);
+
+ vf_id -= 1;
+ sdpvf->vf_num = vf_id;
+
+ otx2_info("SDP_EP VF[%d] probe done", vf_id);
+
+ return 0;
+}
+
+static int
+otx2_sdp_rawdev_remove(struct rte_pci_device *pci_dev)
+{
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct rte_rawdev *rawdev;
+ struct sdp_device *sdpvf;
+
+ /* Single process support */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev == NULL) {
+ otx2_err("SDP_EP:invalid pci_dev!");
+ return -EINVAL;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "SDPEP:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name);
+ if (rawdev == NULL) {
+ otx2_err("SDP_EP: invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ sdpvf = (struct sdp_device *)rawdev->dev_private;
+ otx2_info("Removing SDP_EP VF[%d] ", sdpvf->vf_num);
+
+ /* rte_rawdev_close is called by pmd_release */
+ return rte_rawdev_pmd_release(rawdev);
+}
+
+static struct rte_pci_driver rte_sdp_rawdev_pmd = {
+ .id_table = pci_sdp_vf_map,
+ .drv_flags = (RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA),
+ .probe = otx2_sdp_rawdev_probe,
+ .remove = otx2_sdp_rawdev_remove,
+};
+
+RTE_PMD_REGISTER_PCI(sdp_rawdev_pci_driver, rte_sdp_rawdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(sdp_rawdev_pci_driver, pci_sdp_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(sdp_rawdev_pci_driver, "vfio-pci");
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h
new file mode 100644
index 000000000..dab2fb754
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h
@@ -0,0 +1,499 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_EP_RAWDEV_H_
+#define _OTX2_EP_RAWDEV_H_
+
+#include <rte_byteorder.h>
+#include <rte_spinlock.h>
+
+/* IQ instruction req types */
+#define SDP_REQTYPE_NONE (0)
+#define SDP_REQTYPE_NORESP (1)
+#define SDP_REQTYPE_NORESP_GATHER (2)
+
+/* Input Request Header format */
+struct sdp_instr_irh {
+ /* Request ID */
+ uint64_t rid:16;
+
+ /* PCIe port to use for response */
+ uint64_t pcie_port:3;
+
+ /* Scatter indicator 1=scatter */
+ uint64_t scatter:1;
+
+ /* Size of Expected result OR no. of entries in scatter list */
+ uint64_t rlenssz:14;
+
+ /* Desired destination port for result */
+ uint64_t dport:6;
+
+ /* Opcode Specific parameters */
+ uint64_t param:8;
+
+ /* Opcode for the return packet */
+ uint64_t opcode:16;
+};
+
+/* SDP 32B instruction format */
+struct sdp_instr_32B {
+ /* Pointer where the input data is available. */
+ uint64_t dptr;
+
+ /* SDP Instruction Header. */
+ uint64_t ih;
+
+ /** Pointer where the response for a RAW mode packet
+ * will be written by OCTEON TX2.
+ */
+ uint64_t rptr;
+
+ /* Input Request Header. Additional info about the input. */
+ uint64_t irh;
+};
+#define SDP_32B_INSTR_SIZE (sizeof(struct sdp_instr_32B))
+
+/* SDP 64B instruction format */
+struct sdp_instr_64B {
+ /* Pointer where the input data is available. */
+ uint64_t dptr;
+
+ /* SDP Instruction Header. */
+ uint64_t ih;
+
+ /** Pointer where the response for a RAW mode packet
+ * will be written by OCTEON TX2.
+ */
+ uint64_t rptr;
+
+ /* Input Request Header. */
+ uint64_t irh;
+
+ /* Additional headers available in a 64-byte instruction. */
+ uint64_t exhdr[4];
+};
+#define SDP_64B_INSTR_SIZE (sizeof(struct sdp_instr_64B))
+
+struct sdp_soft_instr {
+ /** Input data pointer. It is either pointing directly to input data
+ * or to a gather list.
+ */
+ void *dptr;
+
+ /** Response from OCTEON TX2 comes at this address. It either
+ * points directly to the output data buffer or to a scatter list.
+ */
+ void *rptr;
+
+ /* The instruction header. All input commands have this field. */
+ struct sdp_instr_ih ih;
+
+ /* Input request header. */
+ struct sdp_instr_irh irh;
+
+ /** The PCI instruction to be sent to OCTEON TX2. This is stored in the
+ * instr to retrieve the physical address of buffers when instr is
+ * freed.
+ */
+ struct sdp_instr_64B command;
+
+ /** If a gather list was allocated, this ptr points to the buffer used
+ * for the gather list. The gather list has to be 8B aligned, so this
+ * value may be different from dptr.
+ */
+ void *gather_ptr;
+
+ /* Total data bytes transferred in the gather mode request. */
+ uint64_t gather_bytes;
+
+ /** If a scatter list was allocated, this ptr points to the buffer used
+ * for the scatter list. The scatter list has to be 8B aligned, so
+ * this value may be different from rptr.
+ */
+ void *scatter_ptr;
+
+ /* Total data bytes to be received in the scatter mode request. */
+ uint64_t scatter_bytes;
+
+ /* IQ number to which this instruction has to be submitted. */
+ uint32_t q_no;
+
+ /* IQ instruction request type. */
+ uint32_t reqtype;
+};
+#define SDP_SOFT_INSTR_SIZE (sizeof(struct sdp_soft_instr))
+
+/* SDP IQ request list */
+struct sdp_instr_list {
+ void *buf;
+ uint32_t reqtype;
+};
+#define SDP_IQREQ_LIST_SIZE (sizeof(struct sdp_instr_list))
+
+/* Input Queue statistics. Each input queue has three stats fields. */
+struct sdp_iq_stats {
+ uint64_t instr_posted; /* Instructions posted to this queue. */
+ uint64_t instr_processed; /* Instructions processed in this queue. */
+ uint64_t instr_dropped; /* Instructions that could not be processed */
+};
+
+/* Structure to define the configuration attributes for each Input queue. */
+struct sdp_iq_config {
+ /* Max number of IQs available */
+ uint16_t max_iqs;
+
+ /* Command size - 32 or 64 bytes */
+ uint16_t instr_type;
+
+ /* Pending list size, usually set to the sum of the size of all IQs */
+ uint32_t pending_list_size;
+};
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet data
+ * to OCTEON TX2 device from the host. Each IQ of a SDP EP VF device has one
+ * such structure to represent it.
+ */
+struct sdp_instr_queue {
+ /* A spinlock to protect access to the input ring. */
+ rte_spinlock_t lock;
+ rte_spinlock_t post_lock;
+
+ struct sdp_device *sdp_dev;
+ rte_atomic64_t iq_flush_running;
+
+ uint32_t q_no;
+ uint32_t pkt_in_done;
+
+ /* Flag for 64 byte commands. */
+ uint32_t iqcmd_64B:1;
+ uint32_t rsvd:17;
+ uint32_t status:8;
+
+ /* Number of descriptors in this ring. */
+ uint32_t nb_desc;
+
+ /* Input ring index, where the driver should write the next packet */
+ uint32_t host_write_index;
+
+ /* Input ring index, where the OCTEON TX2 should read the next packet */
+ uint32_t otx_read_index;
+
+ /** This index aids in finding the window in the queue where OCTEON TX2
+ * has read the commands.
+ */
+ uint32_t flush_index;
+
+ /* This keeps track of the instructions pending in this queue. */
+ rte_atomic64_t instr_pending;
+
+ uint32_t reset_instr_cnt;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ uint8_t *base_addr;
+
+ /* This IQ request list */
+ struct sdp_instr_list *req_list;
+
+ /* SDP doorbell register for the ring. */
+ void *doorbell_reg;
+
+ /* SDP instruction count register for this ring. */
+ void *inst_cnt_reg;
+
+ /* Number of instructions pending to be posted to OCTEON TX2. */
+ uint32_t fill_cnt;
+
+ /* Statistics for this input queue. */
+ struct sdp_iq_stats stats;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ uint64_t base_addr_dma;
+
+ /* Memory zone */
+ const struct rte_memzone *iq_mz;
+};
+
+/* DROQ packet format for application i/f. */
+struct sdp_droq_pkt {
+ /* DROQ packet data buffer pointer. */
+ uint8_t *data;
+
+ /* DROQ packet data length */
+ uint32_t len;
+
+ uint32_t misc;
+};
+
+/** Descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of a sdp_droq_info structure.
+ * The device DMA's incoming packets and its information at the address
+ * given by these descriptor fields.
+ */
+struct sdp_droq_desc {
+ /* The buffer pointer */
+ uint64_t buffer_ptr;
+
+ /* The Info pointer */
+ uint64_t info_ptr;
+};
+#define SDP_DROQ_DESC_SIZE (sizeof(struct sdp_droq_desc))
+
+/* Receive Header */
+union sdp_rh {
+ uint64_t rh64;
+};
+#define SDP_RH_SIZE (sizeof(union sdp_rh))
+
+/** Information about packet DMA'ed by OCTEON TX2.
+ * The format of the information available at Info Pointer after OCTEON TX2
+ * has posted a packet. Not all descriptors have valid information. Only
+ * the Info field of the first descriptor for a packet has information
+ * about the packet.
+ */
+struct sdp_droq_info {
+ /* The Output Receive Header. */
+ union sdp_rh rh;
+
+ /* The Length of the packet. */
+ uint64_t length;
+};
+#define SDP_DROQ_INFO_SIZE (sizeof(struct sdp_droq_info))
+
+/** Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the OCTEON TX2 device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers.
+ */
+struct sdp_recv_buffer {
+ /* Packet buffer, including meta data. */
+ void *buffer;
+
+ /* Data in the packet buffer. */
+ /* uint8_t *data; */
+};
+#define SDP_DROQ_RECVBUF_SIZE (sizeof(struct sdp_recv_buffer))
+
+/* DROQ statistics. Each output queue has three stats fields. */
+struct sdp_droq_stats {
+ /* Number of packets received in this queue. */
+ uint64_t pkts_received;
+
+ /* Bytes received by this queue. */
+ uint64_t bytes_received;
+
+ /* Num of failures of rte_mempool_get() */
+ uint64_t rx_alloc_failure;
+};
+
+/* Structure to define the configuration attributes for each Output queue. */
+struct sdp_oq_config {
+ /* Max number of OQs available */
+ uint16_t max_oqs;
+
+ /* If set, the Output queue uses info-pointer mode. (Default: 1) */
+ uint16_t info_ptr;
+
+ /** Number of buffers consumed by the driver on this output queue
+ * before it attempts to replenish the descriptor ring with new
+ * buffers.
+ */
+ uint32_t refill_threshold;
+};
+
+/* The Descriptor Ring Output Queue(DROQ) structure. */
+struct sdp_droq {
+ /* A spinlock to protect access to this ring. */
+ rte_spinlock_t lock;
+
+ struct sdp_device *sdp_dev;
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct sdp_droq_desc *desc_ring;
+
+ uint32_t q_no;
+ uint32_t last_pkt_count;
+
+ /* Driver should read the next packet at this index */
+ uint32_t read_idx;
+
+ /* OCTEON TX2 will write the next packet at this index */
+ uint32_t write_idx;
+
+ /* At this index, the driver will refill the descriptor's buffer */
+ uint32_t refill_idx;
+
+ /* Packets pending to be processed */
+ rte_atomic64_t pkts_pending;
+
+ /* Number of descriptors in this ring. */
+ uint32_t nb_desc;
+
+ /* The number of descriptors pending to refill. */
+ uint32_t refill_count;
+
+ uint32_t refill_threshold;
+
+ /* The 8B aligned info ptrs begin from this address. */
+ struct sdp_droq_info *info_list;
+
+ /* receive buffer list contains virtual addresses of the buffers. */
+ struct sdp_recv_buffer *recv_buf_list;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ uint32_t buffer_size;
+
+ /** Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ void *pkts_credit_reg;
+
+ /** Pointer to the mapped packet sent register. OCTEON TX2 writes the
+ * number of packets DMA'ed to host memory in this register.
+ */
+ void *pkts_sent_reg;
+
+ /* Statistics for this DROQ. */
+ struct sdp_droq_stats stats;
+
+ /* DMA mapped address of the DROQ descriptor ring. */
+ size_t desc_ring_dma;
+
+ /* Info_ptr list is allocated at this virtual address. */
+ size_t info_base_addr;
+
+ /* DMA mapped address of the info list */
+ size_t info_list_dma;
+
+ /* Allocated size of info list. */
+ uint32_t info_alloc_size;
+
+ /* Memory zone */
+ const struct rte_memzone *desc_ring_mz;
+ const struct rte_memzone *info_mz;
+};
+#define SDP_DROQ_SIZE (sizeof(struct sdp_droq))
+
+/* IQ/OQ mask */
+struct sdp_io_enable {
+ uint64_t iq;
+ uint64_t oq;
+ uint64_t iq64B;
+};
+
+/* Structure to define the configuration. */
+struct sdp_config {
+ /* Input Queue attributes. */
+ struct sdp_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct sdp_oq_config oq;
+
+ /* Num of desc for IQ rings */
+ uint32_t num_iqdef_descs;
+
+ /* Num of desc for OQ rings */
+ uint32_t num_oqdef_descs;
+
+ /* OQ buffer size */
+ uint32_t oqdef_buf_size;
+};
+
+/* Required functions for each VF device */
+struct sdp_fn_list {
+ void (*setup_iq_regs)(struct sdp_device *sdpvf, uint32_t q_no);
+ void (*setup_oq_regs)(struct sdp_device *sdpvf, uint32_t q_no);
+
+ int (*setup_device_regs)(struct sdp_device *sdpvf);
+ uint32_t (*update_iq_read_idx)(struct sdp_instr_queue *iq);
+
+ void (*enable_io_queues)(struct sdp_device *sdpvf);
+ void (*disable_io_queues)(struct sdp_device *sdpvf);
+
+ void (*enable_iq)(struct sdp_device *sdpvf, uint32_t q_no);
+ void (*disable_iq)(struct sdp_device *sdpvf, uint32_t q_no);
+
+ void (*enable_oq)(struct sdp_device *sdpvf, uint32_t q_no);
+ void (*disable_oq)(struct sdp_device *sdpvf, uint32_t q_no);
+};
+
+/* SRIOV information */
+struct sdp_sriov_info {
+ /* Number of rings assigned to VF */
+ uint32_t rings_per_vf;
+
+ /* Number of VF devices enabled */
+ uint32_t num_vfs;
+};
+
+
+/* Information to be passed from application */
+struct sdp_rawdev_info {
+ struct rte_mempool *enqdeq_mpool;
+ const struct sdp_config *app_conf;
+};
+
+/* SDP EP VF device */
+struct sdp_device {
+ /* PCI device pointer */
+ struct rte_pci_device *pci_dev;
+ uint16_t chip_id;
+ uint16_t pf_num;
+ uint16_t vf_num;
+
+ /* This device's PCIe port used for traffic. */
+ uint16_t pcie_port;
+ uint32_t pkind;
+
+ /* The state of this device */
+ rte_atomic64_t status;
+
+ /* Memory mapped h/w address */
+ uint8_t *hw_addr;
+
+ struct sdp_fn_list fn_list;
+
+ /* Num IQs */
+ uint32_t num_iqs;
+
+ /* The input instruction queues */
+ struct sdp_instr_queue *instr_queue[SDP_VF_MAX_IOQS_PER_RAWDEV];
+
+ /* Num OQs */
+ uint32_t num_oqs;
+
+ /* The DROQ output queues */
+ struct sdp_droq *droq[SDP_VF_MAX_IOQS_PER_RAWDEV];
+
+ /* IOQ data buffer pool */
+ struct rte_mempool *enqdeq_mpool;
+
+ /* IOQ mask */
+ struct sdp_io_enable io_qmask;
+
+ /* SR-IOV info */
+ struct sdp_sriov_info sriov_info;
+
+ /* Device configuration */
+ const struct sdp_config *conf;
+};
+
+const struct sdp_config *sdp_get_defconf(struct sdp_device *sdp_dev);
+int sdp_setup_iqs(struct sdp_device *sdpvf, uint32_t iq_no);
+int sdp_delete_iqs(struct sdp_device *sdpvf, uint32_t iq_no);
+
+int sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no);
+int sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no);
+
+int sdp_rawdev_enqueue(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers,
+ unsigned int count, rte_rawdev_obj_t context);
+int sdp_rawdev_dequeue(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers,
+ unsigned int count, rte_rawdev_obj_t context);
+
+int sdp_rawdev_selftest(uint16_t dev_id);
+
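+/* Typical usage sketch (illustrative only; it mirrors the flow of the
+ * bundled selftest in otx2_ep_test.c and is not an additional driver API):
+ *
+ * struct sdp_rawdev_info info = {0};
+ * struct rte_rawdev_info cfg = {0};
+ * struct sdp_soft_instr si = {0};
+ *
+ * info.enqdeq_mpool = mpool; // app-created pool for IOQ buffers
+ * info.app_conf = NULL; // NULL selects the driver defaults
+ * cfg.dev_private = &info;
+ * rte_rawdev_configure(dev_id, &cfg);
+ *
+ * si.q_no = 0;
+ * si.reqtype = SDP_REQTYPE_NORESP;
+ * si.ih.tlen = len;
+ * si.dptr = buf; // data buffer taken from mpool
+ * rte_rawdev_enqueue_buffers(dev_id, NULL, 1, &si);
+ * rte_rawdev_dequeue_buffers(dev_id, d_buf, 1, &si);
+ */
+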
+#endif /* _OTX2_EP_RAWDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_test.c b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_test.c
new file mode 100644
index 000000000..091f1827c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_test.c
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_mempool.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "otx2_common.h"
+#include "otx2_ep_rawdev.h"
+
+#define SDP_IOQ_NUM_BUFS (4 * 1024)
+#define SDP_IOQ_BUF_SIZE (2 * 1024)
+
+#define SDP_TEST_PKT_FSZ (0)
+#define SDP_TEST_PKT_SIZE (1024)
+
+static int
+sdp_validate_data(struct sdp_droq_pkt *oq_pkt, uint8_t *iq_pkt,
+ uint32_t pkt_len)
+{
+ if (!oq_pkt)
+ return -EINVAL;
+
+ if (pkt_len != oq_pkt->len) {
+ otx2_err("Invalid packet length");
+ return -EINVAL;
+ }
+
+ if (memcmp(oq_pkt->data, iq_pkt, pkt_len) != 0) {
+ otx2_err("Data validation failed");
+ return -EINVAL;
+ }
+ otx2_sdp_dbg("Data validation successful");
+
+ return 0;
+}
+
+static void
+sdp_ioq_buffer_fill(uint8_t *addr, uint32_t len)
+{
+ uint32_t idx;
+
+ memset(addr, 0, len);
+
+ for (idx = 0; idx < len; idx++)
+ addr[idx] = idx;
+}
+
+static struct rte_mempool *
+sdp_ioq_mempool_create(void)
+{
+ struct rte_mempool *mpool;
+
+ mpool = rte_mempool_create("ioqbuf_pool",
+ SDP_IOQ_NUM_BUFS /*num elt*/,
+ SDP_IOQ_BUF_SIZE /*elt size*/,
+ 0 /*cache_size*/,
+ 0 /*private_data_size*/,
+ NULL /*mp_init*/,
+ NULL /*mp_init arg*/,
+ NULL /*obj_init*/,
+ NULL /*obj_init arg*/,
+ rte_socket_id() /*socket id*/,
+ (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET));
+
+ return mpool;
+}
+
+
+int
+sdp_rawdev_selftest(uint16_t dev_id)
+{
+ struct sdp_rawdev_info app_info = {0};
+ struct rte_rawdev_info dev_info = {0};
+
+ struct rte_rawdev_buf *d_buf[1];
+ struct sdp_droq_pkt oq_pkt;
+ struct sdp_soft_instr si;
+ struct sdp_device sdpvf;
+
+ uint32_t buf_size;
+ int ret = 0;
+ void *buf;
+
+ otx2_info("SDP RAWDEV Self Test: Started");
+
+ memset(&oq_pkt, 0x00, sizeof(oq_pkt));
+ d_buf[0] = (struct rte_rawdev_buf *)&oq_pkt;
+
+ struct rte_mempool *ioq_mpool = sdp_ioq_mempool_create();
+ if (!ioq_mpool) {
+ otx2_err("IOQ mpool creation failed");
+ return -ENOMEM;
+ }
+
+ app_info.enqdeq_mpool = ioq_mpool;
+ app_info.app_conf = NULL; /* Use default conf */
+
+ dev_info.dev_private = &app_info;
+
+ ret = rte_rawdev_configure(dev_id, &dev_info);
+ if (ret) {
+ otx2_err("Unable to configure SDP_VF %d", dev_id);
+ rte_mempool_free(ioq_mpool);
+ return -ENOMEM;
+ }
+ otx2_info("SDP VF rawdev[%d] configured successfully", dev_id);
+
+ memset(&si, 0x00, sizeof(si));
+ memset(&sdpvf, 0x00, sizeof(sdpvf));
+
+ buf_size = SDP_TEST_PKT_SIZE;
+
+ si.q_no = 0;
+ si.reqtype = SDP_REQTYPE_NORESP;
+ si.rptr = NULL;
+
+ si.ih.fsz = SDP_TEST_PKT_FSZ;
+ si.ih.tlen = buf_size;
+ si.ih.gather = 0;
+
+ /* Enqueue raw pkt data */
+ if (rte_mempool_get(ioq_mpool, &buf) < 0 || buf == NULL) {
+ otx2_err("Buffer allocation failed");
+ rte_mempool_free(ioq_mpool);
+ rte_rawdev_close(dev_id);
+ return -ENOMEM;
+ }
+
+ sdp_ioq_buffer_fill(buf, buf_size);
+ si.dptr = (uint8_t *)buf;
+
+ rte_rawdev_enqueue_buffers(dev_id, NULL, 1, &si);
+ usleep(10000);
+
+ /* Dequeue raw pkt data */
+ ret = 0;
+ while (ret < 1) {
+ ret = rte_rawdev_dequeue_buffers(dev_id, &d_buf[0], 1, &si);
+ rte_pause();
+ }
+
+ /* Validate the dequeued raw pkt data */
+ if (sdp_validate_data((struct sdp_droq_pkt *)d_buf[0],
+ buf, buf_size) != 0) {
+ otx2_err("Data invalid");
+ rte_mempool_put(ioq_mpool,
+ ((struct sdp_droq_pkt *)d_buf[0])->data);
+ rte_mempool_free(ioq_mpool);
+ rte_rawdev_close(dev_id);
+ return -EINVAL;
+ }
+
+ rte_mempool_put(ioq_mpool, ((struct sdp_droq_pkt *)d_buf[0])->data);
+ rte_mempool_free(ioq_mpool);
+ rte_rawdev_close(dev_id);
+
+ otx2_info("SDP RAWDEV Self Test: Successful");
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.c b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.c
new file mode 100644
index 000000000..bf2a19e36
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.c
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "otx2_common.h"
+#include "otx2_ep_rawdev.h"
+#include "otx2_ep_vf.h"
+
+static int
+sdp_vf_reset_iq(struct sdp_device *sdpvf, int q_no)
+{
+ uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
+ volatile uint64_t d64 = 0ull;
+
+ /* There is no RST for a ring.
+ * Clear all registers one by one after disabling the ring
+ */
+
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_BADDR(q_no));
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_RSIZE(q_no));
+
+ d64 = 0xFFFFFFFF; /* 32-bit all-ones */
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
+ d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
+
+ while ((d64 != 0) && loop--) {
+ otx2_write64(d64, sdpvf->hw_addr +
+ SDP_VF_R_IN_INSTR_DBELL(q_no));
+
+ rte_delay_ms(1);
+
+ d64 = otx2_read64(sdpvf->hw_addr +
+ SDP_VF_R_IN_INSTR_DBELL(q_no));
+ }
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
+ d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+ while ((d64 != 0) && loop--) {
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+
+ rte_delay_ms(1);
+
+ d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+ }
+
+ d64 = 0ull;
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INT_LEVELS(q_no));
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_PKT_CNT(q_no));
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_BYTE_CNT(q_no));
+
+ return 0;
+}
+
+static int
+sdp_vf_reset_oq(struct sdp_device *sdpvf, int q_no)
+{
+ uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
+ volatile uint64_t d64 = 0ull;
+
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(q_no));
+
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_RSIZE(q_no));
+
+ d64 = 0xFFFFFFFF;
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
+
+ while ((d64 != 0) && loop--) {
+ otx2_write64(d64, sdpvf->hw_addr +
+ SDP_VF_R_OUT_SLIST_DBELL(q_no));
+
+ rte_delay_ms(1);
+
+ d64 = otx2_read64(sdpvf->hw_addr +
+ SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ }
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
+ d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ while ((d64 != 0) && (loop--)) {
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+
+ rte_delay_ms(1);
+
+ d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ }
+
+ d64 = 0ull;
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_INT_LEVELS(q_no));
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_PKT_CNT(q_no));
+ otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_BYTE_CNT(q_no));
+
+ return 0;
+}
+
+static void
+sdp_vf_setup_global_iq_reg(struct sdp_device *sdpvf, int q_no)
+{
+ volatile uint64_t reg_val = 0ull;
+
+ /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for IQs.
+ * IS_64B is enabled by default.
+ */
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(q_no));
+
+ reg_val |= SDP_VF_R_IN_CTL_RDSIZE;
+ reg_val |= SDP_VF_R_IN_CTL_IS_64B;
+ reg_val |= SDP_VF_R_IN_CTL_ESR;
+
+ otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(q_no));
+
+}
+
+static void
+sdp_vf_setup_global_oq_reg(struct sdp_device *sdpvf, int q_no)
+{
+ volatile uint64_t reg_val = 0ull;
+
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
+
+ reg_val |= (SDP_VF_R_OUT_CTL_IMODE);
+
+ reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(SDP_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(SDP_VF_R_OUT_CTL_ES_D);
+
+ /* INFO/DATA ptr swap is required */
+ reg_val |= (SDP_VF_R_OUT_CTL_ES_P);
+
+ otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
+
+}
+
+static int
+sdp_vf_reset_input_queues(struct sdp_device *sdpvf)
+{
+ uint32_t q_no = 0;
+
+ otx2_sdp_dbg("%s :", __func__);
+
+ for (q_no = 0; q_no < sdpvf->sriov_info.rings_per_vf; q_no++)
+ sdp_vf_reset_iq(sdpvf, q_no);
+
+ return 0;
+}
+
+static int
+sdp_vf_reset_output_queues(struct sdp_device *sdpvf)
+{
+ uint64_t q_no = 0ull;
+
+ otx2_sdp_dbg(" %s :", __func__);
+
+ for (q_no = 0; q_no < sdpvf->sriov_info.rings_per_vf; q_no++)
+ sdp_vf_reset_oq(sdpvf, q_no);
+
+ return 0;
+}
+
+static void
+sdp_vf_setup_global_input_regs(struct sdp_device *sdpvf)
+{
+ uint64_t q_no = 0ull;
+
+ sdp_vf_reset_input_queues(sdpvf);
+
+ for (q_no = 0; q_no < (sdpvf->sriov_info.rings_per_vf); q_no++)
+ sdp_vf_setup_global_iq_reg(sdpvf, q_no);
+}
+
+static void
+sdp_vf_setup_global_output_regs(struct sdp_device *sdpvf)
+{
+ uint32_t q_no;
+
+ sdp_vf_reset_output_queues(sdpvf);
+
+ for (q_no = 0; q_no < (sdpvf->sriov_info.rings_per_vf); q_no++)
+ sdp_vf_setup_global_oq_reg(sdpvf, q_no);
+
+}
+
+static int
+sdp_vf_setup_device_regs(struct sdp_device *sdpvf)
+{
+ sdp_vf_setup_global_input_regs(sdpvf);
+ sdp_vf_setup_global_output_regs(sdpvf);
+
+ return 0;
+}
+
+static void
+sdp_vf_setup_iq_regs(struct sdp_device *sdpvf, uint32_t iq_no)
+{
+ struct sdp_instr_queue *iq = sdpvf->instr_queue[iq_no];
+ volatile uint64_t reg_val = 0ull;
+
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
+
+ /* Wait till IDLE is set to 1; BADDR must not be configured
+ * as long as IDLE is 0.
+ */
+ if (!(reg_val & SDP_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = otx2_read64(sdpvf->hw_addr +
+ SDP_VF_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & SDP_VF_R_IN_CTL_IDLE));
+ }
+
+ /* Write the start of the input queue's ring and its size */
+ otx2_write64(iq->base_addr_dma, sdpvf->hw_addr +
+ SDP_VF_R_IN_INSTR_BADDR(iq_no));
+ otx2_write64(iq->nb_desc, sdpvf->hw_addr +
+ SDP_VF_R_IN_INSTR_RSIZE(iq_no));
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = (uint8_t *) sdpvf->hw_addr +
+ SDP_VF_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = (uint8_t *) sdpvf->hw_addr +
+ SDP_VF_R_IN_CNTS(iq_no);
+
+ otx2_sdp_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ iq->reset_instr_cnt = rte_read32(iq->inst_cnt_reg);
+
+ /* IN INTR_THRESHOLD is set to max (0xFFFFFFFF), which disables
+ * the IN INTR from being raised.
+ */
+ reg_val = 0xffffffff;
+
+ otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
+
+}
+
+static void
+sdp_vf_setup_oq_regs(struct sdp_device *sdpvf, uint32_t oq_no)
+{
+ volatile uint64_t reg_val = 0ull;
+ uint64_t oq_ctl = 0ull;
+
+ struct sdp_droq *droq = sdpvf->droq[oq_no];
+
+ /* Wait till IDLE is set to 1; BADDR must not be configured
+ * as long as IDLE is 0.
+ */
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
+
+ while (!(reg_val & SDP_VF_R_OUT_CTL_IDLE)) {
+ reg_val = otx2_read64(sdpvf->hw_addr +
+ SDP_VF_R_OUT_CONTROL(oq_no));
+ }
+
+ otx2_write64(droq->desc_ring_dma, sdpvf->hw_addr +
+ SDP_VF_R_OUT_SLIST_BADDR(oq_no));
+ otx2_write64(droq->nb_desc, sdpvf->hw_addr +
+ SDP_VF_R_OUT_SLIST_RSIZE(oq_no));
+
+ oq_ctl = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
+
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~(0x7fffffull);
+
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (droq->buffer_size & 0xffff);
+
+ /* Populate ISIZE(22-16) */
+ oq_ctl |= ((SDP_RH_SIZE << 16) & 0x7fffff);
+ otx2_write64(oq_ctl, sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
+
+ /* Mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg = (uint8_t *) sdpvf->hw_addr +
+ SDP_VF_R_OUT_CNTS(oq_no);
+ droq->pkts_credit_reg = (uint8_t *) sdpvf->hw_addr +
+ SDP_VF_R_OUT_SLIST_DBELL(oq_no);
+
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_INT_LEVELS(oq_no));
+
+ /* Clear PKT_CNT register */
+ rte_write64(0xFFFFFFFFF, (uint8_t *)sdpvf->hw_addr +
+ SDP_VF_R_OUT_PKT_CNT(oq_no));
+
+ /* Clear the OQ doorbell */
+ rte_write32(0xFFFFFFFF, droq->pkts_credit_reg);
+ while ((rte_read32(droq->pkts_credit_reg) != 0ull)) {
+ rte_write32(0xFFFFFFFF, droq->pkts_credit_reg);
+ rte_delay_ms(1);
+ }
+ otx2_sdp_dbg("SDP_R[%d]_credit:%x", oq_no,
+ rte_read32(droq->pkts_credit_reg));
+
+ /* Clear the OQ_OUT_CNTS doorbell */
+ reg_val = rte_read32(droq->pkts_sent_reg);
+ rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
+
+ otx2_sdp_dbg("SDP_R[%d]_sent: %x", oq_no,
+ rte_read32(droq->pkts_sent_reg));
+
+ while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
+ reg_val = rte_read32(droq->pkts_sent_reg);
+ rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
+ rte_delay_ms(1);
+ }
+
+}
+
+static void
+sdp_vf_enable_iq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+ volatile uint64_t reg_val = 0ull;
+ uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
+
+ /* Reset the doorbell during IQ enable as well, to handle an
+ * abrupt guest reboot; IQ reset does not clear the doorbells.
+ */
+ otx2_write64(0xFFFFFFFF, sdpvf->hw_addr +
+ SDP_VF_R_IN_INSTR_DBELL(q_no));
+
+ while (((otx2_read64(sdpvf->hw_addr +
+ SDP_VF_R_IN_INSTR_DBELL(q_no))) != 0ull) && loop--) {
+
+ rte_delay_ms(1);
+ }
+
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+ reg_val |= 0x1ull;
+
+ otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+
+ otx2_info("IQ[%d] enable done", q_no);
+
+}
+
+static void
+sdp_vf_enable_oq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+ volatile uint64_t reg_val = 0ull;
+
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+ reg_val |= 0x1ull;
+ otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+
+ otx2_info("OQ[%d] enable done", q_no);
+}
+
+static void
+sdp_vf_enable_io_queues(struct sdp_device *sdpvf)
+{
+ uint32_t q_no = 0;
+
+ for (q_no = 0; q_no < sdpvf->num_iqs; q_no++)
+ sdp_vf_enable_iq(sdpvf, q_no);
+
+ for (q_no = 0; q_no < sdpvf->num_oqs; q_no++)
+ sdp_vf_enable_oq(sdpvf, q_no);
+}
+
+static void
+sdp_vf_disable_iq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+ volatile uint64_t reg_val = 0ull;
+
+ /* Reset the doorbell register for this Input Queue. */
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+ reg_val &= ~0x1ull;
+
+ otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+}
+
+static void
+sdp_vf_disable_oq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+ volatile uint64_t reg_val = 0ull;
+
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+ reg_val &= ~0x1ull;
+
+ otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+
+}
+
+static void
+sdp_vf_disable_io_queues(struct sdp_device *sdpvf)
+{
+ uint32_t q_no = 0;
+
+ /* Disable Input Queues. */
+ for (q_no = 0; q_no < sdpvf->num_iqs; q_no++)
+ sdp_vf_disable_iq(sdpvf, q_no);
+
+ /* Disable Output Queues. */
+ for (q_no = 0; q_no < sdpvf->num_oqs; q_no++)
+ sdp_vf_disable_oq(sdpvf, q_no);
+}
+
+static uint32_t
+sdp_vf_update_read_index(struct sdp_instr_queue *iq)
+{
+ uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
+
+ /* The new instr cnt reg is a 32-bit counter that can roll over.
+ * We have noted the counter's initial value at init time in
+ * reset_instr_cnt.
+ */
+ if (iq->reset_instr_cnt < new_idx)
+ new_idx -= iq->reset_instr_cnt;
+ else
+ new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
+
+ /* Modulo with the IQ size gives the ring-relative
+ * read index.
+ */
+ new_idx %= iq->nb_desc;
+
+ return new_idx;
+}
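+
+/* Worked example (illustrative): with reset_instr_cnt = 0xfffffff0 and a
+ * fresh counter reading of 0x10, the counter has rolled over, so the else
+ * branch yields 0x10 + (0xffffffff - 0xfffffff0) + 1 = 0x20 instructions
+ * since init, which sdp_vf_update_read_index() then reduces modulo nb_desc.
+ */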
+
+int
+sdp_vf_setup_device(struct sdp_device *sdpvf)
+{
+ uint64_t reg_val = 0ull;
+
+ /* If application doesn't provide its conf, use driver default conf */
+ if (sdpvf->conf == NULL) {
+ sdpvf->conf = sdp_get_defconf(sdpvf);
+ if (sdpvf->conf == NULL) {
+ otx2_err("SDP VF default config not found");
+ return -ENOMEM;
+ }
+ otx2_info("Default config is used");
+ }
+
+ /* Get IOQ (RPVF) count */
+ reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(0));
+
+ sdpvf->sriov_info.rings_per_vf = ((reg_val >> SDP_VF_R_IN_CTL_RPVF_POS)
+ & SDP_VF_R_IN_CTL_RPVF_MASK);
+
+ otx2_info("SDP RPVF: %d", sdpvf->sriov_info.rings_per_vf);
+
+ sdpvf->fn_list.setup_iq_regs = sdp_vf_setup_iq_regs;
+ sdpvf->fn_list.setup_oq_regs = sdp_vf_setup_oq_regs;
+
+ sdpvf->fn_list.setup_device_regs = sdp_vf_setup_device_regs;
+ sdpvf->fn_list.update_iq_read_idx = sdp_vf_update_read_index;
+
+ sdpvf->fn_list.enable_io_queues = sdp_vf_enable_io_queues;
+ sdpvf->fn_list.disable_io_queues = sdp_vf_disable_io_queues;
+
+ sdpvf->fn_list.enable_iq = sdp_vf_enable_iq;
+ sdpvf->fn_list.disable_iq = sdp_vf_disable_iq;
+
+ sdpvf->fn_list.enable_oq = sdp_vf_enable_oq;
+ sdpvf->fn_list.disable_oq = sdp_vf_disable_oq;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.h b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.h
new file mode 100644
index 000000000..996f2e51e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+#ifndef _OTX2_EP_VF_H_
+#define _OTX2_EP_VF_H_
+
+int
+sdp_vf_setup_device(struct sdp_device *sdpvf);
+
+#endif /* _OTX2_EP_VF_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/rte_rawdev_octeontx2_ep_version.map b/src/spdk/dpdk/drivers/raw/octeontx2_ep/rte_rawdev_octeontx2_ep_version.map
new file mode 100644
index 000000000..acdaf587d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/rte_rawdev_octeontx2_ep_version.map
@@ -0,0 +1,4 @@
+DPDK_21 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/skeleton/Makefile b/src/spdk/dpdk/drivers/raw/skeleton/Makefile
new file mode 100644
index 000000000..2ac66fbd4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_rawdev_skeleton.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+EXPORT_MAP := rte_rawdev_skeleton_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev_test.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/skeleton/meson.build b/src/spdk/dpdk/drivers/raw/skeleton/meson.build
new file mode 100644
index 000000000..b4a6ed08a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+deps += ['rawdev', 'kvargs', 'mbuf', 'bus_vdev']
+sources = files('skeleton_rawdev.c',
+ 'skeleton_rawdev_test.c')
diff --git a/src/spdk/dpdk/drivers/raw/skeleton/rte_rawdev_skeleton_version.map b/src/spdk/dpdk/drivers/raw/skeleton/rte_rawdev_skeleton_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton/rte_rawdev_skeleton_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev.c b/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev.c
new file mode 100644
index 000000000..586183a5b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev.c
@@ -0,0 +1,768 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_lcore.h>
+#include <rte_bus_vdev.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "skeleton_rawdev.h"
+
+/* Dynamic log type identifier */
+int skeleton_pmd_logtype;
+
+/* Flag: set once a device instance has been created */
+static uint16_t skeldev_init_once;
+
+/** Rawdev skeleton dummy driver name */
+#define SKELETON_PMD_RAWDEV_NAME rawdev_skeleton
+
+struct queue_buffers {
+ void *bufs[SKELETON_QUEUE_MAX_DEPTH];
+};
+
+static struct queue_buffers queue_buf[SKELETON_MAX_QUEUES] = {};
+static void clear_queue_bufs(int queue_id);
+
+static void skeleton_rawdev_info_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t dev_info)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_conf *skeldev_conf;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev_info) {
+ SKELETON_PMD_ERR("Invalid request");
+ return;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ skeldev_conf = dev_info;
+
+ skeldev_conf->num_queues = skeldev->num_queues;
+ skeldev_conf->capabilities = skeldev->capabilities;
+ skeldev_conf->device_state = skeldev->device_state;
+ skeldev_conf->firmware_state = skeldev->fw.firmware_state;
+}
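
Editor's note: this op simply copies the driver-private state into whatever dev_info points at, so an application passes a struct skeleton_rawdev_conf through the dev_private field of rte_rawdev_info, exactly as the self-test further below does. A minimal usage sketch; dev_id is assumed to be a valid rawdev id obtained elsewhere:

    #include <stdio.h>
    #include <rte_rawdev.h>
    #include "skeleton_rawdev.h"

    static void
    print_skel_conf(uint16_t dev_id)
    {
            struct rte_rawdev_info rdev_info = {0};
            struct skeleton_rawdev_conf skel_conf = {0};

            /* The driver fills skel_conf through the dev_private pointer */
            rdev_info.dev_private = &skel_conf;
            if (rte_rawdev_info_get(dev_id, &rdev_info) == 0)
                    printf("queues: %u, fw state: %d\n",
                           skel_conf.num_queues, skel_conf.firmware_state);
    }
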
+
+static int skeleton_rawdev_configure(const struct rte_rawdev *dev,
+ rte_rawdev_obj_t config)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_conf *skeldev_conf;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ if (!config) {
+ SKELETON_PMD_ERR("Invalid configuration");
+ return -EINVAL;
+ }
+
+ skeldev_conf = config;
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+	if (skeldev_conf->num_queues > SKELETON_MAX_QUEUES)
+		return -EINVAL;
+
+	skeldev->num_queues = skeldev_conf->num_queues;
+	skeldev->capabilities = skeldev_conf->capabilities;
+
+ return 0;
+}
+
+static int skeleton_rawdev_start(struct rte_rawdev *dev)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+ enum skeleton_firmware_state fw_state;
+ enum skeleton_device_state device_state;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ fw_state = skeldev->fw.firmware_state;
+ device_state = skeldev->device_state;
+
+ if (fw_state == SKELETON_FW_LOADED &&
+ device_state == SKELETON_DEV_STOPPED) {
+ skeldev->device_state = SKELETON_DEV_RUNNING;
+ } else {
+ SKELETON_PMD_ERR("Device not ready for starting");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void skeleton_rawdev_stop(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (dev) {
+ skeldev = skeleton_rawdev_get_priv(dev);
+ skeldev->device_state = SKELETON_DEV_STOPPED;
+ }
+}
+
+static void
+reset_queues(struct skeleton_rawdev *skeldev)
+{
+ int i;
+
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
+ skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
+ skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
+ }
+}
+
+static void
+reset_attribute_table(struct skeleton_rawdev *skeldev)
+{
+ int i;
+
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (skeldev->attr[i].name) {
+ free(skeldev->attr[i].name);
+ skeldev->attr[i].name = NULL;
+ }
+ }
+}
+
+static int skeleton_rawdev_close(struct rte_rawdev *dev)
+{
+ int ret = 0, i;
+ struct skeleton_rawdev *skeldev;
+ enum skeleton_firmware_state fw_state;
+ enum skeleton_device_state device_state;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ fw_state = skeldev->fw.firmware_state;
+ device_state = skeldev->device_state;
+
+ reset_queues(skeldev);
+ reset_attribute_table(skeldev);
+
+ switch (fw_state) {
+ case SKELETON_FW_LOADED:
+ if (device_state == SKELETON_DEV_RUNNING) {
+ SKELETON_PMD_ERR("Cannot close running device");
+ ret = -EINVAL;
+ } else {
+ /* Probably call fw reset here */
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+ }
+ break;
+ case SKELETON_FW_READY:
+ case SKELETON_FW_ERROR:
+ default:
+ SKELETON_PMD_DEBUG("Device already in stopped state");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Clear all allocated queues */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++)
+ clear_queue_bufs(i);
+
+ return ret;
+}
+
+static int skeleton_rawdev_reset(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ SKELETON_PMD_DEBUG("Resetting device");
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+
+ return 0;
+}
+
+static void skeleton_rawdev_queue_def_conf(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_queue *skelq;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !queue_conf)
+ return;
+
+	if (queue_id >= SKELETON_MAX_QUEUES)
+		return;
+
+	skeldev = skeleton_rawdev_get_priv(dev);
+	skelq = &skeldev->queues[queue_id];
+
+	rte_memcpy(queue_conf, skelq,
+		   sizeof(struct skeleton_rawdev_queue));
+}
+
+static void
+clear_queue_bufs(int queue_id)
+{
+ int i;
+
+ /* Clear buffers for queue_id */
+ for (i = 0; i < SKELETON_QUEUE_MAX_DEPTH; i++)
+ queue_buf[queue_id].bufs[i] = NULL;
+}
+
+static int skeleton_rawdev_queue_setup(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_queue *q;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !queue_conf)
+ return -EINVAL;
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ q = &skeldev->queues[queue_id];
+
+	/* Validate the requested depth, not the queue's current one */
+	if (skeldev->num_queues > queue_id &&
+	    ((struct skeleton_rawdev_queue *)queue_conf)->depth <=
+	    SKELETON_QUEUE_MAX_DEPTH) {
+ rte_memcpy(q, queue_conf,
+ sizeof(struct skeleton_rawdev_queue));
+ clear_queue_bufs(queue_id);
+ } else {
+ SKELETON_PMD_ERR("Invalid queue configuration");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
+ uint16_t queue_id)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ if (skeldev->num_queues > queue_id) {
+ skeldev->queues[queue_id].state = SKELETON_QUEUE_DETACH;
+ skeldev->queues[queue_id].depth = SKELETON_QUEUE_DEF_DEPTH;
+ clear_queue_bufs(queue_id);
+ } else {
+ SKELETON_PMD_ERR("Invalid queue configuration");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ return skeldev->num_queues;
+}
+
+static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ uint64_t *attr_value)
+{
+ int i;
+ uint8_t done = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !attr_name || !attr_value) {
+ SKELETON_PMD_ERR("Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (!skeldev->attr[i].name)
+ continue;
+
+ if (!strncmp(skeldev->attr[i].name, attr_name,
+ SKELETON_ATTRIBUTE_NAME_MAX)) {
+ *attr_value = skeldev->attr[i].value;
+ done = 1;
+ SKELETON_PMD_DEBUG("Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ break;
+ }
+ }
+
+ if (done)
+ return 0;
+
+ /* Attribute not found */
+ return -EINVAL;
+}
+
+static int skeleton_rawdev_set_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ const uint64_t attr_value)
+{
+ int i;
+ uint8_t done = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !attr_name) {
+ SKELETON_PMD_ERR("Invalid arguments for setting attributes");
+ return -EINVAL;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ /* Check if attribute already exists */
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (!skeldev->attr[i].name)
+ break;
+
+ if (!strncmp(skeldev->attr[i].name, attr_name,
+ SKELETON_ATTRIBUTE_NAME_MAX)) {
+ /* Update value */
+ skeldev->attr[i].value = attr_value;
+ done = 1;
+ break;
+ }
+ }
+
+ if (!done) {
+		if (i < SKELETON_MAX_ATTRIBUTES) {
+ /* There is still space to insert one more */
+ skeldev->attr[i].name = strdup(attr_name);
+ if (!skeldev->attr[i].name)
+ return -ENOMEM;
+
+ skeldev->attr[i].value = attr_value;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int skeleton_rawdev_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ unsigned int i;
+ uint16_t q_id;
+ RTE_SET_USED(dev);
+
+	/* context carries the queue_id, transferred as an opaque object
+	 * through the library layer. This can help complex implementations
+	 * that require more information than just an integer - for example,
+	 * a queue-pair. The callers here pass a uint16_t.
+	 */
+	q_id = *((uint16_t *)context);
+
+ for (i = 0; i < count; i++)
+ queue_buf[q_id].bufs[i] = buffers[i]->buf_addr;
+
+ return i;
+}
+
+static int skeleton_rawdev_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ unsigned int i;
+ uint16_t q_id;
+ RTE_SET_USED(dev);
+
+	/* As in enqueue, context carries the queue_id as an opaque
+	 * pointer to a uint16_t.
+	 */
+	q_id = *((uint16_t *)context);
+
+ for (i = 0; i < count; i++)
+ buffers[i]->buf_addr = queue_buf[q_id].bufs[i];
+
+ return i;
+}
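
Editor's note: both ring ops recover the target queue from the opaque context pointer, so callers must pass the address of a uint16_t queue id, as the self-test below does. A minimal sketch; dev_id is assumed to name a configured skeleton device:

    #include <rte_rawdev.h>

    static int
    send_one(uint16_t dev_id, struct rte_rawdev_buf *buf)
    {
            uint16_t queue_id = 0; /* read back by the driver via context */

            return rte_rawdev_enqueue_buffers(dev_id, &buf, 1, &queue_id);
    }
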
+
+static int skeleton_rawdev_dump(struct rte_rawdev *dev, FILE *f)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_status_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t status_info)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+	skeldev = skeleton_rawdev_get_priv(dev);
+
+ if (status_info)
+ memcpy(status_info, &skeldev->fw.firmware_state,
+ sizeof(enum skeleton_firmware_state));
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_version_get(
+ struct rte_rawdev *dev,
+ rte_rawdev_obj_t version_info)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_firmware_version_info *vi;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ vi = version_info;
+
+ vi->major = skeldev->fw.firmware_version.major;
+ vi->minor = skeldev->fw.firmware_version.minor;
+ vi->subrel = skeldev->fw.firmware_version.subrel;
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_load(struct rte_rawdev *dev,
+ rte_rawdev_obj_t firmware_buf)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+	/* firmware_buf is an mmap'd, possibly DMA-able, buffer area. Being a
+	 * dummy driver, all this does is check that firmware_buf is not NULL
+	 * and set the firmware state accordingly.
+	 */
+ if (!firmware_buf)
+ return -EINVAL;
+
+ skeldev->fw.firmware_state = SKELETON_FW_LOADED;
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_unload(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+
+ return 0;
+}
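
Editor's note: taken together with dev_start/dev_stop above, the firmware ops form a small state machine: FW_READY -> FW_LOADED on load, and only a loaded, stopped device may start. A condensed sketch of the legal sequence from application code; error handling is elided, and fw_image may be any non-NULL buffer since the load op only checks for NULL:

    #include <rte_rawdev.h>

    static void
    skeleton_fw_cycle(uint16_t dev_id, void *fw_image)
    {
            rte_rawdev_firmware_load(dev_id, fw_image); /* FW_READY -> FW_LOADED  */
            rte_rawdev_start(dev_id);           /* DEV_STOPPED -> DEV_RUNNING */
            rte_rawdev_stop(dev_id);            /* DEV_RUNNING -> DEV_STOPPED */
            rte_rawdev_firmware_unload(dev_id); /* back to FW_READY           */
    }
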
+
+static const struct rte_rawdev_ops skeleton_rawdev_ops = {
+ .dev_info_get = skeleton_rawdev_info_get,
+ .dev_configure = skeleton_rawdev_configure,
+ .dev_start = skeleton_rawdev_start,
+ .dev_stop = skeleton_rawdev_stop,
+ .dev_close = skeleton_rawdev_close,
+ .dev_reset = skeleton_rawdev_reset,
+
+ .queue_def_conf = skeleton_rawdev_queue_def_conf,
+ .queue_setup = skeleton_rawdev_queue_setup,
+ .queue_release = skeleton_rawdev_queue_release,
+ .queue_count = skeleton_rawdev_queue_count,
+
+ .attr_get = skeleton_rawdev_get_attr,
+ .attr_set = skeleton_rawdev_set_attr,
+
+ .enqueue_bufs = skeleton_rawdev_enqueue_bufs,
+ .dequeue_bufs = skeleton_rawdev_dequeue_bufs,
+
+ .dump = skeleton_rawdev_dump,
+
+ .xstats_get = NULL,
+ .xstats_get_names = NULL,
+ .xstats_get_by_name = NULL,
+ .xstats_reset = NULL,
+
+ .firmware_status_get = skeleton_rawdev_firmware_status_get,
+ .firmware_version_get = skeleton_rawdev_firmware_version_get,
+ .firmware_load = skeleton_rawdev_firmware_load,
+ .firmware_unload = skeleton_rawdev_firmware_unload,
+
+ .dev_selftest = test_rawdev_skeldev,
+};
+
+static int
+skeleton_rawdev_create(const char *name,
+ struct rte_vdev_device *vdev,
+ int socket_id)
+{
+ int ret = 0, i;
+ struct rte_rawdev *rawdev = NULL;
+ struct skeleton_rawdev *skeldev = NULL;
+
+ if (!name) {
+ SKELETON_PMD_ERR("Invalid name of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct skeleton_rawdev),
+ socket_id);
+ if (rawdev == NULL) {
+ SKELETON_PMD_ERR("Unable to allocate rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = rawdev->dev_id; /* return the rawdev id of new device */
+
+ rawdev->dev_ops = &skeleton_rawdev_ops;
+ rawdev->device = &vdev->device;
+
+ skeldev = skeleton_rawdev_get_priv(rawdev);
+
+ skeldev->device_id = SKELETON_DEVICE_ID;
+ skeldev->vendor_id = SKELETON_VENDOR_ID;
+ skeldev->capabilities = SKELETON_DEFAULT_CAPA;
+
+ memset(&skeldev->fw, 0, sizeof(struct skeleton_firmware));
+
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+ skeldev->fw.firmware_version.major = SKELETON_MAJOR_VER;
+ skeldev->fw.firmware_version.minor = SKELETON_MINOR_VER;
+ skeldev->fw.firmware_version.subrel = SKELETON_SUB_VER;
+
+ skeldev->device_state = SKELETON_DEV_STOPPED;
+
+ /* Reset/set to default queue configuration for this device */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
+ skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
+ skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
+ }
+
+ /* Clear all allocated queue buffers */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++)
+ clear_queue_bufs(i);
+
+ return ret;
+
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+skeleton_rawdev_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ if (!name) {
+ SKELETON_PMD_ERR("Invalid device name");
+ return -EINVAL;
+ }
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ SKELETON_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ SKELETON_PMD_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+skeldev_get_selftest(const char *key __rte_unused,
+ const char *value,
+ void *opaque)
+{
+ int *flag = opaque;
+ *flag = atoi(value);
+ return 0;
+}
+
+static int
+skeldev_parse_vdev_args(struct rte_vdev_device *vdev)
+{
+ int selftest = 0;
+ const char *name;
+ const char *params;
+
+ static const char *const args[] = {
+ SKELETON_SELFTEST_ARG,
+ NULL
+ };
+
+ name = rte_vdev_device_name(vdev);
+
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ SKELETON_PMD_INFO(
+ "Ignoring unsupported params supplied '%s'",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist,
+ SKELETON_SELFTEST_ARG,
+ skeldev_get_selftest, &selftest);
+			if (ret != 0 || selftest < 0 || selftest > 1) {
+				SKELETON_PMD_ERR("%s: Error in parsing args",
+						 name);
+				rte_kvargs_free(kvlist);
+				/* Fail probe on an invalid selftest value */
+				return -1;
+			}
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+
+ return selftest;
+}
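
Editor's note: the only device argument recognized here is selftest=0|1. A minimal sketch of instantiating the device with it from application code, equivalent to passing --vdev=rawdev_skeleton,selftest=1 on the EAL command line:

    #include <rte_bus_vdev.h>

    static int
    create_skeleton_with_selftest(void)
    {
            /* Probe runs the self-test right after creating the device */
            return rte_vdev_init("rawdev_skeleton", "selftest=1");
    }
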
+
+static int
+skeleton_rawdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int selftest = 0, ret = 0;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ /* More than one instance is not supported */
+ if (skeldev_init_once) {
+		SKELETON_PMD_ERR("Multiple instances not supported for %s",
+				 name);
+ return -EINVAL;
+ }
+
+ SKELETON_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+	selftest = skeldev_parse_vdev_args(vdev);
+	/* On invalid arguments selftest is -1 and simply skipped below */
+
+	ret = skeleton_rawdev_create(name, vdev, rte_socket_id());
+	if (ret >= 0) {
+		/* If a valid 'selftest' argument was passed on the command
+		 * line, run the self-test; otherwise execution continues
+		 * without it.
+		 */
+		if (selftest == 1)
+			test_rawdev_skeldev(ret);
+
+		/* Device instance created; a second instance is rejected */
+		skeldev_init_once = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeleton_rawdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -1;
+
+ SKELETON_PMD_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
+
+ ret = skeleton_rawdev_destroy(name);
+ if (!ret)
+ skeldev_init_once = 0;
+
+ return ret;
+}
+
+static struct rte_vdev_driver skeleton_pmd_drv = {
+ .probe = skeleton_rawdev_probe,
+ .remove = skeleton_rawdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);
+
+RTE_INIT(skeleton_pmd_init_log)
+{
+ skeleton_pmd_logtype = rte_log_register("rawdev.skeleton");
+ if (skeleton_pmd_logtype >= 0)
+ rte_log_set_level(skeleton_pmd_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev.h b/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev.h
new file mode 100644
index 000000000..c3f92e72a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef __SKELETON_RAWDEV_H__
+#define __SKELETON_RAWDEV_H__
+
+#include <rte_rawdev.h>
+
+extern int skeleton_pmd_logtype;
+
+#define SKELETON_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, skeleton_pmd_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define SKELETON_PMD_FUNC_TRACE() SKELETON_PMD_LOG(DEBUG, ">>")
+
+#define SKELETON_PMD_DEBUG(fmt, args...) \
+ SKELETON_PMD_LOG(DEBUG, fmt, ## args)
+#define SKELETON_PMD_INFO(fmt, args...) \
+ SKELETON_PMD_LOG(INFO, fmt, ## args)
+#define SKELETON_PMD_ERR(fmt, args...) \
+ SKELETON_PMD_LOG(ERR, fmt, ## args)
+#define SKELETON_PMD_WARN(fmt, args...) \
+ SKELETON_PMD_LOG(WARNING, fmt, ## args)
+/* Macros for self test application */
+#define SKELETON_TEST_INFO SKELETON_PMD_INFO
+#define SKELETON_TEST_DEBUG SKELETON_PMD_DEBUG
+#define SKELETON_TEST_ERR SKELETON_PMD_ERR
+#define SKELETON_TEST_WARN SKELETON_PMD_WARN
+
+#define SKELETON_SELFTEST_ARG ("selftest")
+
+#define SKELETON_VENDOR_ID 0x10
+#define SKELETON_DEVICE_ID 0x01
+
+#define SKELETON_MAJOR_VER 1
+#define SKELETON_MINOR_VER 0
+#define SKELETON_SUB_VER 0
+
+#define SKELETON_MAX_QUEUES 1
+
+enum skeleton_firmware_state {
+ SKELETON_FW_READY,
+ SKELETON_FW_LOADED,
+ SKELETON_FW_ERROR
+};
+
+enum skeleton_device_state {
+ SKELETON_DEV_RUNNING,
+ SKELETON_DEV_STOPPED
+};
+
+enum skeleton_queue_state {
+ SKELETON_QUEUE_DETACH,
+ SKELETON_QUEUE_ATTACH
+};
+
+#define SKELETON_QUEUE_DEF_DEPTH 10
+#define SKELETON_QUEUE_MAX_DEPTH 25
+
+struct skeleton_firmware_version_info {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t subrel;
+};
+
+struct skeleton_firmware {
+	/** Device firmware version */
+	struct skeleton_firmware_version_info firmware_version;
+	/** Firmware state */
+	enum skeleton_firmware_state firmware_state;
+};
+
+#define SKELETON_MAX_ATTRIBUTES 10
+#define SKELETON_ATTRIBUTE_NAME_MAX 20
+
+struct skeleton_rawdev_attributes {
+	/** Name of the attribute */
+	char *name;
+	/** Value, or a reference to the value, of the attribute */
+	uint64_t value;
+};
+
+/** Device supports firmware loading/unloading */
+#define SKELETON_CAPA_FW_LOAD 0x0001
+/** Device supports firmware reset */
+#define SKELETON_CAPA_FW_RESET 0x0002
+/** Device supports queue-based communication */
+#define SKELETON_CAPA_QUEUES 0x0004
+/** Default capabilities: FW_LOAD, FW_RESET and QUEUES */
+#define SKELETON_DEFAULT_CAPA 0x7
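
Editor's note: since SKELETON_DEFAULT_CAPA is written as a literal 0x7 rather than an OR of the three flags, a compile-time check can keep them in sync. A minimal sketch using RTE_BUILD_BUG_ON from rte_common.h; the helper name is hypothetical and would live in any .c file that includes this header:

    #include <rte_common.h>
    #include "skeleton_rawdev.h"

    static inline void
    skeleton_capa_sanity_check(void)
    {
            /* Fails to compile if the literal mask drifts from the flags */
            RTE_BUILD_BUG_ON(SKELETON_DEFAULT_CAPA !=
                             (SKELETON_CAPA_FW_LOAD | SKELETON_CAPA_FW_RESET |
                              SKELETON_CAPA_QUEUES));
    }
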
+
+struct skeleton_rawdev_queue {
+ uint8_t state;
+ uint32_t depth;
+};
+
+struct skeleton_rawdev {
+	uint16_t device_id;
+	uint16_t vendor_id;
+	uint16_t num_queues;
+	/** Bitmask of SKELETON_CAPA_* values */
+	uint16_t capabilities;
+	/** State of device; linked to firmware state */
+	enum skeleton_device_state device_state;
+	/** Firmware configuration */
+	struct skeleton_firmware fw;
+	/** Collection of all communication channels - which can be referred
+	 * to as queues.
+	 */
+	struct skeleton_rawdev_queue queues[SKELETON_MAX_QUEUES];
+	/** Global table containing various pre-defined and user-defined
+	 * attributes.
+	 */
+	struct skeleton_rawdev_attributes attr[SKELETON_MAX_ATTRIBUTES];
+	struct rte_device *device;
+};
+
+struct skeleton_rawdev_conf {
+ uint16_t num_queues;
+ unsigned int capabilities;
+ enum skeleton_device_state device_state;
+ enum skeleton_firmware_state firmware_state;
+};
+
+static inline struct skeleton_rawdev *
+skeleton_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+ return rawdev->dev_private;
+}
+
+int test_rawdev_skeldev(uint16_t dev_id);
+
+#endif /* __SKELETON_RAWDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c b/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c
new file mode 100644
index 000000000..9ecfdee81
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_dev.h>
+#include <rte_rawdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+/* Using relative path as skeleton_rawdev is not part of exported headers */
+#include "skeleton_rawdev.h"
+
+#define TEST_DEV_NAME "rawdev_skeleton"
+
+#define SKELDEV_LOGS(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, skeleton_pmd_logtype, fmt "\n", \
+ ##args)
+
+#define SKELDEV_TEST_INFO(fmt, args...) \
+ SKELDEV_LOGS(INFO, fmt, ## args)
+#define SKELDEV_TEST_DEBUG(fmt, args...) \
+ SKELDEV_LOGS(DEBUG, fmt, ## args)
+
+#define SKELDEV_TEST_RUN(setup, teardown, test) \
+ skeldev_test_run(setup, teardown, test, #test)
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED -1
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static uint16_t test_dev_id;
+
+static int
+testsuite_setup(void)
+{
+ uint8_t count;
+ count = rte_rawdev_count();
+ if (!count) {
+		SKELDEV_TEST_INFO("\tNo existing rawdev; "
+				  "creating '" TEST_DEV_NAME "'");
+		return rte_vdev_init(TEST_DEV_NAME, NULL);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static void local_teardown(void);
+
+static void
+testsuite_teardown(void)
+{
+ local_teardown();
+}
+
+static void
+local_teardown(void)
+{
+ rte_vdev_uninit(TEST_DEV_NAME);
+}
+
+static int
+test_rawdev_count(void)
+{
+ uint8_t count;
+ count = rte_rawdev_count();
+ RTE_TEST_ASSERT(count > 0, "Invalid rawdev count %" PRIu8, count);
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_get_dev_id(void)
+{
+ int ret;
+ ret = rte_rawdev_get_dev_id("invalid_rawdev_device");
+ RTE_TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d",
+ ret);
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_socket_id(void)
+{
+ int socket_id;
+ socket_id = rte_rawdev_socket_id(test_dev_id);
+ RTE_TEST_ASSERT(socket_id != -EINVAL,
+ "Failed to get socket_id %d", socket_id);
+ socket_id = rte_rawdev_socket_id(RTE_RAWDEV_MAX_DEVS);
+ RTE_TEST_ASSERT(socket_id == -EINVAL,
+ "Expected -EINVAL %d", socket_id);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_info_get(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf skel_conf = {0};
+
+ ret = rte_rawdev_info_get(test_dev_id, NULL);
+ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ rdev_info.dev_private = &skel_conf;
+
+ ret = rte_rawdev_info_get(test_dev_id, &rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get raw dev info");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_configure(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_set = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+
+ /* Check invalid configuration */
+ ret = rte_rawdev_configure(test_dev_id, NULL);
+ RTE_TEST_ASSERT(ret == -EINVAL,
+ "Null configure; Expected -EINVAL, got %d", ret);
+
+ /* Valid configuration test */
+ rdev_conf_set.num_queues = 1;
+ rdev_conf_set.capabilities = SKELETON_CAPA_FW_LOAD |
+ SKELETON_CAPA_FW_RESET;
+
+ rdev_info.dev_private = &rdev_conf_set;
+ ret = rte_rawdev_configure(test_dev_id,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure rawdev (%d)", ret);
+
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(test_dev_id,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_set.num_queues,
+ rdev_conf_get.num_queues,
+ "Configuration test failed; num_queues (%d)(%d)",
+ rdev_conf_set.num_queues,
+ rdev_conf_get.num_queues);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_set.capabilities,
+ rdev_conf_get.capabilities,
+ "Configuration test failed; capabilities");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_queue_default_conf_get(void)
+{
+ int ret, i;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ struct skeleton_rawdev_queue q = {0};
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(test_dev_id,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ /* call to test_rawdev_configure would have set the num_queues = 1 */
+	RTE_TEST_ASSERT(rdev_conf_get.num_queues > 0,
+			"Invalid number of queues (%d). Expected 1",
+			rdev_conf_get.num_queues);
+ /* All queues by default should have state = DETACH and
+ * depth = DEF_DEPTH
+ */
+ for (i = 0; i < rdev_conf_get.num_queues; i++) {
+ rte_rawdev_queue_conf_get(test_dev_id, i, &q);
+ RTE_TEST_ASSERT_EQUAL(q.depth, SKELETON_QUEUE_DEF_DEPTH,
+ "Invalid default depth of queue (%d)",
+ q.depth);
+ RTE_TEST_ASSERT_EQUAL(q.state, SKELETON_QUEUE_DETACH,
+ "Invalid default state of queue (%d)",
+ q.state);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_queue_count(void)
+{
+ unsigned int q_count;
+
+ /* Get the current configuration */
+ q_count = rte_rawdev_queue_count(test_dev_id);
+ RTE_TEST_ASSERT_EQUAL(q_count, 1, "Invalid queue count (%d)", q_count);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_queue_setup(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ struct skeleton_rawdev_queue qset = {0};
+ struct skeleton_rawdev_queue qget = {0};
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(test_dev_id,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ /* call to test_rawdev_configure would have set the num_queues = 1 */
+	RTE_TEST_ASSERT(rdev_conf_get.num_queues > 0,
+			"Invalid number of queues (%d). Expected 1",
+			rdev_conf_get.num_queues);
+
+ /* Modify the queue depth for Queue 0 and attach it */
+ qset.depth = 15;
+ qset.state = SKELETON_QUEUE_ATTACH;
+ ret = rte_rawdev_queue_setup(test_dev_id, 0, &qset);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue (%d)", ret);
+
+ /* Now, fetching the queue 0 should show depth as 15 */
+ ret = rte_rawdev_queue_conf_get(test_dev_id, 0, &qget);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get queue config (%d)", ret);
+
+ RTE_TEST_ASSERT_EQUAL(qset.depth, qget.depth,
+ "Failed to set queue depth: Need(%d), has(%d)",
+ qset.depth, qget.depth);
+
+ return TEST_SUCCESS;
+}
+
+/* After executing test_rawdev_queue_setup, queue_id=0 would have depth as 15.
+ * Releasing should set it back to default. state would set to DETACH
+ */
+static int
+test_rawdev_queue_release(void)
+{
+ int ret;
+ struct skeleton_rawdev_queue qget = {0};
+
+	/* Release queue 0; depth and state should reset to the defaults */
+ ret = rte_rawdev_queue_release(test_dev_id, 0);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to release queue 0; (%d)", ret);
+
+ /* Now, fetching the queue 0 should show depth as default */
+ ret = rte_rawdev_queue_conf_get(test_dev_id, 0, &qget);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get queue config (%d)", ret);
+
+ RTE_TEST_ASSERT_EQUAL(qget.depth, SKELETON_QUEUE_DEF_DEPTH,
+ "Release of Queue 0 failed; (depth)");
+
+ RTE_TEST_ASSERT_EQUAL(qget.state, SKELETON_QUEUE_DETACH,
+ "Release of Queue 0 failed; (state)");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_attr_set_get(void)
+{
+ int ret;
+ int *dummy_value, set_value;
+ uint64_t ret_value;
+
+ /* Set an attribute and fetch it */
+ ret = rte_rawdev_set_attr(test_dev_id, "Test1", 100);
+ RTE_TEST_ASSERT(!ret, "Unable to set an attribute (Test1)");
+
+	dummy_value = &set_value;
+	*dummy_value = 200;
+	ret = rte_rawdev_set_attr(test_dev_id, "Test2", (uintptr_t)dummy_value);
+	RTE_TEST_ASSERT(!ret, "Unable to set an attribute (Test2)");
+
+	/* Check if attributes have been set */
+	ret = rte_rawdev_get_attr(test_dev_id, "Test1", &ret_value);
+	RTE_TEST_ASSERT(!ret, "Unable to get attribute (Test1)");
+	RTE_TEST_ASSERT_EQUAL(ret_value, 100,
+			      "Attribute (Test1) not set correctly (%" PRIu64 ")",
+			      ret_value);
+
+	ret_value = 0;
+	ret = rte_rawdev_get_attr(test_dev_id, "Test2", &ret_value);
+	RTE_TEST_ASSERT(!ret, "Unable to get attribute (Test2)");
+	RTE_TEST_ASSERT_EQUAL(*((int *)(uintptr_t)ret_value), set_value,
+			      "Attribute (Test2) not set correctly (%" PRIu64 ")",
+			      ret_value);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_start_stop(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ char *dummy_firmware = NULL;
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+
+ /* Load a firmware using a dummy address area */
+ dummy_firmware = rte_zmalloc("RAWDEV SKELETON", sizeof(int) * 10, 0);
+ RTE_TEST_ASSERT(dummy_firmware != NULL,
+ "Failed to create firmware memory backing");
+
+ ret = rte_rawdev_firmware_load(test_dev_id, dummy_firmware);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Firmware loading failed (%d)", ret);
+
+ /* Skeleton doesn't do anything with the firmware area - that is dummy
+ * and can be removed.
+ */
+ rte_free(dummy_firmware);
+ dummy_firmware = NULL;
+
+ rte_rawdev_start(test_dev_id);
+ ret = rte_rawdev_info_get(test_dev_id, (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_get.device_state, SKELETON_DEV_RUNNING,
+ "Device start failed. State is (%d)",
+ rdev_conf_get.device_state);
+
+ rte_rawdev_stop(test_dev_id);
+ ret = rte_rawdev_info_get(test_dev_id, (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_get.device_state, SKELETON_DEV_STOPPED,
+ "Device stop failed. State is (%d)",
+ rdev_conf_get.device_state);
+
+ /* Unloading the firmware once device is stopped */
+ ret = rte_rawdev_firmware_unload(test_dev_id);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to unload firmware (%d)", ret);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_enqdeq(void)
+{
+ int ret;
+ unsigned int count = 1;
+ uint16_t queue_id = 0;
+ struct rte_rawdev_buf buffers[1];
+ struct rte_rawdev_buf *deq_buffers = NULL;
+
+ buffers[0].buf_addr = malloc(strlen(TEST_DEV_NAME) + 3);
+ if (!buffers[0].buf_addr)
+ goto cleanup;
+ snprintf(buffers[0].buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d",
+ TEST_DEV_NAME, 0);
+
+ ret = rte_rawdev_enqueue_buffers(test_dev_id,
+ (struct rte_rawdev_buf **)&buffers,
+ count, &queue_id);
+ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count,
+ "Unable to enqueue buffers");
+
+ deq_buffers = malloc(sizeof(struct rte_rawdev_buf) * count);
+ if (!deq_buffers)
+ goto cleanup;
+
+ ret = rte_rawdev_dequeue_buffers(test_dev_id,
+ (struct rte_rawdev_buf **)&deq_buffers,
+ count, &queue_id);
+ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count,
+ "Unable to dequeue buffers");
+
+	free(deq_buffers);
+	free(buffers[0].buf_addr);
+
+	return TEST_SUCCESS;
+cleanup:
+ if (buffers[0].buf_addr)
+ free(buffers[0].buf_addr);
+
+ return TEST_FAILED;
+}
+
+static void skeldev_test_run(int (*setup)(void),
+ void (*teardown)(void),
+ int (*test)(void),
+ const char *name)
+{
+ int ret = 0;
+
+ if (setup) {
+ ret = setup();
+ if (ret < 0) {
+ SKELDEV_TEST_INFO("Error setting up test %s", name);
+ unsupported++;
+ }
+ }
+
+	if (test && ret >= 0) {
+ ret = test();
+ if (ret < 0) {
+ failed++;
+ SKELDEV_TEST_INFO("%s Failed", name);
+ } else {
+ passed++;
+ SKELDEV_TEST_DEBUG("%s Passed", name);
+ }
+ }
+
+ if (teardown)
+ teardown();
+
+ total++;
+}
+
+int
+test_rawdev_skeldev(uint16_t dev_id)
+{
+ test_dev_id = dev_id;
+ testsuite_setup();
+
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_count);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_get_dev_id);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_socket_id);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_info_get);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_configure);
+ SKELDEV_TEST_RUN(test_rawdev_configure, NULL,
+ test_rawdev_queue_default_conf_get);
+ SKELDEV_TEST_RUN(test_rawdev_configure, NULL, test_rawdev_queue_setup);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_queue_count);
+ SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL,
+ test_rawdev_queue_release);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_attr_set_get);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_start_stop);
+ SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL, test_rawdev_enqdeq);
+
+ testsuite_teardown();
+
+ SKELDEV_TEST_INFO("Total tests : %d", total);
+ SKELDEV_TEST_INFO("Passed : %d", passed);
+ SKELDEV_TEST_INFO("Failed : %d", failed);
+ SKELDEV_TEST_INFO("Not supported : %d", unsupported);
+
+ if (failed)
+ return -1;
+
+ return 0;
+}