author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/raw/ioat
parent    Initial commit. (diff)
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/raw/ioat')
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/Makefile                     |  28
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev.c                 | 395
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev_test.c            | 245
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/meson.build                   |  11
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/rte_ioat_rawdev.h             | 233
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/rte_ioat_spec.h               | 301
-rw-r--r--  src/spdk/dpdk/drivers/raw/ioat/rte_rawdev_ioat_version.map   |   3
7 files changed, 1216 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/drivers/raw/ioat/Makefile b/src/spdk/dpdk/drivers/raw/ioat/Makefile
new file mode 100644
index 000000000..1609fe5e6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_rawdev_ioat.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_rawdev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_mbuf -lrte_mempool
+
+# versioning export map
+EXPORT_MAP := rte_rawdev_ioat_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV) += ioat_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV) += ioat_rawdev_test.c
+
+# export include files
+SYMLINK-y-include += rte_ioat_rawdev.h
+SYMLINK-y-include += rte_ioat_spec.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev.c b/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev.c
new file mode 100644
index 000000000..a8a48f90a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev.c
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <rte_cycles.h>
+#include <rte_bus_pci.h>
+#include <rte_string_fns.h>
+#include <rte_rawdev_pmd.h>
+
+#include "rte_ioat_rawdev.h"
+
+/* Dynamic log type identifier */
+int ioat_pmd_logtype;
+
+static struct rte_pci_driver ioat_pmd_drv;
+
+#define IOAT_VENDOR_ID 0x8086
+#define IOAT_DEVICE_ID_SKX 0x2021
+#define IOAT_DEVICE_ID_BDX0 0x6f20
+#define IOAT_DEVICE_ID_BDX1 0x6f21
+#define IOAT_DEVICE_ID_BDX2 0x6f22
+#define IOAT_DEVICE_ID_BDX3 0x6f23
+#define IOAT_DEVICE_ID_BDX4 0x6f24
+#define IOAT_DEVICE_ID_BDX5 0x6f25
+#define IOAT_DEVICE_ID_BDX6 0x6f26
+#define IOAT_DEVICE_ID_BDX7 0x6f27
+#define IOAT_DEVICE_ID_BDXE 0x6f2E
+#define IOAT_DEVICE_ID_BDXF 0x6f2F
+#define IOAT_DEVICE_ID_ICX 0x0b00
+
+#define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \
+ ioat_pmd_logtype, "%s(): " fmt "\n", __func__, ##args)
+
+#define IOAT_PMD_DEBUG(fmt, args...) IOAT_PMD_LOG(DEBUG, fmt, ## args)
+#define IOAT_PMD_INFO(fmt, args...) IOAT_PMD_LOG(INFO, fmt, ## args)
+#define IOAT_PMD_ERR(fmt, args...) IOAT_PMD_LOG(ERR, fmt, ## args)
+#define IOAT_PMD_WARN(fmt, args...) IOAT_PMD_LOG(WARNING, fmt, ## args)
+
+#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
+#define COMPLETION_SZ sizeof(__m128i)
+
+static int
+ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
+{
+ struct rte_ioat_rawdev_config *params = config;
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ unsigned short i;
+
+ if (dev->started)
+ return -EBUSY;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (params->ring_size > 4096 || params->ring_size < 64 ||
+ !rte_is_power_of_2(params->ring_size))
+ return -EINVAL;
+
+ ioat->ring_size = params->ring_size;
+ if (ioat->desc_ring != NULL) {
+ rte_memzone_free(ioat->desc_mz);
+ ioat->desc_ring = NULL;
+ ioat->desc_mz = NULL;
+ }
+
+ /* allocate one block of memory for both descriptors
+ * and completion handles.
+ */
+ snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
+ ioat->desc_mz = rte_memzone_reserve(mz_name,
+ (DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
+ dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
+ if (ioat->desc_mz == NULL)
+ return -ENOMEM;
+ ioat->desc_ring = ioat->desc_mz->addr;
+ ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];
+
+ ioat->ring_addr = ioat->desc_mz->iova;
+
+ /* configure descriptor ring - each one points to next */
+ for (i = 0; i < ioat->ring_size; i++) {
+ ioat->desc_ring[i].next = ioat->ring_addr +
+ (((i + 1) % ioat->ring_size) * DESC_SZ);
+ }
+
+ return 0;
+}
+
+static int
+ioat_dev_start(struct rte_rawdev *dev)
+{
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+
+ if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
+ return -EBUSY;
+
+ /* inform hardware of where the descriptor ring is */
+ ioat->regs->chainaddr = ioat->ring_addr;
+ /* inform hardware of where to write the status/completions */
+ ioat->regs->chancmp = ioat->status_addr;
+
+	/* prime the status register to point at the last ring element, so
+	 * that the first completion poll finds nothing completed
+	 */
+ ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
+ return 0;
+}
+
+static void
+ioat_dev_stop(struct rte_rawdev *dev)
+{
+ RTE_SET_USED(dev);
+}
+
+static void
+ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
+{
+ struct rte_ioat_rawdev_config *cfg = dev_info;
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+
+ if (cfg != NULL)
+ cfg->ring_size = ioat->ring_size;
+}
+
+static const char * const xstat_names[] = {
+ "failed_enqueues", "successful_enqueues",
+ "copies_started", "copies_completed"
+};
+
+static int
+ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ const struct rte_ioat_rawdev *ioat = dev->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ switch (ids[i]) {
+ case 0: values[i] = ioat->enqueue_failed; break;
+ case 1: values[i] = ioat->enqueued; break;
+ case 2: values[i] = ioat->started; break;
+ case 3: values[i] = ioat->completed; break;
+ default: values[i] = 0; break;
+ }
+ }
+ return n;
+}
+
+static int
+ioat_xstats_get_names(const struct rte_rawdev *dev,
+ struct rte_rawdev_xstats_name *names,
+ unsigned int size)
+{
+ unsigned int i;
+
+ RTE_SET_USED(dev);
+ if (size < RTE_DIM(xstat_names))
+ return RTE_DIM(xstat_names);
+
+ for (i = 0; i < RTE_DIM(xstat_names); i++)
+ strlcpy(names[i].name, xstat_names[i], sizeof(names[i]));
+
+ return RTE_DIM(xstat_names);
+}
+
+static int
+ioat_xstats_reset(struct rte_rawdev *dev, const uint32_t *ids, uint32_t nb_ids)
+{
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+ unsigned int i;
+
+ if (!ids) {
+ ioat->enqueue_failed = 0;
+ ioat->enqueued = 0;
+ ioat->started = 0;
+ ioat->completed = 0;
+ return 0;
+ }
+
+ for (i = 0; i < nb_ids; i++) {
+ switch (ids[i]) {
+ case 0:
+ ioat->enqueue_failed = 0;
+ break;
+ case 1:
+ ioat->enqueued = 0;
+ break;
+ case 2:
+ ioat->started = 0;
+ break;
+ case 3:
+ ioat->completed = 0;
+ break;
+ default:
+ IOAT_PMD_WARN("Invalid xstat id - cannot reset value");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+extern int ioat_rawdev_test(uint16_t dev_id);
+
+static int
+ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
+{
+ static const struct rte_rawdev_ops ioat_rawdev_ops = {
+ .dev_configure = ioat_dev_configure,
+ .dev_start = ioat_dev_start,
+ .dev_stop = ioat_dev_stop,
+ .dev_info_get = ioat_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
+ .dev_selftest = ioat_rawdev_test,
+ };
+
+ struct rte_rawdev *rawdev = NULL;
+ struct rte_ioat_rawdev *ioat = NULL;
+ const struct rte_memzone *mz = NULL;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int ret = 0;
+ int retry = 0;
+
+ if (!name) {
+ IOAT_PMD_ERR("Invalid name of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
+ dev->device.numa_node);
+ if (rawdev == NULL) {
+ IOAT_PMD_ERR("Unable to allocate raw device");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
+ mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
+ dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL) {
+ IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ rawdev->dev_private = mz->addr;
+ rawdev->dev_ops = &ioat_rawdev_ops;
+ rawdev->device = &dev->device;
+ rawdev->driver_name = dev->device.driver->name;
+
+ ioat = rawdev->dev_private;
+ ioat->rawdev = rawdev;
+ ioat->mz = mz;
+ ioat->regs = dev->mem_resource[0].addr;
+ ioat->ring_size = 0;
+ ioat->desc_ring = NULL;
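+	/* IOVA at which the hardware will write its completion status */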
+ ioat->status_addr = ioat->mz->iova +
+ offsetof(struct rte_ioat_rawdev, status);
+
+ /* do device initialization - reset and set error behaviour */
+ if (ioat->regs->chancnt != 1)
+ IOAT_PMD_ERR("%s: Channel count == %d\n", __func__,
+ ioat->regs->chancnt);
+
+ if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
+ IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
+ ioat->regs->chanctrl = 0;
+ }
+
+ ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
+ rte_delay_ms(1);
+ ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
+ rte_delay_ms(1);
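+	/* poll for the reset bit to clear, zeroing the chain address while
+	 * waiting; give up after roughly 200ms
+	 */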
+ while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
+ ioat->regs->chainaddr = 0;
+ rte_delay_ms(1);
+ if (++retry >= 200) {
+ IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32"\n",
+ __func__,
+ ioat->regs->chancmd,
+ ioat->regs->chansts,
+ ioat->regs->chanerr);
+			ret = -EIO;
+			goto cleanup;
+		}
+ }
+ ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
+ RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;
+
+ return 0;
+
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+ioat_rawdev_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ if (!name) {
+ IOAT_PMD_ERR("Invalid device name");
+ return -EINVAL;
+ }
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ IOAT_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ if (rdev->dev_private != NULL) {
+ struct rte_ioat_rawdev *ioat = rdev->dev_private;
+ rdev->dev_private = NULL;
+ rte_memzone_free(ioat->desc_mz);
+ rte_memzone_free(ioat->mz);
+ }
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ IOAT_PMD_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
+{
+ char name[32];
+ int ret = 0;
+
+
+ rte_pci_device_name(&dev->addr, name, sizeof(name));
+ IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
+
+ dev->device.driver = &drv->driver;
+ ret = ioat_rawdev_create(name, dev);
+ return ret;
+}
+
+static int
+ioat_rawdev_remove(struct rte_pci_device *dev)
+{
+ char name[32];
+ int ret;
+
+ rte_pci_device_name(&dev->addr, name, sizeof(name));
+
+ IOAT_PMD_INFO("Closing %s on NUMA node %d",
+ name, dev->device.numa_node);
+
+ ret = ioat_rawdev_destroy(name);
+ return ret;
+}
+
+static const struct rte_pci_id pci_id_ioat_map[] = {
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
+ { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver ioat_pmd_drv = {
+ .id_table = pci_id_ioat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = ioat_rawdev_probe,
+ .remove = ioat_rawdev_remove,
+};
+
+RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
+RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
+RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");
+
+RTE_INIT(ioat_pmd_init_log)
+{
+ ioat_pmd_logtype = rte_log_register(IOAT_PMD_LOG_NAME);
+ if (ioat_pmd_logtype >= 0)
+ rte_log_set_level(ioat_pmd_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev_test.c b/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev_test.c
new file mode 100644
index 000000000..c37351af2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/ioat_rawdev_test.c
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <inttypes.h>
+#include <rte_mbuf.h>
+#include "rte_rawdev.h"
+#include "rte_ioat_rawdev.h"
+
+int ioat_rawdev_test(uint16_t dev_id); /* pre-define to keep compiler happy */
+
+static struct rte_mempool *pool;
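+/* ring size reported by a previous run of this test; zero on first run */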
+static unsigned short expected_ring_size;
+
+static int
+test_enqueue_copies(int dev_id)
+{
+ const unsigned int length = 1024;
+ unsigned int i;
+
+ do {
+ struct rte_mbuf *src, *dst;
+ char *src_data, *dst_data;
+ struct rte_mbuf *completed[2] = {0};
+
+ /* test doing a single copy */
+ src = rte_pktmbuf_alloc(pool);
+ dst = rte_pktmbuf_alloc(pool);
+ src->data_len = src->pkt_len = length;
+ dst->data_len = dst->pkt_len = length;
+ src_data = rte_pktmbuf_mtod(src, char *);
+ dst_data = rte_pktmbuf_mtod(dst, char *);
+
+ for (i = 0; i < length; i++)
+ src_data[i] = rand() & 0xFF;
+
+ if (rte_ioat_enqueue_copy(dev_id,
+ src->buf_iova + src->data_off,
+ dst->buf_iova + dst->data_off,
+ length,
+ (uintptr_t)src,
+ (uintptr_t)dst,
+ 0 /* no fence */) != 1) {
+ printf("Error with rte_ioat_enqueue_copy\n");
+ return -1;
+ }
+ rte_ioat_do_copies(dev_id);
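+		/* brief pause to let the hardware finish the copy */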
+ usleep(10);
+
+ if (rte_ioat_completed_copies(dev_id, 1, (void *)&completed[0],
+ (void *)&completed[1]) != 1) {
+ printf("Error with rte_ioat_completed_copies\n");
+ return -1;
+ }
+ if (completed[0] != src || completed[1] != dst) {
+ printf("Error with completions: got (%p, %p), not (%p,%p)\n",
+ completed[0], completed[1], src, dst);
+ return -1;
+ }
+
+ for (i = 0; i < length; i++)
+ if (dst_data[i] != src_data[i]) {
+ printf("Data mismatch at char %u\n", i);
+ return -1;
+ }
+ rte_pktmbuf_free(src);
+ rte_pktmbuf_free(dst);
+ } while (0);
+
+ /* test doing multiple copies */
+ do {
+ struct rte_mbuf *srcs[32], *dsts[32];
+ struct rte_mbuf *completed_src[64];
+ struct rte_mbuf *completed_dst[64];
+ unsigned int j;
+
+ for (i = 0; i < RTE_DIM(srcs); i++) {
+ char *src_data;
+
+ srcs[i] = rte_pktmbuf_alloc(pool);
+ dsts[i] = rte_pktmbuf_alloc(pool);
+ srcs[i]->data_len = srcs[i]->pkt_len = length;
+ dsts[i]->data_len = dsts[i]->pkt_len = length;
+ src_data = rte_pktmbuf_mtod(srcs[i], char *);
+
+ for (j = 0; j < length; j++)
+ src_data[j] = rand() & 0xFF;
+
+ if (rte_ioat_enqueue_copy(dev_id,
+ srcs[i]->buf_iova + srcs[i]->data_off,
+ dsts[i]->buf_iova + dsts[i]->data_off,
+ length,
+ (uintptr_t)srcs[i],
+ (uintptr_t)dsts[i],
+ 0 /* nofence */) != 1) {
+ printf("Error with rte_ioat_enqueue_copy for buffer %u\n",
+ i);
+ return -1;
+ }
+ }
+ rte_ioat_do_copies(dev_id);
+ usleep(100);
+
+ if (rte_ioat_completed_copies(dev_id, 64, (void *)completed_src,
+ (void *)completed_dst) != RTE_DIM(srcs)) {
+ printf("Error with rte_ioat_completed_copies\n");
+ return -1;
+ }
+ for (i = 0; i < RTE_DIM(srcs); i++) {
+ char *src_data, *dst_data;
+
+ if (completed_src[i] != srcs[i]) {
+ printf("Error with source pointer %u\n", i);
+ return -1;
+ }
+ if (completed_dst[i] != dsts[i]) {
+ printf("Error with dest pointer %u\n", i);
+ return -1;
+ }
+
+ src_data = rte_pktmbuf_mtod(srcs[i], char *);
+ dst_data = rte_pktmbuf_mtod(dsts[i], char *);
+ for (j = 0; j < length; j++)
+ if (src_data[j] != dst_data[j]) {
+ printf("Error with copy of packet %u, byte %u\n",
+ i, j);
+ return -1;
+ }
+ rte_pktmbuf_free(srcs[i]);
+ rte_pktmbuf_free(dsts[i]);
+ }
+
+ } while (0);
+
+ return 0;
+}
+
+int
+ioat_rawdev_test(uint16_t dev_id)
+{
+#define IOAT_TEST_RINGSIZE 512
+ struct rte_ioat_rawdev_config p = { .ring_size = -1 };
+ struct rte_rawdev_info info = { .dev_private = &p };
+ struct rte_rawdev_xstats_name *snames = NULL;
+ uint64_t *stats = NULL;
+ unsigned int *ids = NULL;
+ unsigned int nb_xstats;
+ unsigned int i;
+
+ rte_rawdev_info_get(dev_id, &info);
+ if (p.ring_size != expected_ring_size) {
+ printf("Error, initial ring size is not as expected (Actual: %d, Expected: %d)\n",
+ (int)p.ring_size, expected_ring_size);
+ return -1;
+ }
+
+ p.ring_size = IOAT_TEST_RINGSIZE;
+ if (rte_rawdev_configure(dev_id, &info) != 0) {
+ printf("Error with rte_rawdev_configure()\n");
+ return -1;
+ }
+ rte_rawdev_info_get(dev_id, &info);
+ if (p.ring_size != IOAT_TEST_RINGSIZE) {
+ printf("Error, ring size is not %d (%d)\n",
+ IOAT_TEST_RINGSIZE, (int)p.ring_size);
+ return -1;
+ }
+ expected_ring_size = p.ring_size;
+
+ if (rte_rawdev_start(dev_id) != 0) {
+ printf("Error with rte_rawdev_start()\n");
+ return -1;
+ }
+
+ pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
+ 256, /* n == num elements */
+ 32, /* cache size */
+ 0, /* priv size */
+ 2048, /* data room size */
+ info.socket_id);
+ if (pool == NULL) {
+ printf("Error with mempool creation\n");
+ return -1;
+ }
+
+ /* allocate memory for xstats names and values */
+ nb_xstats = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
+
+ snames = malloc(sizeof(*snames) * nb_xstats);
+ if (snames == NULL) {
+ printf("Error allocating xstat names memory\n");
+ goto err;
+ }
+ rte_rawdev_xstats_names_get(dev_id, snames, nb_xstats);
+
+ ids = malloc(sizeof(*ids) * nb_xstats);
+ if (ids == NULL) {
+ printf("Error allocating xstat ids memory\n");
+ goto err;
+ }
+ for (i = 0; i < nb_xstats; i++)
+ ids[i] = i;
+
+ stats = malloc(sizeof(*stats) * nb_xstats);
+ if (stats == NULL) {
+ printf("Error allocating xstat memory\n");
+ goto err;
+ }
+
+ /* run the test cases */
+ for (i = 0; i < 100; i++) {
+ unsigned int j;
+
+ if (test_enqueue_copies(dev_id) != 0)
+ goto err;
+
+ rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
+ for (j = 0; j < nb_xstats; j++)
+ printf("%s: %"PRIu64" ", snames[j].name, stats[j]);
+ printf("\r");
+ }
+ printf("\n");
+
+ rte_rawdev_stop(dev_id);
+ if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
+ printf("Error resetting xstat values\n");
+ goto err;
+ }
+
+ rte_mempool_free(pool);
+ free(snames);
+ free(stats);
+ free(ids);
+ return 0;
+
+err:
+ rte_rawdev_stop(dev_id);
+ rte_rawdev_xstats_reset(dev_id, NULL, 0);
+ rte_mempool_free(pool);
+ free(snames);
+ free(stats);
+ free(ids);
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/raw/ioat/meson.build b/src/spdk/dpdk/drivers/raw/ioat/meson.build
new file mode 100644
index 000000000..0878418ae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2019 Intel Corporation
+
+build = dpdk_conf.has('RTE_ARCH_X86')
+reason = 'only supported on x86'
+sources = files('ioat_rawdev.c',
+ 'ioat_rawdev_test.c')
+deps += ['rawdev', 'bus_pci', 'mbuf']
+
+install_headers('rte_ioat_rawdev.h',
+ 'rte_ioat_spec.h')
diff --git a/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_rawdev.h b/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_rawdev.h
new file mode 100644
index 000000000..4b271f174
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_rawdev.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_IOAT_RAWDEV_H_
+#define _RTE_IOAT_RAWDEV_H_
+
+/**
+ * @file rte_ioat_rawdev.h
+ *
+ * Definitions for using the ioat rawdev device driver
+ *
+ * @warning
+ * @b EXPERIMENTAL: these structures and APIs may change without prior notice
+ */
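+ *
+ * Typical flow (sketch): configure and start the rawdev, enqueue one or
+ * more copies with rte_ioat_enqueue_copy(), ring the doorbell once with
+ * rte_ioat_do_copies(), then poll rte_ioat_completed_copies() to get
+ * back the opaque handles of the completed operations.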
+
+#include <x86intrin.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_prefetch.h>
+#include "rte_ioat_spec.h"
+
+/** Name of the device driver */
+#define IOAT_PMD_RAWDEV_NAME rawdev_ioat
+/** String reported as the device driver name by rte_rawdev_info_get() */
+#define IOAT_PMD_RAWDEV_NAME_STR "rawdev_ioat"
+/** Name used to adjust the log level for this driver */
+#define IOAT_PMD_LOG_NAME "rawdev.ioat"
+
+/**
+ * Configuration structure for an ioat rawdev instance
+ *
+ * This structure is to be passed as the ".dev_private" parameter when
+ * calling the rte_rawdev_info_get() and rte_rawdev_configure() APIs on
+ * an ioat rawdev instance.
+ */
+struct rte_ioat_rawdev_config {
+ unsigned short ring_size;
+};
+
+/**
+ * @internal
+ * Structure representing a device instance
+ */
+struct rte_ioat_rawdev {
+ struct rte_rawdev *rawdev;
+ const struct rte_memzone *mz;
+ const struct rte_memzone *desc_mz;
+
+ volatile struct rte_ioat_registers *regs;
+ phys_addr_t status_addr;
+ phys_addr_t ring_addr;
+
+ unsigned short ring_size;
+ struct rte_ioat_generic_hw_desc *desc_ring;
+ __m128i *hdls; /* completion handles for returning to user */
+
+
+ unsigned short next_read;
+ unsigned short next_write;
+
+ /* some statistics for tracking, if added/changed update xstats fns*/
+ uint64_t enqueue_failed __rte_cache_aligned;
+ uint64_t enqueued;
+ uint64_t started;
+ uint64_t completed;
+
+ /* to report completions, the device will write status back here */
+ volatile uint64_t status __rte_cache_aligned;
+};
+
+/**
+ * Enqueue a copy operation onto the ioat device
+ *
+ * This queues up a copy operation to be performed by hardware, but does not
+ * trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ * The rawdev device id of the ioat instance
+ * @param src
+ * The physical address of the source buffer
+ * @param dst
+ * The physical address of the destination buffer
+ * @param length
+ * The length of the data to be copied
+ * @param src_hdl
+ * An opaque handle for the source data, to be returned when this operation
+ * has been completed and the user polls for the completion details
+ * @param dst_hdl
+ * An opaque handle for the destination data, to be returned when this
+ * operation has been completed and the user polls for the completion details
+ * @param fence
+ * A flag parameter indicating that hardware should not begin to perform any
+ * subsequently enqueued copy operations until after this operation has
+ * completed
+ * @return
+ * Number of operations enqueued, either 0 or 1
+ */
+static inline int
+rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+ unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
+ int fence)
+{
+ struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
+ unsigned short read = ioat->next_read;
+ unsigned short write = ioat->next_write;
+ unsigned short mask = ioat->ring_size - 1;
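+	/* free slots, computed modulo ring_size (a power of two); one slot
+	 * is always left unused to distinguish a full ring from an empty one
+	 */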
+ unsigned short space = mask + read - write;
+ struct rte_ioat_generic_hw_desc *desc;
+
+ if (space == 0) {
+ ioat->enqueue_failed++;
+ return 0;
+ }
+
+ ioat->next_write = write + 1;
+ write &= mask;
+
+ desc = &ioat->desc_ring[write];
+ desc->size = length;
+	/* set the fence bit if requested; request a completion write-back
+	 * only on every 16th descriptor, to limit status writes
+	 */
+ desc->u.control_raw = (uint32_t)((!!fence << 4) | (!(write & 0xF)) << 3);
+ desc->src_addr = src;
+ desc->dest_addr = dst;
+
+ ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl, (int64_t)src_hdl);
+ rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
+
+ ioat->enqueued++;
+ return 1;
+}
+
+/**
+ * Trigger hardware to begin performing enqueued copy operations
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the copy operations previously enqueued by rte_ioat_enqueue_copy()
+ *
+ * @param dev_id
+ * The rawdev device id of the ioat instance
+ */
+static inline void
+rte_ioat_do_copies(int dev_id)
+{
+ struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
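+	/* request a completion write-back on the last descriptor of the batch */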
+ ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
+ .control.completion_update = 1;
+ rte_compiler_barrier();
+ ioat->regs->dmacount = ioat->next_write;
+ ioat->started = ioat->enqueued;
+}
+
+/**
+ * @internal
+ * Returns the index of the last completed operation.
+ */
+static inline int
+rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
+{
+ uint64_t status = ioat->status;
+
+ /* lower 3 bits indicate "transfer status" : active, idle, halted.
+ * We can ignore bit 0.
+ */
+ *error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
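+	/* status holds the address of the last completed 64-byte descriptor;
+	 * convert it to a ring index
+	 */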
+ return (status - ioat->ring_addr) >> 6;
+}
+
+/**
+ * Returns details of copy operations that have been completed
+ *
+ * Returns to the caller the user-provided "handles" for the copy operations
+ * which have been completed by the hardware, and not already returned by
+ * a previous call to this API.
+ *
+ * @param dev_id
+ * The rawdev device id of the ioat instance
+ * @param max_copies
+ * The number of entries which can fit in the src_hdls and dst_hdls
+ * arrays, i.e. max number of completed operations to report
+ * @param src_hdls
+ * Array to hold the source handle parameters of the completed copies
+ * @param dst_hdls
+ * Array to hold the destination handle parameters of the completed copies
+ * @return
+ * -1 on error, with rte_errno set appropriately.
+ * Otherwise number of completed operations i.e. number of entries written
+ * to the src_hdls and dst_hdls array parameters.
+ */
+static inline int
+rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+ uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+ struct rte_ioat_rawdev *ioat = rte_rawdevs[dev_id].dev_private;
+ unsigned short mask = (ioat->ring_size - 1);
+ unsigned short read = ioat->next_read;
+ unsigned short end_read, count;
+ int error;
+ int i = 0;
+
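+	/* index one past the last completed descriptor, then the number of
+	 * completions not yet returned to the caller
+	 */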
+ end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
+ count = (end_read - (read & mask)) & mask;
+
+ if (error) {
+ rte_errno = EIO;
+ return -1;
+ }
+
+ if (count > max_copies)
+ count = max_copies;
+
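+	/* each __m128i handle entry packs (src_hdl, dst_hdl); unpack two
+	 * entries at a time into the separate output arrays
+	 */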
+ for (; i < count - 1; i += 2, read += 2) {
+ __m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
+ __m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);
+
+ _mm_storeu_si128((void *)&src_hdls[i],
+ _mm_unpacklo_epi64(hdls0, hdls1));
+ _mm_storeu_si128((void *)&dst_hdls[i],
+ _mm_unpackhi_epi64(hdls0, hdls1));
+ }
+ for (; i < count; i++, read++) {
+ uintptr_t *hdls = (void *)&ioat->hdls[read & mask];
+ src_hdls[i] = hdls[0];
+ dst_hdls[i] = hdls[1];
+ }
+
+ ioat->next_read = read;
+ ioat->completed += count;
+ return count;
+}
+
+#endif /* _RTE_IOAT_RAWDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_spec.h b/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_spec.h
new file mode 100644
index 000000000..c6e7929b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/rte_ioat_spec.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) Intel Corporation
+ */
+
+/**
+ * \file
+ * I/OAT specification definitions
+ *
+ * Taken from ioat_spec.h from SPDK project, with prefix renames and
+ * other minor changes.
+ */
+
+#ifndef RTE_IOAT_SPEC_H
+#define RTE_IOAT_SPEC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#define RTE_IOAT_PCI_CHANERR_INT_OFFSET 0x180
+
+#define RTE_IOAT_INTRCTRL_MASTER_INT_EN 0x01
+
+#define RTE_IOAT_VER_3_0 0x30
+#define RTE_IOAT_VER_3_3 0x33
+
+/* DMA Channel Registers */
+#define RTE_IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
+#define RTE_IOAT_CHANCTRL_COMPL_DCA_EN 0x0200
+#define RTE_IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
+#define RTE_IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
+#define RTE_IOAT_CHANCTRL_ERR_INT_EN 0x0010
+#define RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
+#define RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
+#define RTE_IOAT_CHANCTRL_INT_REARM 0x0001
+
+/* DMA Channel Capabilities */
+#define RTE_IOAT_DMACAP_PB (1 << 0)
+#define RTE_IOAT_DMACAP_DCA (1 << 4)
+#define RTE_IOAT_DMACAP_BFILL (1 << 6)
+#define RTE_IOAT_DMACAP_XOR (1 << 8)
+#define RTE_IOAT_DMACAP_PQ (1 << 9)
+#define RTE_IOAT_DMACAP_DMA_DIF (1 << 10)
+
+struct rte_ioat_registers {
+ uint8_t chancnt;
+ uint8_t xfercap;
+ uint8_t genctrl;
+ uint8_t intrctrl;
+ uint32_t attnstatus;
+ uint8_t cbver; /* 0x08 */
+ uint8_t reserved4[0x3]; /* 0x09 */
+ uint16_t intrdelay; /* 0x0C */
+ uint16_t cs_status; /* 0x0E */
+ uint32_t dmacapability; /* 0x10 */
+ uint8_t reserved5[0x6C]; /* 0x14 */
+ uint16_t chanctrl; /* 0x80 */
+ uint8_t reserved6[0x2]; /* 0x82 */
+ uint8_t chancmd; /* 0x84 */
+ uint8_t reserved3[1]; /* 0x85 */
+ uint16_t dmacount; /* 0x86 */
+ uint64_t chansts; /* 0x88 */
+ uint64_t chainaddr; /* 0x90 */
+ uint64_t chancmp; /* 0x98 */
+ uint8_t reserved2[0x8]; /* 0xA0 */
+ uint32_t chanerr; /* 0xA8 */
+ uint32_t chanerrmask; /* 0xAC */
+} __rte_packed;
+
+#define RTE_IOAT_CHANCMD_RESET 0x20
+#define RTE_IOAT_CHANCMD_SUSPEND 0x04
+
+#define RTE_IOAT_CHANSTS_STATUS 0x7ULL
+#define RTE_IOAT_CHANSTS_ACTIVE 0x0
+#define RTE_IOAT_CHANSTS_IDLE 0x1
+#define RTE_IOAT_CHANSTS_SUSPENDED 0x2
+#define RTE_IOAT_CHANSTS_HALTED 0x3
+#define RTE_IOAT_CHANSTS_ARMED 0x4
+
+#define RTE_IOAT_CHANSTS_UNAFFILIATED_ERROR 0x8ULL
+#define RTE_IOAT_CHANSTS_SOFT_ERROR 0x10ULL
+
+#define RTE_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK (~0x3FULL)
+
+#define RTE_IOAT_CHANCMP_ALIGN 8 /* CHANCMP address must be 64-bit aligned */
+
+struct rte_ioat_generic_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t reserved2: 1;
+ uint32_t src_page_break: 1;
+ uint32_t dest_page_break: 1;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t reserved: 13;
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t op_specific[4];
+};
+
+struct rte_ioat_dma_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t null: 1;
+ uint32_t src_page_break: 1;
+ uint32_t dest_page_break: 1;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t reserved: 13;
+#define RTE_IOAT_OP_COPY 0x00
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t reserved;
+ uint64_t reserved2;
+ uint64_t user1;
+ uint64_t user2;
+};
+
+struct rte_ioat_fill_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t reserved: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t reserved2: 2;
+ uint32_t dest_page_break: 1;
+ uint32_t bundle: 1;
+ uint32_t reserved3: 15;
+#define RTE_IOAT_OP_FILL 0x01
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_data;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t reserved;
+ uint64_t next_dest_addr;
+ uint64_t user1;
+ uint64_t user2;
+};
+
+struct rte_ioat_xor_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t src_count: 3;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t reserved: 13;
+#define RTE_IOAT_OP_XOR 0x87
+#define RTE_IOAT_OP_XOR_VAL 0x88
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t dest_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t src_addr3;
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+};
+
+struct rte_ioat_xor_ext_hw_desc {
+ uint64_t src_addr6;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t next;
+ uint64_t reserved[4];
+};
+
+struct rte_ioat_pq_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t src_count: 3;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t p_disable: 1;
+ uint32_t q_disable: 1;
+ uint32_t reserved: 11;
+#define RTE_IOAT_OP_PQ 0x89
+#define RTE_IOAT_OP_PQ_VAL 0x8a
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t p_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t src_addr3;
+ uint8_t coef[8];
+ uint64_t q_addr;
+};
+
+struct rte_ioat_pq_ext_hw_desc {
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+ uint64_t src_addr6;
+ uint64_t next;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t reserved[2];
+};
+
+struct rte_ioat_pq_update_hw_desc {
+ uint32_t size;
+ union {
+ uint32_t control_raw;
+ struct {
+ uint32_t int_enable: 1;
+ uint32_t src_snoop_disable: 1;
+ uint32_t dest_snoop_disable: 1;
+ uint32_t completion_update: 1;
+ uint32_t fence: 1;
+ uint32_t src_cnt: 3;
+ uint32_t bundle: 1;
+ uint32_t dest_dca: 1;
+ uint32_t hint: 1;
+ uint32_t p_disable: 1;
+ uint32_t q_disable: 1;
+ uint32_t reserved: 3;
+ uint32_t coef: 8;
+#define RTE_IOAT_OP_PQ_UP 0x8b
+ uint32_t op: 8;
+ } control;
+ } u;
+ uint64_t src_addr;
+ uint64_t p_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t p_src;
+ uint64_t q_src;
+ uint64_t q_addr;
+};
+
+struct rte_ioat_raw_hw_desc {
+ uint64_t field[8];
+};
+
+union rte_ioat_hw_desc {
+ struct rte_ioat_raw_hw_desc raw;
+ struct rte_ioat_generic_hw_desc generic;
+ struct rte_ioat_dma_hw_desc dma;
+ struct rte_ioat_fill_hw_desc fill;
+ struct rte_ioat_xor_hw_desc xor_desc;
+ struct rte_ioat_xor_ext_hw_desc xor_ext;
+ struct rte_ioat_pq_hw_desc pq;
+ struct rte_ioat_pq_ext_hw_desc pq_ext;
+ struct rte_ioat_pq_update_hw_desc pq_update;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_IOAT_SPEC_H */
diff --git a/src/spdk/dpdk/drivers/raw/ioat/rte_rawdev_ioat_version.map b/src/spdk/dpdk/drivers/raw/ioat/rte_rawdev_ioat_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/ioat/rte_rawdev_ioat_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};