path: root/src/spdk/module/accel
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/module/accel
parent     Initial commit. (diff)
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/module/accel')
-rw-r--r--  src/spdk/module/accel/Makefile                       46
-rw-r--r--  src/spdk/module/accel/idxd/Makefile                  45
-rw-r--r--  src/spdk/module/accel/idxd/accel_engine_idxd.c      847
-rw-r--r--  src/spdk/module/accel/idxd/accel_engine_idxd.h       43
-rw-r--r--  src/spdk/module/accel/idxd/accel_engine_idxd_rpc.c   75
-rw-r--r--  src/spdk/module/accel/ioat/Makefile                  45
-rw-r--r--  src/spdk/module/accel/ioat/accel_engine_ioat.c      764
-rw-r--r--  src/spdk/module/accel/ioat/accel_engine_ioat.h       44
-rw-r--r--  src/spdk/module/accel/ioat/accel_engine_ioat_rpc.c  116
9 files changed, 2025 insertions, 0 deletions
diff --git a/src/spdk/module/accel/Makefile b/src/spdk/module/accel/Makefile
new file mode 100644
index 000000000..bafa7b9ca
--- /dev/null
+++ b/src/spdk/module/accel/Makefile
@@ -0,0 +1,46 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = ioat
+
+DIRS-$(CONFIG_IDXD) += idxd
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
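The idxd subdirectory above is built only when CONFIG_IDXD is set, which in an
SPDK tree is normally selected at configure time (a sketch, assuming the
standard SPDK configure script):

  ./configure --with-idxd
  make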
diff --git a/src/spdk/module/accel/idxd/Makefile b/src/spdk/module/accel/idxd/Makefile
new file mode 100644
index 000000000..f2540f900
--- /dev/null
+++ b/src/spdk/module/accel/idxd/Makefile
@@ -0,0 +1,45 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+SO_VER := 1
+SO_MINOR := 0
+
+LIBNAME = accel_idxd
+C_SRCS = accel_engine_idxd.c accel_engine_idxd_rpc.c
+
+SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map
+
+include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk
diff --git a/src/spdk/module/accel/idxd/accel_engine_idxd.c b/src/spdk/module/accel/idxd/accel_engine_idxd.c
new file mode 100644
index 000000000..e5af0181f
--- /dev/null
+++ b/src/spdk/module/accel/idxd/accel_engine_idxd.c
@@ -0,0 +1,847 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "accel_engine_idxd.h"
+
+#include "spdk/stdinc.h"
+
+#include "spdk_internal/accel_engine.h"
+#include "spdk_internal/log.h"
+#include "spdk_internal/idxd.h"
+
+#include "spdk/env.h"
+#include "spdk/conf.h"
+#include "spdk/event.h"
+#include "spdk/thread.h"
+#include "spdk/idxd.h"
+#include "spdk/util.h"
+#include "spdk/json.h"
+
+#define ALIGN_4K 0x1000
+
+static bool g_idxd_enable = false;
+uint32_t g_config_number;
+
+enum channel_state {
+ IDXD_CHANNEL_ACTIVE,
+ IDXD_CHANNEL_PAUSED,
+ IDXD_CHANNEL_ERROR,
+};
+
+static bool g_idxd_initialized = false;
+
+struct pci_device {
+ struct spdk_pci_device *pci_dev;
+ TAILQ_ENTRY(pci_device) tailq;
+};
+static TAILQ_HEAD(, pci_device) g_pci_devices = TAILQ_HEAD_INITIALIZER(g_pci_devices);
+
+struct idxd_device {
+ struct spdk_idxd_device *idxd;
+ int num_channels;
+ TAILQ_ENTRY(idxd_device) tailq;
+};
+static TAILQ_HEAD(, idxd_device) g_idxd_devices = TAILQ_HEAD_INITIALIZER(g_idxd_devices);
+static struct idxd_device *g_next_dev = NULL;
+
+struct idxd_op {
+ struct spdk_idxd_io_channel *chan;
+ void *cb_arg;
+ spdk_idxd_req_cb cb_fn;
+ void *src;
+ union {
+ void *dst;
+ void *src2;
+ };
+ void *dst2;
+ uint32_t seed;
+ uint64_t fill_pattern;
+ uint32_t op_code;
+ uint64_t nbytes;
+ struct idxd_batch *batch;
+ TAILQ_ENTRY(idxd_op) link;
+};
+
+struct idxd_io_channel {
+ struct spdk_idxd_io_channel *chan;
+ struct spdk_idxd_device *idxd;
+ struct idxd_device *dev;
+ enum channel_state state;
+ struct spdk_poller *poller;
+ TAILQ_HEAD(, idxd_op) queued_ops;
+};
+
+struct idxd_task {
+ spdk_accel_completion_cb cb;
+};
+
+pthread_mutex_t g_configuration_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static struct spdk_io_channel *idxd_get_io_channel(void);
+
+static struct idxd_device *
+idxd_select_device(void)
+{
+	/*
+	 * We allow channels to share underlying devices;
+	 * selection is round-robin.
+	 */
+
+ g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
+ if (g_next_dev == NULL) {
+ g_next_dev = TAILQ_FIRST(&g_idxd_devices);
+ }
+ return g_next_dev;
+}
+
+static int
+idxd_poll(void *arg)
+{
+ struct idxd_io_channel *chan = arg;
+ struct idxd_op *op = NULL;
+ int rc;
+
+ spdk_idxd_process_events(chan->chan);
+
+	/* Only process queued ops while the channel is active. */
+ if (chan->state != IDXD_CHANNEL_ACTIVE) {
+ return -1;
+ }
+
+ while (!TAILQ_EMPTY(&chan->queued_ops)) {
+ op = TAILQ_FIRST(&chan->queued_ops);
+
+ switch (op->op_code) {
+ case IDXD_OPCODE_MEMMOVE:
+ rc = spdk_idxd_submit_copy(op->chan, op->dst, op->src, op->nbytes,
+ op->cb_fn, op->cb_arg);
+ break;
+ case IDXD_OPCODE_DUALCAST:
+ rc = spdk_idxd_submit_dualcast(op->chan, op->dst, op->dst2, op->src, op->nbytes,
+ op->cb_fn, op->cb_arg);
+ break;
+ case IDXD_OPCODE_COMPARE:
+ rc = spdk_idxd_submit_compare(op->chan, op->src, op->src2, op->nbytes,
+ op->cb_fn, op->cb_arg);
+ break;
+ case IDXD_OPCODE_MEMFILL:
+ rc = spdk_idxd_submit_fill(op->chan, op->dst, op->fill_pattern, op->nbytes,
+ op->cb_fn, op->cb_arg);
+ break;
+ case IDXD_OPCODE_CRC32C_GEN:
+ rc = spdk_idxd_submit_crc32c(op->chan, op->dst, op->src, op->seed, op->nbytes,
+ op->cb_fn, op->cb_arg);
+ break;
+ case IDXD_OPCODE_BATCH:
+ rc = spdk_idxd_batch_submit(op->chan, op->batch, op->cb_fn, op->cb_arg);
+ break;
+ default:
+ /* Should never get here */
+ assert(false);
+ break;
+ }
+ if (rc == 0) {
+ TAILQ_REMOVE(&chan->queued_ops, op, link);
+ free(op);
+ } else {
+			/* Busy; leave the op queued and retry on a later poll. */
+ break;
+ }
+ }
+
+ return -1;
+}
+
+static size_t
+accel_engine_idxd_get_ctx_size(void)
+{
+ return sizeof(struct idxd_task) + sizeof(struct spdk_accel_task);
+}
+
+static void
+idxd_done(void *cb_arg, int status)
+{
+ struct spdk_accel_task *accel_req;
+ struct idxd_task *idxd_task = cb_arg;
+
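+	/* cb_arg is the idxd_task stored in the accel task's offload_ctx
+	 * area; recover the enclosing spdk_accel_task from it. */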
+ accel_req = SPDK_CONTAINEROF(idxd_task, struct spdk_accel_task,
+ offload_ctx);
+
+ idxd_task->cb(accel_req, status);
+}
+
+static struct idxd_op *
+_prep_queue_command(struct idxd_io_channel *chan, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_op *op_to_queue;
+
+ op_to_queue = calloc(1, sizeof(struct idxd_op));
+ if (op_to_queue == NULL) {
+ SPDK_ERRLOG("Failed to allocate operation for queueing\n");
+ return NULL;
+ }
+
+ op_to_queue->chan = chan->chan;
+ op_to_queue->cb_fn = cb_fn;
+ op_to_queue->cb_arg = cb_arg;
+
+ return op_to_queue;
+}
+
+static int
+idxd_submit_copy(struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes,
+ spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ int rc = 0;
+
+ idxd_task->cb = cb_fn;
+
+ if (chan->state == IDXD_CHANNEL_ACTIVE) {
+ rc = spdk_idxd_submit_copy(chan->chan, dst, src, nbytes, idxd_done, idxd_task);
+ }
+
+ if (chan->state == IDXD_CHANNEL_PAUSED || rc == -EBUSY) {
+ struct idxd_op *op_to_queue;
+
+		/* Common prep. */
+ op_to_queue = _prep_queue_command(chan, idxd_done, idxd_task);
+ if (op_to_queue == NULL) {
+ return -ENOMEM;
+ }
+
+ /* Command specific. */
+ op_to_queue->dst = dst;
+ op_to_queue->src = src;
+ op_to_queue->nbytes = nbytes;
+ op_to_queue->op_code = IDXD_OPCODE_MEMMOVE;
+
+ /* Queue the operation. */
+ TAILQ_INSERT_TAIL(&chan->queued_ops, op_to_queue, link);
+ return 0;
+
+ } else if (chan->state == IDXD_CHANNEL_ERROR) {
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int
+idxd_submit_dualcast(struct spdk_io_channel *ch, void *dst1, void *dst2, void *src,
+ uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ int rc = 0;
+
+ idxd_task->cb = cb_fn;
+
+ if (chan->state == IDXD_CHANNEL_ACTIVE) {
+ rc = spdk_idxd_submit_dualcast(chan->chan, dst1, dst2, src, nbytes, idxd_done, idxd_task);
+ }
+
+ if (chan->state == IDXD_CHANNEL_PAUSED || rc == -EBUSY) {
+ struct idxd_op *op_to_queue;
+
+		/* Common prep. */
+ op_to_queue = _prep_queue_command(chan, idxd_done, idxd_task);
+ if (op_to_queue == NULL) {
+ return -ENOMEM;
+ }
+
+ /* Command specific. */
+ op_to_queue->dst = dst1;
+ op_to_queue->dst2 = dst2;
+ op_to_queue->src = src;
+ op_to_queue->nbytes = nbytes;
+ op_to_queue->op_code = IDXD_OPCODE_DUALCAST;
+
+ /* Queue the operation. */
+ TAILQ_INSERT_TAIL(&chan->queued_ops, op_to_queue, link);
+ return 0;
+
+ } else if (chan->state == IDXD_CHANNEL_ERROR) {
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int
+idxd_submit_compare(struct spdk_io_channel *ch, void *src1, void *src2,
+ uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ int rc = 0;
+
+ idxd_task->cb = cb_fn;
+
+ if (chan->state == IDXD_CHANNEL_ACTIVE) {
+ rc = spdk_idxd_submit_compare(chan->chan, src1, src2, nbytes, idxd_done, idxd_task);
+ }
+
+ if (chan->state == IDXD_CHANNEL_PAUSED || rc == -EBUSY) {
+ struct idxd_op *op_to_queue;
+
+		/* Common prep. */
+ op_to_queue = _prep_queue_command(chan, idxd_done, idxd_task);
+ if (op_to_queue == NULL) {
+ return -ENOMEM;
+ }
+
+ /* Command specific. */
+ op_to_queue->src = src1;
+ op_to_queue->src2 = src2;
+ op_to_queue->nbytes = nbytes;
+ op_to_queue->op_code = IDXD_OPCODE_COMPARE;
+
+ /* Queue the operation. */
+ TAILQ_INSERT_TAIL(&chan->queued_ops, op_to_queue, link);
+ return 0;
+
+ } else if (chan->state == IDXD_CHANNEL_ERROR) {
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int
+idxd_submit_fill(struct spdk_io_channel *ch, void *dst, uint8_t fill,
+ uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ int rc = 0;
+ uint64_t fill_pattern;
+
+ idxd_task->cb = cb_fn;
+ memset(&fill_pattern, fill, sizeof(uint64_t));
+
+ if (chan->state == IDXD_CHANNEL_ACTIVE) {
+ rc = spdk_idxd_submit_fill(chan->chan, dst, fill_pattern, nbytes, idxd_done, idxd_task);
+ }
+
+ if (chan->state == IDXD_CHANNEL_PAUSED || rc == -EBUSY) {
+ struct idxd_op *op_to_queue;
+
+		/* Common prep. */
+ op_to_queue = _prep_queue_command(chan, idxd_done, idxd_task);
+ if (op_to_queue == NULL) {
+ return -ENOMEM;
+ }
+
+ /* Command specific. */
+ op_to_queue->dst = dst;
+ op_to_queue->fill_pattern = fill_pattern;
+ op_to_queue->nbytes = nbytes;
+ op_to_queue->op_code = IDXD_OPCODE_MEMFILL;
+
+ /* Queue the operation. */
+ TAILQ_INSERT_TAIL(&chan->queued_ops, op_to_queue, link);
+ return 0;
+
+ } else if (chan->state == IDXD_CHANNEL_ERROR) {
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int
+idxd_submit_crc32c(struct spdk_io_channel *ch, uint32_t *dst, void *src,
+ uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ int rc = 0;
+
+ idxd_task->cb = cb_fn;
+
+ if (chan->state == IDXD_CHANNEL_ACTIVE) {
+ rc = spdk_idxd_submit_crc32c(chan->chan, dst, src, seed, nbytes, idxd_done, idxd_task);
+ }
+
+ if (chan->state == IDXD_CHANNEL_PAUSED || rc == -EBUSY) {
+ struct idxd_op *op_to_queue;
+
+		/* Common prep. */
+ op_to_queue = _prep_queue_command(chan, idxd_done, idxd_task);
+ if (op_to_queue == NULL) {
+ return -ENOMEM;
+ }
+
+ /* Command specific. */
+ op_to_queue->dst = dst;
+ op_to_queue->src = src;
+ op_to_queue->seed = seed;
+ op_to_queue->nbytes = nbytes;
+ op_to_queue->op_code = IDXD_OPCODE_CRC32C_GEN;
+
+ /* Queue the operation. */
+ TAILQ_INSERT_TAIL(&chan->queued_ops, op_to_queue, link);
+ return 0;
+
+ } else if (chan->state == IDXD_CHANNEL_ERROR) {
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static uint64_t
+idxd_get_capabilities(void)
+{
+ return ACCEL_COPY | ACCEL_FILL | ACCEL_CRC32C | ACCEL_COMPARE |
+ ACCEL_DUALCAST | ACCEL_BATCH;
+}
+
+static uint32_t
+idxd_batch_get_max(void)
+{
+ return spdk_idxd_batch_get_max();
+}
+
+static struct spdk_accel_batch *
+idxd_batch_start(struct spdk_io_channel *ch)
+{
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+
+ return (struct spdk_accel_batch *)spdk_idxd_batch_create(chan->chan);
+}
+
+static int
+idxd_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch)
+{
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ struct idxd_batch *batch = (struct idxd_batch *)_batch;
+
+ return spdk_idxd_batch_cancel(chan->chan, batch);
+}
+
+static int
+idxd_batch_submit(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch,
+ spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ struct idxd_batch *batch = (struct idxd_batch *)_batch;
+ int rc = 0;
+
+ idxd_task->cb = cb_fn;
+
+ if (chan->state == IDXD_CHANNEL_ACTIVE) {
+ rc = spdk_idxd_batch_submit(chan->chan, batch, idxd_done, idxd_task);
+ }
+
+ if (chan->state == IDXD_CHANNEL_PAUSED || rc == -EBUSY) {
+ struct idxd_op *op_to_queue;
+
+		/* Common prep. */
+ op_to_queue = _prep_queue_command(chan, idxd_done, idxd_task);
+ if (op_to_queue == NULL) {
+ return -ENOMEM;
+ }
+
+ /* Command specific. */
+ op_to_queue->batch = batch;
+ op_to_queue->op_code = IDXD_OPCODE_BATCH;
+
+ /* Queue the operation. */
+ TAILQ_INSERT_TAIL(&chan->queued_ops, op_to_queue, link);
+ return 0;
+
+ } else if (chan->state == IDXD_CHANNEL_ERROR) {
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int
+idxd_batch_prep_copy(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch,
+ void *dst, void *src, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ struct idxd_batch *batch = (struct idxd_batch *)_batch;
+
+ idxd_task->cb = cb_fn;
+
+ return spdk_idxd_batch_prep_copy(chan->chan, batch, dst, src, nbytes,
+ idxd_done, idxd_task);
+}
+
+static int
+idxd_batch_prep_fill(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch,
+ void *dst, uint8_t fill, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ uint64_t fill_pattern;
+ struct idxd_batch *batch = (struct idxd_batch *)_batch;
+
+ idxd_task->cb = cb_fn;
+ memset(&fill_pattern, fill, sizeof(uint64_t));
+
+ return spdk_idxd_batch_prep_fill(chan->chan, batch, dst, fill_pattern, nbytes, idxd_done,
+ idxd_task);
+}
+
+static int
+idxd_batch_prep_dualcast(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch,
+ void *dst1, void *dst2, void *src, uint64_t nbytes,
+ spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ struct idxd_batch *batch = (struct idxd_batch *)_batch;
+
+ idxd_task->cb = cb_fn;
+
+ return spdk_idxd_batch_prep_dualcast(chan->chan, batch, dst1, dst2, src, nbytes, idxd_done,
+ idxd_task);
+}
+
+static int
+idxd_batch_prep_crc32c(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch,
+ uint32_t *dst, void *src, uint32_t seed, uint64_t nbytes,
+ spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ struct idxd_batch *batch = (struct idxd_batch *)_batch;
+
+ idxd_task->cb = cb_fn;
+
+ return spdk_idxd_batch_prep_crc32c(chan->chan, batch, dst, src, seed, nbytes, idxd_done,
+ idxd_task);
+}
+
+static int
+idxd_batch_prep_compare(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch,
+ void *src1, void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
+ struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
+ struct idxd_batch *batch = (struct idxd_batch *)_batch;
+
+ idxd_task->cb = cb_fn;
+
+ return spdk_idxd_batch_prep_compare(chan->chan, batch, src1, src2, nbytes, idxd_done,
+ idxd_task);
+}
+
+static struct spdk_accel_engine idxd_accel_engine = {
+ .get_capabilities = idxd_get_capabilities,
+ .copy = idxd_submit_copy,
+ .batch_get_max = idxd_batch_get_max,
+ .batch_create = idxd_batch_start,
+ .batch_cancel = idxd_batch_cancel,
+ .batch_prep_copy = idxd_batch_prep_copy,
+ .batch_prep_fill = idxd_batch_prep_fill,
+ .batch_prep_dualcast = idxd_batch_prep_dualcast,
+ .batch_prep_crc32c = idxd_batch_prep_crc32c,
+ .batch_prep_compare = idxd_batch_prep_compare,
+ .batch_submit = idxd_batch_submit,
+ .dualcast = idxd_submit_dualcast,
+ .compare = idxd_submit_compare,
+ .fill = idxd_submit_fill,
+ .crc32c = idxd_submit_crc32c,
+ .get_io_channel = idxd_get_io_channel,
+};
+
+/*
+ * Configure the max number of descriptors that a channel is
+ * allowed to use based on the total number of current channels.
+ * This allows dynamic load balancing for HW flow control.
+ */
+static void
+_config_max_desc(struct spdk_io_channel_iter *i)
+{
+ struct idxd_io_channel *chan;
+ struct spdk_io_channel *ch;
+ int rc;
+
+ ch = spdk_io_channel_iter_get_channel(i);
+ chan = spdk_io_channel_get_ctx(ch);
+
+ pthread_mutex_lock(&g_configuration_lock);
+ rc = spdk_idxd_reconfigure_chan(chan->chan, chan->dev->num_channels);
+ pthread_mutex_unlock(&g_configuration_lock);
+ if (rc == 0) {
+ chan->state = IDXD_CHANNEL_ACTIVE;
+ } else {
+ chan->state = IDXD_CHANNEL_ERROR;
+ }
+
+ spdk_for_each_channel_continue(i, 0);
+}
+
+/* Pauses a channel so that it can be re-configured. */
+static void
+_pause_chan(struct spdk_io_channel_iter *i)
+{
+ struct idxd_io_channel *chan;
+ struct spdk_io_channel *ch;
+
+ ch = spdk_io_channel_iter_get_channel(i);
+ chan = spdk_io_channel_get_ctx(ch);
+
+	/* Start queueing new requests while the channel is reconfigured. */
+ chan->state = IDXD_CHANNEL_PAUSED;
+
+ spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+_pause_chan_done(struct spdk_io_channel_iter *i, int status)
+{
+ spdk_for_each_channel(&idxd_accel_engine, _config_max_desc, NULL, NULL);
+}
+
+static int
+idxd_create_cb(void *io_device, void *ctx_buf)
+{
+ struct idxd_io_channel *chan = ctx_buf;
+ struct idxd_device *dev;
+ int rc;
+
+ dev = idxd_select_device();
+ if (dev == NULL) {
+		SPDK_ERRLOG("Failed to get an idxd device\n");
+ return -EINVAL;
+ }
+
+ chan->chan = spdk_idxd_get_channel(dev->idxd);
+ if (chan->chan == NULL) {
+ return -ENOMEM;
+ }
+
+ chan->dev = dev;
+ chan->poller = spdk_poller_register(idxd_poll, chan, 0);
+ TAILQ_INIT(&chan->queued_ops);
+
+ /*
+ * Configure the channel but leave paused until all others
+ * are paused and re-configured based on the new number of
+ * channels. This enables dynamic load balancing for HW
+ * flow control.
+ */
+ pthread_mutex_lock(&g_configuration_lock);
+ rc = spdk_idxd_configure_chan(chan->chan);
+ if (rc) {
+ SPDK_ERRLOG("Failed to configure new channel rc = %d\n", rc);
+ chan->state = IDXD_CHANNEL_ERROR;
+ spdk_poller_unregister(&chan->poller);
+ pthread_mutex_unlock(&g_configuration_lock);
+ return rc;
+ }
+
+ chan->state = IDXD_CHANNEL_PAUSED;
+ chan->dev->num_channels++;
+ pthread_mutex_unlock(&g_configuration_lock);
+
+ /*
+ * Pause all channels so that we can set proper flow control
+ * per channel. When all are paused, we'll update the max
+ * number of descriptors allowed per channel.
+ */
+ spdk_for_each_channel(&idxd_accel_engine, _pause_chan, NULL,
+ _pause_chan_done);
+
+ return 0;
+}
+
+static void
+_pause_chan_destroy_done(struct spdk_io_channel_iter *i, int status)
+{
+ /* Rebalance the rings with the smaller number of remaining channels. */
+ spdk_for_each_channel(&idxd_accel_engine, _config_max_desc, NULL, NULL);
+}
+
+static void
+idxd_destroy_cb(void *io_device, void *ctx_buf)
+{
+ struct idxd_io_channel *chan = ctx_buf;
+
+ pthread_mutex_lock(&g_configuration_lock);
+ assert(chan->dev->num_channels > 0);
+ chan->dev->num_channels--;
+ spdk_idxd_reconfigure_chan(chan->chan, 0);
+ pthread_mutex_unlock(&g_configuration_lock);
+
+ spdk_poller_unregister(&chan->poller);
+ spdk_idxd_put_channel(chan->chan);
+
+ /* Pause each channel then rebalance the max number of ring slots. */
+ spdk_for_each_channel(&idxd_accel_engine, _pause_chan, NULL,
+ _pause_chan_destroy_done);
+}
+
+static struct spdk_io_channel *
+idxd_get_io_channel(void)
+{
+ return spdk_get_io_channel(&idxd_accel_engine);
+}
+
+static bool
+probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
+{
+ struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);
+ struct pci_device *pdev;
+
+ SPDK_NOTICELOG(
+ " Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
+ pci_addr.domain,
+ pci_addr.bus,
+ pci_addr.dev,
+ pci_addr.func,
+ spdk_pci_device_get_vendor_id(pci_dev),
+ spdk_pci_device_get_device_id(pci_dev));
+
+ pdev = calloc(1, sizeof(*pdev));
+ if (pdev == NULL) {
+ return false;
+ }
+ pdev->pci_dev = pci_dev;
+ TAILQ_INSERT_TAIL(&g_pci_devices, pdev, tailq);
+
+	/* Claim the device to avoid conflicts with other processes. */
+ if (spdk_pci_device_claim(pci_dev) < 0) {
+ return false;
+ }
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_idxd_device *idxd)
+{
+ struct idxd_device *dev;
+
+ dev = calloc(1, sizeof(*dev));
+ if (dev == NULL) {
+ SPDK_ERRLOG("Failed to allocate device struct\n");
+ return;
+ }
+
+ dev->idxd = idxd;
+ if (g_next_dev == NULL) {
+ g_next_dev = dev;
+ }
+
+ TAILQ_INSERT_TAIL(&g_idxd_devices, dev, tailq);
+}
+
+void
+accel_engine_idxd_enable_probe(uint32_t config_number)
+{
+ if (config_number > IDXD_MAX_CONFIG_NUM) {
+ SPDK_ERRLOG("Invalid config number, using default of 0\n");
+ config_number = 0;
+ }
+
+ g_config_number = config_number;
+ g_idxd_enable = true;
+ spdk_idxd_set_config(g_config_number);
+}
+
+static int
+accel_engine_idxd_init(void)
+{
+ if (!g_idxd_enable) {
+ return -EINVAL;
+ }
+
+ if (spdk_idxd_probe(NULL, probe_cb, attach_cb) != 0) {
+ SPDK_ERRLOG("spdk_idxd_probe() failed\n");
+ return -EINVAL;
+ }
+
+ g_idxd_initialized = true;
+ SPDK_NOTICELOG("Accel engine updated to use IDXD DSA engine.\n");
+ spdk_accel_hw_engine_register(&idxd_accel_engine);
+ spdk_io_device_register(&idxd_accel_engine, idxd_create_cb, idxd_destroy_cb,
+ sizeof(struct idxd_io_channel), "idxd_accel_engine");
+ return 0;
+}
+
+static void
+accel_engine_idxd_exit(void *ctx)
+{
+ struct idxd_device *dev;
+ struct pci_device *pci_dev;
+
+ if (g_idxd_initialized) {
+ spdk_io_device_unregister(&idxd_accel_engine, NULL);
+ }
+
+ while (!TAILQ_EMPTY(&g_idxd_devices)) {
+ dev = TAILQ_FIRST(&g_idxd_devices);
+ TAILQ_REMOVE(&g_idxd_devices, dev, tailq);
+ spdk_idxd_detach(dev->idxd);
+ free(dev);
+ }
+
+ while (!TAILQ_EMPTY(&g_pci_devices)) {
+ pci_dev = TAILQ_FIRST(&g_pci_devices);
+ TAILQ_REMOVE(&g_pci_devices, pci_dev, tailq);
+ spdk_pci_device_detach(pci_dev->pci_dev);
+ free(pci_dev);
+ }
+
+ spdk_accel_engine_module_finish();
+}
+
+static void
+accel_engine_idxd_write_config_json(struct spdk_json_write_ctx *w)
+{
+ if (g_idxd_enable) {
+ spdk_json_write_object_begin(w);
+ spdk_json_write_named_string(w, "method", "idxd_scan_accel_engine");
+ spdk_json_write_named_object_begin(w, "params");
+ spdk_json_write_named_uint32(w, "config_number", g_config_number);
+ spdk_json_write_object_end(w);
+ spdk_json_write_object_end(w);
+ }
+}
+
+SPDK_ACCEL_MODULE_REGISTER(accel_engine_idxd_init, accel_engine_idxd_exit,
+ NULL, accel_engine_idxd_write_config_json,
+ accel_engine_idxd_get_ctx_size)
+
+SPDK_LOG_REGISTER_COMPONENT("accel_idxd", SPDK_LOG_ACCEL_IDXD)
diff --git a/src/spdk/module/accel/idxd/accel_engine_idxd.h b/src/spdk/module/accel/idxd/accel_engine_idxd.h
new file mode 100644
index 000000000..dac6569a2
--- /dev/null
+++ b/src/spdk/module/accel/idxd/accel_engine_idxd.h
@@ -0,0 +1,43 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SPDK_ACCEL_ENGINE_IDXD_H
+#define SPDK_ACCEL_ENGINE_IDXD_H
+
+#include "spdk/stdinc.h"
+
+#define IDXD_MAX_DEVICES 16
+
+void accel_engine_idxd_enable_probe(uint32_t config_number);
+
+#endif /* SPDK_ACCEL_ENGINE_IDXD_H */
diff --git a/src/spdk/module/accel/idxd/accel_engine_idxd_rpc.c b/src/spdk/module/accel/idxd/accel_engine_idxd_rpc.c
new file mode 100644
index 000000000..c3406c510
--- /dev/null
+++ b/src/spdk/module/accel/idxd/accel_engine_idxd_rpc.c
@@ -0,0 +1,75 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "accel_engine_idxd.h"
+
+#include "spdk/rpc.h"
+#include "spdk/util.h"
+#include "spdk/event.h"
+#include "spdk/stdinc.h"
+#include "spdk/env.h"
+
+struct rpc_idxd_scan_accel_engine {
+ uint32_t config_number;
+};
+
+static const struct spdk_json_object_decoder rpc_idxd_scan_accel_engine_decoder[] = {
+ {"config_number", offsetof(struct rpc_idxd_scan_accel_engine, config_number), spdk_json_decode_uint32},
+};
+
+static void
+rpc_idxd_scan_accel_engine(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *params)
+{
+ struct rpc_idxd_scan_accel_engine req = {};
+ struct spdk_json_write_ctx *w;
+
+ if (params != NULL) {
+ if (spdk_json_decode_object(params, rpc_idxd_scan_accel_engine_decoder,
+ SPDK_COUNTOF(rpc_idxd_scan_accel_engine_decoder),
+ &req)) {
+ SPDK_ERRLOG("spdk_json_decode_object() failed\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "Invalid parameters");
+ return;
+ }
+ }
+
+ SPDK_NOTICELOG("Enabling IDXD with config #%u\n", req.config_number);
+ accel_engine_idxd_enable_probe(req.config_number);
+
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_bool(w, true);
+ spdk_jsonrpc_end_result(request, w);
+}
+SPDK_RPC_REGISTER("idxd_scan_accel_engine", rpc_idxd_scan_accel_engine, SPDK_RPC_STARTUP)
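Example (a sketch): the RPC registered above can be invoked with a standard
JSON-RPC 2.0 request. The method name and "config_number" parameter come from
the decoder in this file; the framing fields are generic JSON-RPC:

  {
    "jsonrpc": "2.0",
    "method": "idxd_scan_accel_engine",
    "params": { "config_number": 0 },
    "id": 1
  }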
diff --git a/src/spdk/module/accel/ioat/Makefile b/src/spdk/module/accel/ioat/Makefile
new file mode 100644
index 000000000..0e43adbb1
--- /dev/null
+++ b/src/spdk/module/accel/ioat/Makefile
@@ -0,0 +1,45 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+SO_VER := 2
+SO_MINOR := 0
+
+LIBNAME = accel_ioat
+C_SRCS = accel_engine_ioat.c accel_engine_ioat_rpc.c
+
+SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map
+
+include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk
diff --git a/src/spdk/module/accel/ioat/accel_engine_ioat.c b/src/spdk/module/accel/ioat/accel_engine_ioat.c
new file mode 100644
index 000000000..0fff3a7c5
--- /dev/null
+++ b/src/spdk/module/accel/ioat/accel_engine_ioat.c
@@ -0,0 +1,764 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "accel_engine_ioat.h"
+
+#include "spdk/stdinc.h"
+
+#include "spdk_internal/accel_engine.h"
+#include "spdk_internal/log.h"
+
+#include "spdk/env.h"
+#include "spdk/conf.h"
+#include "spdk/event.h"
+#include "spdk/thread.h"
+#include "spdk/ioat.h"
+#include "spdk/crc32.h"
+
+#define ALIGN_4K 0x1000
+
+enum ioat_accel_opcode {
+ IOAT_ACCEL_OPCODE_MEMMOVE = 0,
+ IOAT_ACCEL_OPCODE_MEMFILL = 1,
+ IOAT_ACCEL_OPCODE_COMPARE = 2,
+ IOAT_ACCEL_OPCODE_CRC32C = 3,
+ IOAT_ACCEL_OPCODE_DUALCAST = 4,
+};
+
+struct ioat_accel_op {
+ struct ioat_io_channel *ioat_ch;
+ void *cb_arg;
+ spdk_accel_completion_cb cb_fn;
+ void *src;
+ union {
+ void *dst;
+ void *src2;
+ };
+ void *dst2;
+ uint32_t seed;
+ uint64_t fill_pattern;
+ enum ioat_accel_opcode op_code;
+ uint64_t nbytes;
+ TAILQ_ENTRY(ioat_accel_op) link;
+};
+
+static int g_batch_size;
+static bool g_ioat_enable = false;
+static bool g_ioat_initialized = false;
+
+struct ioat_probe_ctx {
+ int num_whitelist_devices;
+ struct spdk_pci_addr whitelist[IOAT_MAX_CHANNELS];
+};
+
+static struct ioat_probe_ctx g_probe_ctx;
+
+struct ioat_device {
+ struct spdk_ioat_chan *ioat;
+ bool is_allocated;
+ /** linked list pointer for device list */
+ TAILQ_ENTRY(ioat_device) tailq;
+};
+
+struct pci_device {
+ struct spdk_pci_device *pci_dev;
+ TAILQ_ENTRY(pci_device) tailq;
+};
+
+static TAILQ_HEAD(, ioat_device) g_devices = TAILQ_HEAD_INITIALIZER(g_devices);
+static pthread_mutex_t g_ioat_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static TAILQ_HEAD(, pci_device) g_pci_devices = TAILQ_HEAD_INITIALIZER(g_pci_devices);
+
+struct ioat_io_channel {
+ struct spdk_ioat_chan *ioat_ch;
+ struct ioat_device *ioat_dev;
+ struct spdk_poller *poller;
+ TAILQ_HEAD(, ioat_accel_op) op_pool;
+ TAILQ_HEAD(, ioat_accel_op) sw_batch; /* for operations not hw accelerated */
+ bool hw_batch; /* for operations that are hw accelerated */
+};
+
+static int
+ioat_find_dev_by_whitelist_bdf(const struct spdk_pci_addr *pci_addr,
+ const struct spdk_pci_addr *whitelist,
+ int num_whitelist_devices)
+{
+ int i;
+
+ for (i = 0; i < num_whitelist_devices; i++) {
+ if (spdk_pci_addr_compare(pci_addr, &whitelist[i]) == 0) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static struct ioat_device *
+ioat_allocate_device(void)
+{
+ struct ioat_device *dev;
+
+ pthread_mutex_lock(&g_ioat_mutex);
+ TAILQ_FOREACH(dev, &g_devices, tailq) {
+ if (!dev->is_allocated) {
+ dev->is_allocated = true;
+ pthread_mutex_unlock(&g_ioat_mutex);
+ return dev;
+ }
+ }
+ pthread_mutex_unlock(&g_ioat_mutex);
+
+ return NULL;
+}
+
+static void
+ioat_free_device(struct ioat_device *dev)
+{
+ pthread_mutex_lock(&g_ioat_mutex);
+ dev->is_allocated = false;
+ pthread_mutex_unlock(&g_ioat_mutex);
+}
+
+struct ioat_task {
+ spdk_accel_completion_cb cb;
+};
+
+static int accel_engine_ioat_init(void);
+static void accel_engine_ioat_exit(void *ctx);
+static void accel_engine_ioat_config_text(FILE *fp);
+
+static size_t
+accel_engine_ioat_get_ctx_size(void)
+{
+ return sizeof(struct ioat_task) + sizeof(struct spdk_accel_task);
+}
+
+SPDK_ACCEL_MODULE_REGISTER(accel_engine_ioat_init, accel_engine_ioat_exit,
+ accel_engine_ioat_config_text, NULL,
+ accel_engine_ioat_get_ctx_size)
+
+static void
+ioat_done(void *cb_arg)
+{
+ struct spdk_accel_task *accel_req;
+ struct ioat_task *ioat_task = cb_arg;
+
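+	/* cb_arg is the ioat_task embedded at offload_ctx inside the
+	 * spdk_accel_task; back up by that offset to recover the task. */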
+ accel_req = (struct spdk_accel_task *)
+ ((uintptr_t)ioat_task -
+ offsetof(struct spdk_accel_task, offload_ctx));
+
+ ioat_task->cb(accel_req, 0);
+}
+
+static int
+ioat_submit_copy(struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes,
+ spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_task *ioat_task = (struct ioat_task *)cb_arg;
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+
+ assert(ioat_ch->ioat_ch != NULL);
+
+ ioat_task->cb = cb_fn;
+
+ return spdk_ioat_submit_copy(ioat_ch->ioat_ch, ioat_task, ioat_done, dst, src, nbytes);
+}
+
+static int
+ioat_submit_fill(struct spdk_io_channel *ch, void *dst, uint8_t fill,
+ uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_task *ioat_task = (struct ioat_task *)cb_arg;
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
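+	/* Multiplying by 0x0101010101010101 replicates the fill byte into
+	 * all eight bytes, e.g. fill = 0xab yields 0xabababababababab. */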
+ uint64_t fill64 = 0x0101010101010101ULL * fill;
+
+ assert(ioat_ch->ioat_ch != NULL);
+
+ ioat_task->cb = cb_fn;
+
+ return spdk_ioat_submit_fill(ioat_ch->ioat_ch, ioat_task, ioat_done, dst, fill64, nbytes);
+}
+
+static int
+ioat_poll(void *arg)
+{
+ struct spdk_ioat_chan *chan = arg;
+
+ return spdk_ioat_process_events(chan) != 0 ? SPDK_POLLER_BUSY :
+ SPDK_POLLER_IDLE;
+}
+
+static struct spdk_io_channel *ioat_get_io_channel(void);
+
+/*
+ * The IOAT engine only supports these capabilities as hardware
+ * accelerated. The accel fw will handle unsupported functions
+ * by calling the software implementations of the functions.
+ */
+static uint64_t
+ioat_get_capabilities(void)
+{
+ return ACCEL_COPY | ACCEL_FILL | ACCEL_BATCH;
+}
+
+/* The IOAT batch functions exposed by the accel fw do not match up 1:1
+ * with the functions in the IOAT library. The IOAT library natively
+ * supports only the construction of hardware-accelerated commands.
+ * The accel_fw batch capabilities are implemented here in the plug-in,
+ * relying on the IOAT library for accelerated commands and on software
+ * implementations for non-accelerated ones.
+ */
+static uint32_t
+ioat_batch_get_max(void)
+{
+ return g_batch_size;
+}
+
+static struct spdk_accel_batch *
+ioat_batch_create(struct spdk_io_channel *ch)
+{
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+
+ if (!TAILQ_EMPTY(&ioat_ch->sw_batch) || (ioat_ch->hw_batch == true)) {
+ SPDK_ERRLOG("IOAT accel engine only supports one batch at a time.\n");
+ return NULL;
+ }
+
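+	/* The address of hw_batch doubles as the opaque batch handle, so
+	 * only one batch per channel can exist at a time. */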
+ return (struct spdk_accel_batch *)&ioat_ch->hw_batch;
+}
+
+static struct ioat_accel_op *
+_prep_op(struct ioat_io_channel *ioat_ch, struct spdk_accel_batch *batch,
+ spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_accel_op *op;
+
+ if ((struct spdk_accel_batch *)&ioat_ch->hw_batch != batch) {
+ SPDK_ERRLOG("Invalid batch\n");
+ return NULL;
+ }
+
+ if (!TAILQ_EMPTY(&ioat_ch->op_pool)) {
+ op = TAILQ_FIRST(&ioat_ch->op_pool);
+ TAILQ_REMOVE(&ioat_ch->op_pool, op, link);
+ } else {
+ SPDK_ERRLOG("Ran out of operations for batch\n");
+ return NULL;
+ }
+
+ op->cb_arg = cb_arg;
+ op->cb_fn = cb_fn;
+ op->ioat_ch = ioat_ch;
+
+ return op;
+}
+
+static int
+ioat_batch_prep_copy(struct spdk_io_channel *ch, struct spdk_accel_batch *batch,
+ void *dst, void *src, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+ struct ioat_task *ioat_task = (struct ioat_task *)cb_arg;
+
+ ioat_task->cb = cb_fn;
+ ioat_ch->hw_batch = true;
+
+ /* Call the IOAT library prep function. */
+ return spdk_ioat_build_copy(ioat_ch->ioat_ch, ioat_task, ioat_done, dst, src, nbytes);
+}
+
+static int
+ioat_batch_prep_fill(struct spdk_io_channel *ch, struct spdk_accel_batch *batch, void *dst,
+ uint8_t fill, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+ struct ioat_task *ioat_task = (struct ioat_task *)cb_arg;
+ uint64_t fill_pattern;
+
+ ioat_task->cb = cb_fn;
+ ioat_ch->hw_batch = true;
+ memset(&fill_pattern, fill, sizeof(uint64_t));
+
+ /* Call the IOAT library prep function. */
+ return spdk_ioat_build_fill(ioat_ch->ioat_ch, ioat_task, ioat_done, dst, fill_pattern, nbytes);
+}
+
+static int
+ioat_batch_prep_dualcast(struct spdk_io_channel *ch,
+ struct spdk_accel_batch *batch, void *dst1, void *dst2,
+ void *src, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_accel_op *op;
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+
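+	/* ALIGN_4K - 1 masks the low 12 bits; a nonzero result means the
+	 * address is not 4 KiB aligned. */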
+ if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
+ SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
+ return -EINVAL;
+ }
+
+ op = _prep_op(ioat_ch, batch, cb_fn, cb_arg);
+ if (op == NULL) {
+ return -EINVAL;
+ }
+
+ /* Command specific. */
+ op->src = src;
+ op->dst = dst1;
+ op->dst2 = dst2;
+ op->nbytes = nbytes;
+ op->op_code = IOAT_ACCEL_OPCODE_DUALCAST;
+ TAILQ_INSERT_TAIL(&ioat_ch->sw_batch, op, link);
+
+ return 0;
+}
+
+static int
+ioat_batch_prep_compare(struct spdk_io_channel *ch,
+ struct spdk_accel_batch *batch, void *src1,
+ void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_accel_op *op;
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+
+ op = _prep_op(ioat_ch, batch, cb_fn, cb_arg);
+ if (op == NULL) {
+ return -EINVAL;
+ }
+
+ /* Command specific. */
+ op->src = src1;
+ op->src2 = src2;
+ op->nbytes = nbytes;
+ op->op_code = IOAT_ACCEL_OPCODE_COMPARE;
+ TAILQ_INSERT_TAIL(&ioat_ch->sw_batch, op, link);
+
+ return 0;
+}
+
+static int
+ioat_batch_prep_crc32c(struct spdk_io_channel *ch,
+ struct spdk_accel_batch *batch, uint32_t *dst, void *src,
+ uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_accel_op *op;
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+
+ op = _prep_op(ioat_ch, batch, cb_fn, cb_arg);
+ if (op == NULL) {
+ return -EINVAL;
+ }
+
+ /* Command specific. */
+ op->dst = (void *)dst;
+ op->src = src;
+ op->seed = seed;
+ op->nbytes = nbytes;
+ op->op_code = IOAT_ACCEL_OPCODE_CRC32C;
+ TAILQ_INSERT_TAIL(&ioat_ch->sw_batch, op, link);
+
+ return 0;
+}
+
+static int
+ioat_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *batch)
+{
+ struct ioat_accel_op *op;
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+
+ if ((struct spdk_accel_batch *)&ioat_ch->hw_batch != batch) {
+ SPDK_ERRLOG("Invalid batch\n");
+ return -EINVAL;
+ }
+
+	/* Flush the batched HW items; there's no way to cancel these without resetting. */
+ spdk_ioat_flush(ioat_ch->ioat_ch);
+ ioat_ch->hw_batch = false;
+
+ /* Return batched software items to the pool. */
+ while ((op = TAILQ_FIRST(&ioat_ch->sw_batch))) {
+ TAILQ_REMOVE(&ioat_ch->sw_batch, op, link);
+ TAILQ_INSERT_TAIL(&ioat_ch->op_pool, op, link);
+ }
+
+ return 0;
+}
+
+static int
+ioat_batch_submit(struct spdk_io_channel *ch, struct spdk_accel_batch *batch,
+ spdk_accel_completion_cb cb_fn, void *cb_arg)
+{
+ struct ioat_accel_op *op;
+ struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
+ struct spdk_accel_task *accel_req;
+ int batch_status = 0, cmd_status = 0;
+
+ if ((struct spdk_accel_batch *)&ioat_ch->hw_batch != batch) {
+ SPDK_ERRLOG("Invalid batch\n");
+ return -EINVAL;
+ }
+
+ /* Flush the batched HW items first. */
+ spdk_ioat_flush(ioat_ch->ioat_ch);
+ ioat_ch->hw_batch = false;
+
+ /* Complete the batched software items. */
+ while ((op = TAILQ_FIRST(&ioat_ch->sw_batch))) {
+ TAILQ_REMOVE(&ioat_ch->sw_batch, op, link);
+ accel_req = (struct spdk_accel_task *)((uintptr_t)op->cb_arg -
+ offsetof(struct spdk_accel_task, offload_ctx));
+
+ switch (op->op_code) {
+ case IOAT_ACCEL_OPCODE_DUALCAST:
+ memcpy(op->dst, op->src, op->nbytes);
+ memcpy(op->dst2, op->src, op->nbytes);
+ break;
+ case IOAT_ACCEL_OPCODE_COMPARE:
+ cmd_status = memcmp(op->src, op->src2, op->nbytes);
+ break;
+ case IOAT_ACCEL_OPCODE_CRC32C:
+ *(uint32_t *)op->dst = spdk_crc32c_update(op->src, op->nbytes, ~op->seed);
+ break;
+ default:
+ assert(false);
+ break;
+ }
+
+ batch_status |= cmd_status;
+ op->cb_fn(accel_req, cmd_status);
+ TAILQ_INSERT_TAIL(&ioat_ch->op_pool, op, link);
+ }
+
+ /* Now complete the batch request itself. */
+ accel_req = (struct spdk_accel_task *)((uintptr_t)cb_arg -
+ offsetof(struct spdk_accel_task, offload_ctx));
+ cb_fn(accel_req, batch_status);
+
+ return 0;
+}
+
+static struct spdk_accel_engine ioat_accel_engine = {
+ .get_capabilities = ioat_get_capabilities,
+ .copy = ioat_submit_copy,
+ .fill = ioat_submit_fill,
+ .batch_get_max = ioat_batch_get_max,
+ .batch_create = ioat_batch_create,
+ .batch_cancel = ioat_batch_cancel,
+ .batch_prep_copy = ioat_batch_prep_copy,
+ .batch_prep_dualcast = ioat_batch_prep_dualcast,
+ .batch_prep_compare = ioat_batch_prep_compare,
+ .batch_prep_fill = ioat_batch_prep_fill,
+ .batch_prep_crc32c = ioat_batch_prep_crc32c,
+ .batch_submit = ioat_batch_submit,
+ .get_io_channel = ioat_get_io_channel,
+};
+
+static int
+ioat_create_cb(void *io_device, void *ctx_buf)
+{
+ struct ioat_io_channel *ch = ctx_buf;
+ struct ioat_device *ioat_dev;
+ struct ioat_accel_op *op;
+ int i;
+
+ ioat_dev = ioat_allocate_device();
+ if (ioat_dev == NULL) {
+ return -1;
+ }
+
+ TAILQ_INIT(&ch->sw_batch);
+ ch->hw_batch = false;
+ TAILQ_INIT(&ch->op_pool);
+
+ g_batch_size = spdk_ioat_get_max_descriptors(ioat_dev->ioat);
+ for (i = 0 ; i < g_batch_size ; i++) {
+ op = calloc(1, sizeof(struct ioat_accel_op));
+ if (op == NULL) {
+ SPDK_ERRLOG("Failed to allocate operation for batch.\n");
+ while ((op = TAILQ_FIRST(&ch->op_pool))) {
+ TAILQ_REMOVE(&ch->op_pool, op, link);
+ free(op);
+ }
+ return -ENOMEM;
+ }
+ TAILQ_INSERT_TAIL(&ch->op_pool, op, link);
+ }
+
+ ch->ioat_dev = ioat_dev;
+ ch->ioat_ch = ioat_dev->ioat;
+ ch->poller = SPDK_POLLER_REGISTER(ioat_poll, ch->ioat_ch, 0);
+ return 0;
+}
+
+static void
+ioat_destroy_cb(void *io_device, void *ctx_buf)
+{
+ struct ioat_io_channel *ch = ctx_buf;
+ struct ioat_accel_op *op;
+
+ while ((op = TAILQ_FIRST(&ch->op_pool))) {
+ TAILQ_REMOVE(&ch->op_pool, op, link);
+ free(op);
+ }
+
+ ioat_free_device(ch->ioat_dev);
+ spdk_poller_unregister(&ch->poller);
+}
+
+static struct spdk_io_channel *
+ioat_get_io_channel(void)
+{
+ return spdk_get_io_channel(&ioat_accel_engine);
+}
+
+static bool
+probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
+{
+ struct ioat_probe_ctx *ctx = cb_ctx;
+ struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);
+ struct pci_device *pdev;
+
+ SPDK_INFOLOG(SPDK_LOG_ACCEL_IOAT,
+ " Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
+ pci_addr.domain,
+ pci_addr.bus,
+ pci_addr.dev,
+ pci_addr.func,
+ spdk_pci_device_get_vendor_id(pci_dev),
+ spdk_pci_device_get_device_id(pci_dev));
+
+ pdev = calloc(1, sizeof(*pdev));
+ if (pdev == NULL) {
+ return false;
+ }
+ pdev->pci_dev = pci_dev;
+ TAILQ_INSERT_TAIL(&g_pci_devices, pdev, tailq);
+
+ if (ctx->num_whitelist_devices > 0 &&
+ !ioat_find_dev_by_whitelist_bdf(&pci_addr, ctx->whitelist, ctx->num_whitelist_devices)) {
+ return false;
+ }
+
+	/* Claim the device to avoid conflicts with other processes. */
+ if (spdk_pci_device_claim(pci_dev) < 0) {
+ return false;
+ }
+
+ return true;
+}
+
+static void
+attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_ioat_chan *ioat)
+{
+ struct ioat_device *dev;
+
+ dev = calloc(1, sizeof(*dev));
+ if (dev == NULL) {
+ SPDK_ERRLOG("Failed to allocate device struct\n");
+ return;
+ }
+
+ dev->ioat = ioat;
+ TAILQ_INSERT_TAIL(&g_devices, dev, tailq);
+}
+
+void
+accel_engine_ioat_enable_probe(void)
+{
+ g_ioat_enable = true;
+}
+
+static int
+accel_engine_ioat_add_whitelist_device(const char *pci_bdf)
+{
+ if (pci_bdf == NULL) {
+ return -1;
+ }
+
+ if (g_probe_ctx.num_whitelist_devices >= IOAT_MAX_CHANNELS) {
+ SPDK_ERRLOG("Ioat whitelist is full (max size is %d)\n",
+ IOAT_MAX_CHANNELS);
+ return -1;
+ }
+
+ if (spdk_pci_addr_parse(&g_probe_ctx.whitelist[g_probe_ctx.num_whitelist_devices],
+ pci_bdf) < 0) {
+ SPDK_ERRLOG("Invalid address %s\n", pci_bdf);
+ return -1;
+ }
+
+ g_probe_ctx.num_whitelist_devices++;
+
+ return 0;
+}
+
+int
+accel_engine_ioat_add_whitelist_devices(const char *pci_bdfs[], size_t num_pci_bdfs)
+{
+ size_t i;
+
+ for (i = 0; i < num_pci_bdfs; i++) {
+ if (accel_engine_ioat_add_whitelist_device(pci_bdfs[i]) < 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+accel_engine_ioat_read_config_file_params(struct spdk_conf_section *sp)
+{
+ int i;
+ char *val, *pci_bdf;
+
+ if (spdk_conf_section_get_boolval(sp, "Enable", false)) {
+ g_ioat_enable = true;
+ /* Enable Ioat */
+ }
+
+ val = spdk_conf_section_get_val(sp, "Disable");
+ if (val != NULL) {
+ SPDK_WARNLOG("\"Disable\" option is deprecated and will be removed in a future release.\n");
+ SPDK_WARNLOG("IOAT is now disabled by default. It may be enabled by \"Enable Yes\"\n");
+
+ if (g_ioat_enable && (strcasecmp(val, "Yes") == 0)) {
+ SPDK_ERRLOG("\"Enable Yes\" and \"Disable Yes\" cannot be set at the same time\n");
+ return -1;
+ }
+ }
+
+ /* Init the whitelist */
+ for (i = 0; ; i++) {
+ pci_bdf = spdk_conf_section_get_nmval(sp, "Whitelist", i, 0);
+ if (!pci_bdf) {
+ break;
+ }
+
+ if (accel_engine_ioat_add_whitelist_device(pci_bdf) < 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+accel_engine_ioat_init(void)
+{
+ struct spdk_conf_section *sp;
+ int rc;
+
+ sp = spdk_conf_find_section(NULL, "Ioat");
+ if (sp != NULL) {
+ rc = accel_engine_ioat_read_config_file_params(sp);
+ if (rc != 0) {
+ SPDK_ERRLOG("accel_engine_ioat_read_config_file_params() failed\n");
+ return rc;
+ }
+ }
+
+ if (!g_ioat_enable) {
+ return 0;
+ }
+
+ if (spdk_ioat_probe(&g_probe_ctx, probe_cb, attach_cb) != 0) {
+ SPDK_ERRLOG("spdk_ioat_probe() failed\n");
+ return -1;
+ }
+
+ g_ioat_initialized = true;
+ SPDK_NOTICELOG("Accel engine updated to use IOAT engine.\n");
+ spdk_accel_hw_engine_register(&ioat_accel_engine);
+ spdk_io_device_register(&ioat_accel_engine, ioat_create_cb, ioat_destroy_cb,
+ sizeof(struct ioat_io_channel), "ioat_accel_engine");
+ return 0;
+}
+
+static void
+accel_engine_ioat_exit(void *ctx)
+{
+ struct ioat_device *dev;
+ struct pci_device *pci_dev;
+
+ if (g_ioat_initialized) {
+ spdk_io_device_unregister(&ioat_accel_engine, NULL);
+ }
+
+ while (!TAILQ_EMPTY(&g_devices)) {
+ dev = TAILQ_FIRST(&g_devices);
+ TAILQ_REMOVE(&g_devices, dev, tailq);
+ spdk_ioat_detach(dev->ioat);
+ ioat_free_device(dev);
+ free(dev);
+ }
+
+ while (!TAILQ_EMPTY(&g_pci_devices)) {
+ pci_dev = TAILQ_FIRST(&g_pci_devices);
+ TAILQ_REMOVE(&g_pci_devices, pci_dev, tailq);
+ spdk_pci_device_detach(pci_dev->pci_dev);
+ free(pci_dev);
+ }
+
+ spdk_accel_engine_module_finish();
+}
+
+#define ACCEL_ENGINE_IOAT_HEADER_TMPL \
+"[Ioat]\n" \
+" # Users may not want to use offload even it is available.\n" \
+" # Users may use the whitelist to initialize specified devices, IDS\n" \
+" # uses BUS:DEVICE.FUNCTION to identify each Ioat channel.\n"
+
+#define ACCEL_ENGINE_IOAT_ENABLE_TMPL \
+" Enable %s\n"
+
+#define ACCEL_ENGINE_IOAT_WHITELIST_TMPL \
+" Whitelist %.4" PRIx16 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8 "\n"
+
+static void
+accel_engine_ioat_config_text(FILE *fp)
+{
+ int i;
+ struct spdk_pci_addr *dev;
+
+ fprintf(fp, ACCEL_ENGINE_IOAT_HEADER_TMPL);
+ fprintf(fp, ACCEL_ENGINE_IOAT_ENABLE_TMPL, g_ioat_enable ? "Yes" : "No");
+
+ for (i = 0; i < g_probe_ctx.num_whitelist_devices; i++) {
+ dev = &g_probe_ctx.whitelist[i];
+ fprintf(fp, ACCEL_ENGINE_IOAT_WHITELIST_TMPL,
+ dev->domain, dev->bus, dev->dev, dev->func);
+ }
+}
+
+SPDK_LOG_REGISTER_COMPONENT("accel_ioat", SPDK_LOG_ACCEL_IOAT)
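For reference, the config templates above emit a legacy INI-style section of
the following form (a sketch; the whitelist BDF is illustrative):

  [Ioat]
    Enable Yes
    Whitelist 0000:00:04.0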
diff --git a/src/spdk/module/accel/ioat/accel_engine_ioat.h b/src/spdk/module/accel/ioat/accel_engine_ioat.h
new file mode 100644
index 000000000..26a167eb6
--- /dev/null
+++ b/src/spdk/module/accel/ioat/accel_engine_ioat.h
@@ -0,0 +1,44 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SPDK_ACCEL_ENGINE_IOAT_H
+#define SPDK_ACCEL_ENGINE_IOAT_H
+
+#include "spdk/stdinc.h"
+
+#define IOAT_MAX_CHANNELS 64
+
+int accel_engine_ioat_add_whitelist_devices(const char *pci_bdfs[], size_t num_pci_bdfs);
+void accel_engine_ioat_enable_probe(void);
+
+#endif /* SPDK_ACCEL_ENGINE_IOAT_H */
diff --git a/src/spdk/module/accel/ioat/accel_engine_ioat_rpc.c b/src/spdk/module/accel/ioat/accel_engine_ioat_rpc.c
new file mode 100644
index 000000000..7f2322536
--- /dev/null
+++ b/src/spdk/module/accel/ioat/accel_engine_ioat_rpc.c
@@ -0,0 +1,116 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "accel_engine_ioat.h"
+
+#include "spdk/rpc.h"
+#include "spdk/util.h"
+#include "spdk/event.h"
+
+struct rpc_pci_whitelist {
+ size_t num_bdfs;
+ char *bdfs[IOAT_MAX_CHANNELS];
+};
+
+static int
+decode_rpc_pci_whitelist(const struct spdk_json_val *val, void *out)
+{
+ struct rpc_pci_whitelist *pci_whitelist = out;
+
+ return spdk_json_decode_array(val, spdk_json_decode_string, pci_whitelist->bdfs,
+ IOAT_MAX_CHANNELS, &pci_whitelist->num_bdfs, sizeof(char *));
+}
+
+static void
+free_rpc_pci_whitelist(struct rpc_pci_whitelist *list)
+{
+ size_t i;
+
+ for (i = 0; i < list->num_bdfs; i++) {
+ free(list->bdfs[i]);
+ }
+}
+
+struct rpc_ioat_scan_accel_engine {
+ struct rpc_pci_whitelist pci_whitelist;
+};
+
+static void
+free_rpc_ioat_scan_accel_engine(struct rpc_ioat_scan_accel_engine *p)
+{
+ free_rpc_pci_whitelist(&p->pci_whitelist);
+}
+
+static const struct spdk_json_object_decoder rpc_ioat_scan_accel_engine_decoder[] = {
+ {"pci_whitelist", offsetof(struct rpc_ioat_scan_accel_engine, pci_whitelist), decode_rpc_pci_whitelist},
+};
+
+static void
+rpc_ioat_scan_accel_engine(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *params)
+{
+ struct rpc_ioat_scan_accel_engine req = {};
+ struct spdk_json_write_ctx *w;
+ int rc;
+
+ if (params != NULL) {
+ if (spdk_json_decode_object(params, rpc_ioat_scan_accel_engine_decoder,
+ SPDK_COUNTOF(rpc_ioat_scan_accel_engine_decoder),
+ &req)) {
+ free_rpc_ioat_scan_accel_engine(&req);
+ SPDK_ERRLOG("spdk_json_decode_object() failed\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "Invalid parameters");
+ return;
+ }
+
+ rc = accel_engine_ioat_add_whitelist_devices((const char **)req.pci_whitelist.bdfs,
+ req.pci_whitelist.num_bdfs);
+ free_rpc_ioat_scan_accel_engine(&req);
+ if (rc < 0) {
+ SPDK_ERRLOG("accel_engine_ioat_add_whitelist_devices() failed\n");
+ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+ "Invalid parameters");
+ return;
+ }
+ }
+
+ accel_engine_ioat_enable_probe();
+
+ w = spdk_jsonrpc_begin_result(request);
+ spdk_json_write_bool(w, true);
+ spdk_jsonrpc_end_result(request, w);
+}
+SPDK_RPC_REGISTER("ioat_scan_accel_engine", rpc_ioat_scan_accel_engine, SPDK_RPC_STARTUP)
+SPDK_RPC_REGISTER_ALIAS_DEPRECATED(ioat_scan_accel_engine, ioat_scan_copy_engine)
+SPDK_RPC_REGISTER_ALIAS_DEPRECATED(ioat_scan_accel_engine, scan_ioat_copy_engine)
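Example (a sketch): the RPC registered above accepts an optional
"pci_whitelist" array, per the decoder in this file; params may be omitted
entirely to probe all channels, and the BDF below is illustrative:

  {
    "jsonrpc": "2.0",
    "method": "ioat_scan_accel_engine",
    "params": { "pci_whitelist": [ "0000:00:04.0" ] },
    "id": 1
  }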