author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/lib/librte_cryptodev
parent    Initial commit. (diff)
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/lib/librte_cryptodev')
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/Makefile                  |   30
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/cryptodev_trace_points.c  |   75
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/meson.build               |   13
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_crypto.h              |  445
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_crypto_asym.h         |  648
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_crypto_sym.h          |  939
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev.c           | 2014
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev.h           | 1341
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.c       |  159
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h       |  521
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_trace.h     |  149
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_trace_fp.h  |   38
-rw-r--r--  src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_version.map |  106
13 files changed, 6478 insertions, 0 deletions
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/Makefile b/src/spdk/dpdk/lib/librte_cryptodev/Makefile
new file mode 100644
index 000000000..73e77a27c
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2019 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_cryptodev.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
+LDLIBS += -lrte_kvargs
+
+# library source files
+SRCS-y += rte_cryptodev.c rte_cryptodev_pmd.c cryptodev_trace_points.c
+
+# export include files
+SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_asym.h
+SYMLINK-y-include += rte_crypto_sym.h
+SYMLINK-y-include += rte_cryptodev.h
+SYMLINK-y-include += rte_cryptodev_pmd.h
+SYMLINK-y-include += rte_cryptodev_trace.h
+SYMLINK-y-include += rte_cryptodev_trace_fp.h
+
+# versioning export map
+EXPORT_MAP := rte_cryptodev_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/cryptodev_trace_points.c b/src/spdk/dpdk/lib/librte_cryptodev/cryptodev_trace_points.c
new file mode 100644
index 000000000..7672c7b99
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/cryptodev_trace_points.c
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_trace_point_register.h>
+
+#include "rte_cryptodev_trace.h"
+
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_configure);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_start);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_stop);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_close);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_queue_pair_setup);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_sym_session_pool_create);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_sym_session_create);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_asym_session_create);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_sym_session_free);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_asym_session_free);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_sym_session_init);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_asym_session_init);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_sym_session_clear);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_asym_session_clear);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_enqueue_burst);
+RTE_TRACE_POINT_DEFINE(rte_cryptodev_trace_dequeue_burst);
+
+RTE_INIT(cryptodev_trace_init)
+{
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_configure,
+ lib.cryptodev.configure);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_start,
+ lib.cryptodev.start);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_stop,
+ lib.cryptodev.stop);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_close,
+ lib.cryptodev.close);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_queue_pair_setup,
+ lib.cryptodev.queue.pair.setup);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_sym_session_pool_create,
+ lib.cryptodev.sym.pool.create);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_sym_session_create,
+ lib.cryptodev.sym.create);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_asym_session_create,
+ lib.cryptodev.asym.create);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_sym_session_free,
+ lib.cryptodev.sym.free);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_asym_session_free,
+ lib.cryptodev.asym.free);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_sym_session_init,
+ lib.cryptodev.sym.init);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_asym_session_init,
+ lib.cryptodev.asym.init);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_sym_session_clear,
+ lib.cryptodev.sym.clear);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_asym_session_clear,
+ lib.cryptodev.asym.clear);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_enqueue_burst,
+ lib.cryptodev.enq.burst);
+
+ RTE_TRACE_POINT_REGISTER(rte_cryptodev_trace_dequeue_burst,
+ lib.cryptodev.deq.burst);
+}
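
These trace points can be toggled at run time by name. A minimal sketch, assuming EAL tracing is compiled in and initialised (rte_trace_pattern() is part of the EAL trace API, not of this file):

	#include <stdio.h>
	#include <rte_trace.h>

	/* Enable every cryptodev trace point registered above. */
	if (rte_trace_pattern("lib.cryptodev.*", true) < 0)
		printf("failed to enable cryptodev trace points\n");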
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/meson.build b/src/spdk/dpdk/lib/librte_cryptodev/meson.build
new file mode 100644
index 000000000..df1144058
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2019 Intel Corporation
+
+use_function_versioning = true
+sources = files('rte_cryptodev.c', 'rte_cryptodev_pmd.c', 'cryptodev_trace_points.c')
+headers = files('rte_cryptodev.h',
+ 'rte_cryptodev_pmd.h',
+ 'rte_cryptodev_trace.h',
+ 'rte_cryptodev_trace_fp.h',
+ 'rte_crypto.h',
+ 'rte_crypto_sym.h',
+ 'rte_crypto_asym.h')
+deps += ['kvargs', 'mbuf']
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto.h b/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto.h
new file mode 100644
index 000000000..fd5ef3a87
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _RTE_CRYPTO_H_
+#define _RTE_CRYPTO_H_
+
+/**
+ * @file rte_crypto.h
+ *
+ * RTE Cryptography Common Definitions
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_common.h>
+
+#include "rte_crypto_sym.h"
+#include "rte_crypto_asym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ /**< Undefined operation type */
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ /**< Symmetric operation */
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC
+ /**< Asymmetric operation */
+};
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ /**< Operation completed successfully */
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by a crypto device */
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ /**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**<
+ * Symmetric operation failed due to invalid session arguments, or if
+ * in session-less mode, failed to allocate private operation material.
+ */
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ /**< Error handling operation */
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_op_sess_type {
+ RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_OP_SESSIONLESS, /**< Session-less crypto operation */
+ RTE_CRYPTO_OP_SECURITY_SESSION /**< Security session crypto operation */
+};
+
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API. PMDs should check the type parameter to
+ * verify that the operation is a supported function of the device. Crypto
+ * operations are enqueued and dequeued in crypto PMDs using
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst().
+ */
+struct rte_crypto_op {
+ __extension__
+ union {
+ uint64_t raw;
+ __extension__
+ struct {
+ uint8_t type;
+ /**< operation type */
+ uint8_t status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation
+ * from mempool and will be set to
+ * RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+ uint8_t sess_type;
+ /**< operation session type */
+ uint8_t reserved[3];
+ /**< Reserved bytes to fill 64 bits for
+ * future additions
+ */
+ uint16_t private_data_offset;
+ /**< Offset to indicate start of private data (if any).
+ * The offset is counted from the start of the
+ * rte_crypto_op including IV.
+ * The private data may be used by the application
+ * to store information which should remain untouched
+ * in the library/driver
+ */
+ };
+ };
+ struct rte_mempool *mempool;
+ /**< crypto operation mempool which operation is allocated from */
+
+ rte_iova_t phys_addr;
+ /**< physical address of crypto operation */
+
+ __extension__
+ union {
+ struct rte_crypto_sym_op sym[0];
+ /**< Symmetric operation parameters */
+
+ struct rte_crypto_asym_op asym[0];
+ /**< Asymmetric operation parameters */
+
+ }; /**< operation specific parameters */
+};
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->type = type;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
+
+ switch (type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ __rte_crypto_sym_op_reset(op->sym);
+ break;
+ case RTE_CRYPTO_OP_TYPE_ASYMMETRIC:
+ memset(op->asym, 0, sizeof(struct rte_crypto_asym_op));
+ break;
+ case RTE_CRYPTO_OP_TYPE_UNDEFINED:
+ default:
+ break;
+ }
+}
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ /**< Crypto op pool operation type. */
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ (struct rte_crypto_op_pool_private *) rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Bulk allocate raw elements from mempool and return them as crypto operations
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns number of ops allocated
+ * - On failure returns 0, or -EINVAL if the requested type does not
+ *   match the pool's op type
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ priv = (struct rte_crypto_op_pool_private *) rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != type &&
+ priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+ return -EINVAL;
+
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = NULL;
+ int retval;
+
+ retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+ if (unlikely(retval != 1))
+ return NULL;
+
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ * @param ops Array to place allocated crypto operations
+ * @param nb_ops Number of crypto operations to allocate
+ *
+ * @returns
+ * - nb_ops if the number of operations requested were allocated.
+ * - 0 if the requested number of ops are not available.
+ * None are allocated in this case.
+ */
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+ != nb_ops))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_crypto_op_reset(ops[i], type);
+
+ return nb_ops;
+}
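
As an illustration, a minimal sketch of creating an op pool and allocating a burst of symmetric operations from it (the pool name and sizing values below are arbitrary example choices, not library defaults):

	struct rte_mempool *pool;
	struct rte_crypto_op *burst[32];

	pool = rte_crypto_op_pool_create("crypto_op_pool",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			8192, 128, 0, rte_socket_id());
	if (pool == NULL)
		rte_exit(EXIT_FAILURE, "cannot create crypto op pool\n");

	/* Either all 32 ops are allocated and reset, or none are. */
	if (rte_crypto_op_bulk_alloc(pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			burst, 32) == 32) {
		/* ... fill in and enqueue the ops ... */
		for (unsigned int i = 0; i < 32; i++)
			rte_crypto_op_free(burst[i]);
	}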
+
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size)) {
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_sym_op));
+ if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_asym_op));
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Free crypto operation structure.
+ * If the operation has been allocated from an rte_mempool, it will be
+ * returned to the mempool.
+ *
+ * @param op crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf which is associated with the crypto operation, the
+ * operation will be allocated in the private data of that
+ * mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /*
+ * check that the mbuf's private data size is sufficient to contain a
+ * crypto operation
+ */
+ if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op))))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym->m_src = m;
+
+ return op;
+}
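
A hedged sketch of this usage; the mbuf pool must have been created with a private area large enough for the op pair, e.g. via rte_pktmbuf_pool_create() (the pool name and sizes here are illustrative assumptions):

	uint16_t priv_size = RTE_ALIGN(sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op), RTE_MBUF_PRIV_ALIGN);
	struct rte_mempool *mb_pool = rte_pktmbuf_pool_create("mb_pool",
			8192, 256, priv_size, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	struct rte_mbuf *m = rte_pktmbuf_alloc(mb_pool);
	struct rte_crypto_op *op =
		rte_crypto_sym_op_alloc_from_mbuf_priv_data(m);
	if (op == NULL)
		/* NULL mbuf or private area too small */
		rte_pktmbuf_free(m);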
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ void *priv_data;
+ uint32_t size;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return NULL;
+
+ size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+ nb_xforms);
+}
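
For example, a session-less symmetric operation can carry two chained transforms in the op private data, provided its op pool was created with a priv_size of at least 2 * sizeof(struct rte_crypto_sym_xform) (a sketch):

	struct rte_crypto_sym_xform *xform =
		rte_crypto_op_sym_xforms_alloc(op, 2);
	if (xform != NULL) {
		xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		/* ... fill in cipher and auth parameters ... */
	}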
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param op crypto operation, must be of type symmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -1;
+
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+ return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
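
In session-based mode this is typically paired with a session created and initialised through the cryptodev API (a sketch under that assumption; see rte_cryptodev.h for the session create/init details, session_mp being an application-provided session mempool):

	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(session_mp);
	/* ... initialise sess on a device with the desired xforms ... */

	if (rte_crypto_op_attach_sym_session(op, sess) < 0)
		rte_crypto_op_free(op); /* op was not a symmetric op */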
+
+/**
+ * Attach an asymmetric session to a crypto operation
+ *
+ * @param op crypto operation, must be of type asymmetric
+ * @param sess cryptodev session
+ */
+static inline int
+rte_crypto_op_attach_asym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_asym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_ASYMMETRIC))
+ return -1;
+
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->asym->session = sess;
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_H_ */
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto_asym.h b/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto_asym.h
new file mode 100644
index 000000000..9c866f553
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto_asym.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef _RTE_CRYPTO_ASYM_H_
+#define _RTE_CRYPTO_ASYM_H_
+
+/**
+ * @file rte_crypto_asym.h
+ *
+ * RTE Definitions for Asymmetric Cryptography
+ *
+ * Defines asymmetric algorithms and modes, as well as supported
+ * asymmetric crypto operations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+#include <stdint.h>
+
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_common.h>
+
+#include "rte_crypto_sym.h"
+
+/**
+ * Buffer to hold crypto params required for asym operations.
+ *
+ * These buffers can be used for both input to PMD and output from PMD. When
+ * used for output from PMD, the application has to ensure the buffer is large
+ * enough to hold the target data.
+ */
+typedef struct rte_crypto_param_t {
+ uint8_t *data;
+ /**< pointer to buffer holding data */
+ rte_iova_t iova;
+ /**< IO address of data buffer */
+ size_t length;
+ /**< length of data in bytes */
+} rte_crypto_param;
+
+/** asym xform type name strings */
+extern const char *
+rte_crypto_asym_xform_strings[];
+
+/** asym operations type name strings */
+extern const char *
+rte_crypto_asym_op_strings[];
+
+/**
+ * Asymmetric crypto transformation types.
+ * Each xform type maps to one asymmetric algorithm
+ * performing specific operation
+ *
+ */
+enum rte_crypto_asym_xform_type {
+ RTE_CRYPTO_ASYM_XFORM_UNSPECIFIED = 0,
+ /**< Invalid xform. */
+ RTE_CRYPTO_ASYM_XFORM_NONE,
+ /**< Xform type None.
+ * May be supported by a PMD to provide a
+ * passthrough op for debugging purposes.
+ * If xform_type is none, op_type is disregarded.
+ */
+ RTE_CRYPTO_ASYM_XFORM_RSA,
+ /**< RSA. Performs Encrypt, Decrypt, Sign and Verify.
+ * Refer to rte_crypto_asym_op_type
+ */
+ RTE_CRYPTO_ASYM_XFORM_DH,
+ /**< Diffie-Hellman.
+ * Performs Key Generate and Shared Secret Compute.
+ * Refer to rte_crypto_asym_op_type
+ */
+ RTE_CRYPTO_ASYM_XFORM_DSA,
+ /**< Digital Signature Algorithm
+ * Performs Signature Generation and Verification.
+ * Refer to rte_crypto_asym_op_type
+ */
+ RTE_CRYPTO_ASYM_XFORM_MODINV,
+ /**< Modular Multiplicative Inverse
+ * Perform Modular Multiplicative Inverse b^(-1) mod n
+ */
+ RTE_CRYPTO_ASYM_XFORM_MODEX,
+ /**< Modular Exponentiation
+ * Perform Modular Exponentiation b^e mod n
+ */
+ RTE_CRYPTO_ASYM_XFORM_ECDSA,
+ /**< Elliptic Curve Digital Signature Algorithm
+ * Perform Signature Generation and Verification.
+ */
+ RTE_CRYPTO_ASYM_XFORM_ECPM,
+ /**< Elliptic Curve Point Multiplication */
+ RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+ /**< End of list */
+};
+
+/**
+ * Asymmetric crypto operation type variants
+ */
+enum rte_crypto_asym_op_type {
+ RTE_CRYPTO_ASYM_OP_ENCRYPT,
+ /**< Asymmetric Encrypt operation */
+ RTE_CRYPTO_ASYM_OP_DECRYPT,
+ /**< Asymmetric Decrypt operation */
+ RTE_CRYPTO_ASYM_OP_SIGN,
+ /**< Signature Generation operation */
+ RTE_CRYPTO_ASYM_OP_VERIFY,
+ /**< Signature Verification operation */
+ RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE,
+ /**< DH Private Key generation operation */
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE,
+ /**< DH Public Key generation operation */
+ RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE,
+ /**< DH Shared Secret compute operation */
+ RTE_CRYPTO_ASYM_OP_LIST_END
+};
+
+/**
+ * Padding types for RSA signature.
+ */
+enum rte_crypto_rsa_padding_type {
+ RTE_CRYPTO_RSA_PADDING_NONE = 0,
+ /**< RSA no padding scheme */
+ RTE_CRYPTO_RSA_PADDING_PKCS1_5,
+ /**< RSA PKCS#1 PKCS1-v1_5 padding scheme. Block type 01 is used for
+ * signatures, block type 02 for encryption.
+ */
+ RTE_CRYPTO_RSA_PADDING_OAEP,
+ /**< RSA PKCS#1 OAEP padding scheme */
+ RTE_CRYPTO_RSA_PADDING_PSS,
+ /**< RSA PKCS#1 PSS padding scheme */
+ RTE_CRYPTO_RSA_PADDING_TYPE_LIST_END
+};
+
+/**
+ * RSA private key type enumeration
+ *
+ * Enumerates the private key formats that can be used to perform an
+ * RSA crypto transform.
+ *
+ */
+enum rte_crypto_rsa_priv_key_type {
+ RTE_RSA_KEY_TYPE_EXP,
+ /**< RSA private key is an exponent */
+ RTE_RSA_KET_TYPE_QT,
+ /**< RSA private key is in quintuple format
+ * See rte_crypto_rsa_priv_key_qt
+ */
+};
+
+/**
+ * Structure describing RSA private key in quintuple format.
+ * See PKCS V1.5 RSA Cryptography Standard.
+ */
+struct rte_crypto_rsa_priv_key_qt {
+ rte_crypto_param p;
+ /**< p - Private key component P
+ * Private key component of RSA parameter required for CRT method
+ * of private key operations in Octet-string network byte order
+ * format.
+ */
+
+ rte_crypto_param q;
+ /**< q - Private key component Q
+ * Private key component of RSA parameter required for CRT method
+ * of private key operations in Octet-string network byte order
+ * format.
+ */
+
+ rte_crypto_param dP;
+ /**< dP - Private CRT component
+ * Private CRT component of RSA parameter required for CRT method
+ * RSA private key operations in Octet-string network byte order
+ * format.
+ * dP = d mod ( p - 1 )
+ */
+
+ rte_crypto_param dQ;
+ /**< dQ - Private CRT component
+ * Private CRT component of RSA parameter required for CRT method
+ * RSA private key operations in Octet-string network byte order
+ * format.
+ * dQ = d mod ( q - 1 )
+ */
+
+ rte_crypto_param qInv;
+ /**< qInv - Private CRT component
+ * Private CRT component of RSA parameter required for CRT method
+ * RSA private key operations in Octet-string network byte order
+ * format.
+ * qInv = inv q mod p
+ */
+};
+
+/**
+ * Asymmetric RSA transform data
+ *
+ * Structure describing RSA xform params
+ *
+ */
+struct rte_crypto_rsa_xform {
+ rte_crypto_param n;
+ /**< n - Modulus
+ * Modulus data of RSA operation in Octet-string network
+ * byte order format.
+ */
+
+ rte_crypto_param e;
+ /**< e - Public key exponent
+ * Public key exponent used for RSA public key operations in Octet-
+ * string network byte order format.
+ */
+
+ enum rte_crypto_rsa_priv_key_type key_type;
+
+ __extension__
+ union {
+ rte_crypto_param d;
+ /**< d - Private key exponent
+ * Private key exponent used for RSA
+ * private key operations in
+ * Octet-string network byte order format.
+ */
+
+ struct rte_crypto_rsa_priv_key_qt qt;
+ /**< qt - Private key in quintuple format */
+ };
+};
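
For instance, an RSA transform using a private exponent could be filled in as below (a sketch; n_data/e_data/d_data and their lengths are application-provided octet strings in network byte order, and the iova fields should also be set if the PMD works on IO addresses):

	struct rte_crypto_asym_xform xform = {
		.next = NULL,
		.xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
		.rsa = {
			.n = { .data = n_data, .length = n_len },
			.e = { .data = e_data, .length = e_len },
			.key_type = RTE_RSA_KEY_TYPE_EXP,
			.d = { .data = d_data, .length = d_len },
		},
	};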
+
+/**
+ * Asymmetric Modular exponentiation transform data
+ *
+ * Structure describing modular exponentiation xform param
+ *
+ */
+struct rte_crypto_modex_xform {
+ rte_crypto_param modulus;
+ /**< modulus
+ * Pointer to the modulus data for modexp transform operation
+ * in octet-string network byte order format
+ *
+ * In case this number is equal to zero the driver shall set
+ * the crypto op status field to RTE_CRYPTO_OP_STATUS_ERROR
+ */
+
+ rte_crypto_param exponent;
+ /**< exponent
+ * Exponent of the modexp transform operation in
+ * octet-string network byte order format
+ */
+};
+
+/**
+ * Asymmetric modular multiplicative inverse transform operation
+ *
+ * Structure describing modular multiplicative inverse transform
+ *
+ */
+struct rte_crypto_modinv_xform {
+ rte_crypto_param modulus;
+ /**<
+ * Pointer to the modulus data for modular multiplicative inverse
+ * operation in octet-string network byte order format
+ *
+ * In case this number is equal to zero the driver shall set
+ * the crypto op status field to RTE_CRYPTO_OP_STATUS_ERROR
+ *
+ * This number shall be relatively prime to base
+ * in corresponding Modular Multiplicative Inverse
+ * rte_crypto_mod_op_param
+ */
+};
+
+/**
+ * Asymmetric DH transform data
+ *
+ * Structure describing Diffie-Hellman xform params
+ *
+ */
+struct rte_crypto_dh_xform {
+ enum rte_crypto_asym_op_type type;
+ /**< Setup xform for key generate or shared secret compute */
+
+ rte_crypto_param p;
+ /**< p : Prime modulus data
+ * DH prime modulus data in octet-string network byte order format.
+ *
+ */
+
+ rte_crypto_param g;
+ /**< g : Generator
+ * DH group generator data in octet-string network byte order
+ * format.
+ *
+ */
+};
+
+/**
+ * Asymmetric Digital Signature transform operation
+ *
+ * Structure describing DSA xform params
+ *
+ */
+struct rte_crypto_dsa_xform {
+ rte_crypto_param p;
+ /**< p - Prime modulus
+ * Prime modulus data for DSA operation in Octet-string network byte
+ * order format.
+ */
+ rte_crypto_param q;
+ /**< q : Order of the subgroup.
+ * Order of the subgroup data in Octet-string network byte order
+ * format.
+ * (p-1) % q = 0
+ */
+ rte_crypto_param g;
+ /**< g: Generator of the subgroup
+ * Generator data in Octet-string network byte order format.
+ */
+ rte_crypto_param x;
+ /**< x: Private key of the signer in octet-string network
+ * byte order format.
+ * Used when app has pre-defined private key.
+ * Valid only when xform chain is DSA ONLY.
+ * if xform chain is DH private key generate + DSA, then DSA sign
+ * compute will use internally generated key.
+ */
+};
+
+/**
+ * TLS named curves
+ * https://tools.ietf.org/html/rfc8422
+ */
+enum rte_crypto_ec_group {
+ RTE_CRYPTO_EC_GROUP_UNKNOWN = 0,
+ RTE_CRYPTO_EC_GROUP_SECP192R1 = 19,
+ RTE_CRYPTO_EC_GROUP_SECP224R1 = 21,
+ RTE_CRYPTO_EC_GROUP_SECP256R1 = 23,
+ RTE_CRYPTO_EC_GROUP_SECP384R1 = 24,
+ RTE_CRYPTO_EC_GROUP_SECP521R1 = 25,
+};
+
+/**
+ * Structure for elliptic curve point
+ */
+struct rte_crypto_ec_point {
+ rte_crypto_param x;
+ /**< X coordinate */
+ rte_crypto_param y;
+ /**< Y coordinate */
+};
+
+/**
+ * Asymmetric elliptic curve transform data
+ *
+ * Structure describing all EC based xform params
+ *
+ */
+struct rte_crypto_ec_xform {
+ enum rte_crypto_ec_group curve_id;
+ /**< Pre-defined ec groups */
+};
+
+/**
+ * Operations params for modular operations:
+ * exponentiation and multiplicative inverse
+ *
+ */
+struct rte_crypto_mod_op_param {
+ rte_crypto_param base;
+ /**<
+ * Pointer to base of modular exponentiation/multiplicative
+ * inverse data in octet-string network byte order format
+ *
+ * In case Multiplicative Inverse is used this number shall
+ * be relatively prime to modulus in corresponding Modular
+ * Multiplicative Inverse rte_crypto_modinv_xform
+ */
+
+ rte_crypto_param result;
+ /**<
+ * Pointer to the result of modular exponentiation/multiplicative inverse
+ * data in octet-string network byte order format.
+ *
+ * This field shall be big enough to hold the result of Modular
+ * Exponentiation or Modular Multiplicative Inverse
+ * (bigger or equal to length of modulus)
+ */
+};
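
Together with rte_crypto_modex_xform, a modular exponentiation base^exponent mod modulus is then driven as below (a sketch; all buffers are in network byte order, and result_buf must be at least as long as the modulus):

	op->asym->modex.base.data = base_data;
	op->asym->modex.base.length = base_len;
	op->asym->modex.result.data = result_buf;
	op->asym->modex.result.length = mod_len;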
+
+/**
+ * Asymmetric crypto transform data
+ *
+ * Structure describing asym xforms.
+ */
+struct rte_crypto_asym_xform {
+ struct rte_crypto_asym_xform *next;
+ /**< Pointer to next xform to set up xform chain.*/
+ enum rte_crypto_asym_xform_type xform_type;
+ /**< Asymmetric crypto transform */
+
+ __extension__
+ union {
+ struct rte_crypto_rsa_xform rsa;
+ /**< RSA xform parameters */
+
+ struct rte_crypto_modex_xform modex;
+ /**< Modular Exponentiation xform parameters */
+
+ struct rte_crypto_modinv_xform modinv;
+ /**< Modular Multiplicative Inverse xform parameters */
+
+ struct rte_crypto_dh_xform dh;
+ /**< DH xform parameters */
+
+ struct rte_crypto_dsa_xform dsa;
+ /**< DSA xform parameters */
+
+ struct rte_crypto_ec_xform ec;
+ /**< EC xform parameters, used by elliptic curve based
+ * operations.
+ */
+ };
+};
+
+struct rte_cryptodev_asym_session;
+
+/**
+ * RSA operation params
+ *
+ */
+struct rte_crypto_rsa_op_param {
+ enum rte_crypto_asym_op_type op_type;
+ /**< Type of RSA operation for transform */
+
+ rte_crypto_param message;
+ /**<
+ * Pointer to input data
+ * - to be encrypted for RSA public encrypt.
+ * - to be signed for RSA sign generation.
+ * - to be authenticated for RSA sign verification.
+ *
+ * Pointer to output data
+ * - for RSA private decrypt.
+ * In this case the underlying array should have been
+ * allocated with enough memory to hold plaintext output
+ * (i.e. must be at least RSA key size). The message.length
+ * field should be 0 and will be overwritten by the PMD
+ * with the decrypted length.
+ *
+ * All data is in Octet-string network byte order format.
+ */
+
+ rte_crypto_param cipher;
+ /**<
+ * Pointer to input data
+ * - to be decrypted for RSA private decrypt.
+ *
+ * Pointer to output data
+ * - for RSA public encrypt.
+ * In this case the underlying array should have been allocated
+ * with enough memory to hold ciphertext output (i.e. must be
+ * at least RSA key size). The cipher.length field should
+ * be 0 and will be overwritten by the PMD with the encrypted length.
+ *
+ * All data is in Octet-string network byte order format.
+ */
+
+ rte_crypto_param sign;
+ /**<
+ * Pointer to input data
+ * - to be verified for RSA public decrypt.
+ *
+ * Pointer to output data
+ * - for RSA private encrypt.
+ * In this case the underlying array should have been allocated
+ * with enough memory to hold signature output (i.e. must be
+ * at least RSA key size). The sign.length field should
+ * be 0 and will be overwritten by the PMD with the signature length.
+ *
+ * All data is in Octet-string network byte order format.
+ */
+
+ enum rte_crypto_rsa_padding_type pad;
+ /**< RSA padding scheme to be used for transform */
+
+ enum rte_crypto_auth_algorithm md;
+ /**< Hash algorithm to be used for data hash if padding
+ * scheme is either OAEP or PSS. Valid hash algorithms
+ * are:
+ * MD5, SHA1, SHA224, SHA256, SHA384, SHA512
+ */
+
+ enum rte_crypto_auth_algorithm mgf1md;
+ /**<
+ * Hash algorithm to be used for mask generation if
+ * padding scheme is either OAEP or PSS. If padding
+ * scheme is unspecified data hash algorithm is used
+ * for mask generation. Valid hash algorithms are:
+ * MD5, SHA1, SHA224, SHA256, SHA384, SHA512
+ */
+};
+
+/**
+ * Diffie-Hellman Operations params.
+ */
+struct rte_crypto_dh_op_param {
+ rte_crypto_param pub_key;
+ /**<
+ * Output generated public key when xform type is
+ * DH PUB_KEY_GENERATION.
+ * Input peer public key when xform type is DH
+ * SHARED_SECRET_COMPUTATION
+ * pub_key is in octet-string network byte order format.
+ *
+ */
+
+ rte_crypto_param priv_key;
+ /**<
+ * Output generated private key if xform type is
+ * DH PRIVATE_KEY_GENERATION
+ * Input when xform type is DH SHARED_SECRET_COMPUTATION.
+ * priv_key is in octet-string network byte order format.
+ *
+ */
+
+ rte_crypto_param shared_secret;
+ /**<
+ * Output with calculated shared secret
+ * when dh xform set up with op type = SHARED_SECRET_COMPUTATION.
+ * shared_secret is an octet-string network byte order format.
+ *
+ */
+};
+
+/**
+ * DSA Operations params
+ *
+ */
+struct rte_crypto_dsa_op_param {
+ enum rte_crypto_asym_op_type op_type;
+ /**< Signature Generation or Verification */
+ rte_crypto_param message;
+ /**< input message to be signed or verified */
+ rte_crypto_param r;
+ /**< dsa sign component 'r' value
+ *
+ * output if op_type = sign generate,
+ * input if op_type = sign verify
+ */
+ rte_crypto_param s;
+ /**< dsa sign component 's' value
+ *
+ * output if op_type = sign generate,
+ * input if op_type = sign verify
+ */
+ rte_crypto_param y;
+ /**< y : Public key of the signer.
+ * Public key data of the signer in Octet-string network byte order
+ * format.
+ * y = g^x mod p
+ */
+};
+
+/**
+ * ECDSA operation params
+ */
+struct rte_crypto_ecdsa_op_param {
+ enum rte_crypto_asym_op_type op_type;
+ /**< Signature generation or verification */
+
+ rte_crypto_param pkey;
+ /**< Private key of the signer for signature generation */
+
+ struct rte_crypto_ec_point q;
+ /**< Public key of the signer for verification */
+
+ rte_crypto_param message;
+ /**< Input message digest to be signed or verified */
+
+ rte_crypto_param k;
+ /**< The ECDSA per-message secret number, which is an integer
+ * in the interval (1, n-1)
+ */
+
+ rte_crypto_param r;
+ /**< r component of elliptic curve signature
+ * output : for signature generation
+ * input : for signature verification
+ */
+ rte_crypto_param s;
+ /**< s component of elliptic curve signature
+ * output : for signature generation
+ * input : for signature verification
+ */
+};
+
+/**
+ * Structure for EC point multiplication operation param
+ */
+struct rte_crypto_ecpm_op_param {
+ struct rte_crypto_ec_point p;
+ /**< x and y coordinates of input point */
+
+ struct rte_crypto_ec_point r;
+ /**< x and y coordinates of resultant point */
+
+ rte_crypto_param scalar;
+ /**< Scalar to multiply the input point */
+};
+
+/**
+ * Asymmetric Cryptographic Operation.
+ *
+ * Structure describing asymmetric crypto operation params.
+ *
+ */
+struct rte_crypto_asym_op {
+ RTE_STD_C11
+ union {
+ struct rte_cryptodev_asym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_asym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ };
+
+ __extension__
+ union {
+ struct rte_crypto_rsa_op_param rsa;
+ struct rte_crypto_mod_op_param modex;
+ struct rte_crypto_mod_op_param modinv;
+ struct rte_crypto_dh_op_param dh;
+ struct rte_crypto_dsa_op_param dsa;
+ struct rte_crypto_ecdsa_op_param ecdsa;
+ struct rte_crypto_ecpm_op_param ecpm;
+ };
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_ASYM_H_ */
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto_sym.h b/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 000000000..da961a19d
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,939 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_common.h>
+
+/**
+ * Crypto IO Vector (in analogy with struct iovec)
+ * Supposed to be used to pass input/output data buffers for crypto data-path
+ * functions.
+ */
+struct rte_crypto_vec {
+ /** virtual address of the data buffer */
+ void *base;
+ /** IOVA of the data buffer */
+ rte_iova_t iova;
+ /** length of the data buffer */
+ uint32_t len;
+};
+
+/**
+ * Crypto scatter-gather list descriptor. Consists of a pointer to an array
+ * of Crypto IO vectors with its size.
+ */
+struct rte_crypto_sgl {
+ /** start of an array of vectors */
+ struct rte_crypto_vec *vec;
+ /** size of an array of vectors */
+ uint32_t num;
+};
+
+/**
+ * Synchronous operation descriptor.
+ * Supposed to be used with CPU crypto API call.
+ */
+struct rte_crypto_sym_vec {
+ /** array of SGL vectors */
+ struct rte_crypto_sgl *sgl;
+ /** array of pointers to IV */
+ void **iv;
+ /** array of pointers to AAD */
+ void **aad;
+ /** array of pointers to digest */
+ void **digest;
+ /**
+ * array of statuses for each operation:
+ * - 0 on success
+ * - errno on error
+ */
+ int32_t *status;
+ /** number of operations to perform */
+ uint32_t num;
+};
+
+/**
+ * Used by cpu_crypto_process_bulk() to specify head/tail offsets
+ * for auth/cipher processing.
+ */
+union rte_crypto_sym_ofs {
+ uint64_t raw;
+ struct {
+ struct {
+ uint16_t head;
+ uint16_t tail;
+ } auth, cipher;
+ } ofs;
+};
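
For example, to authenticate a whole region while ciphering everything after a header of hdr_len bytes, the offsets could be set as follows (a sketch; hdr_len is an application value):

	union rte_crypto_sym_ofs ofs;

	ofs.raw = 0;
	ofs.ofs.cipher.head = hdr_len;	/* skip header before ciphering */
	ofs.ofs.cipher.tail = 0;
	ofs.ofs.auth.head = 0;		/* authenticate from the start */
	ofs.ofs.auth.tail = 0;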
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+ RTE_CRYPTO_CIPHER_NULL = 1,
+ /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+ RTE_CRYPTO_CIPHER_3DES_CBC,
+ /**< Triple DES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_3DES_CTR,
+ /**< Triple DES algorithm in CTR mode */
+ RTE_CRYPTO_CIPHER_3DES_ECB,
+ /**< Triple DES algorithm in ECB mode */
+
+ RTE_CRYPTO_CIPHER_AES_CBC,
+ /**< AES algorithm in CBC mode */
+ RTE_CRYPTO_CIPHER_AES_CTR,
+ /**< AES algorithm in Counter mode */
+ RTE_CRYPTO_CIPHER_AES_ECB,
+ /**< AES algorithm in ECB mode */
+ RTE_CRYPTO_CIPHER_AES_F8,
+ /**< AES algorithm in F8 mode */
+ RTE_CRYPTO_CIPHER_AES_XTS,
+ /**< AES algorithm in XTS mode */
+
+ RTE_CRYPTO_CIPHER_ARC4,
+ /**< (A)RC4 cipher algorithm */
+
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ /**< KASUMI algorithm in F8 mode */
+
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ /**< SNOW 3G algorithm in UEA2 mode */
+
+ RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ /**< ZUC algorithm in EEA3 mode */
+
+ RTE_CRYPTO_CIPHER_DES_CBC,
+ /**< DES algorithm in CBC mode */
+
+ RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ /**< AES algorithm using modes required by
+ * DOCSIS Baseline Privacy Plus Spec.
+ * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
+ * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
+ */
+
+ RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ /**< DES algorithm using modes required by
+ * DOCSIS Baseline Privacy Plus Spec.
+ * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
+ * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
+ */
+
+ RTE_CRYPTO_CIPHER_LIST_END
+
+};
+
+/** Cipher algorithm name strings */
+extern const char *
+rte_crypto_cipher_algorithm_strings[];
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ /**< Encrypt cipher operation */
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ /**< Decrypt cipher operation */
+};
+
+/** Cipher operation name strings */
+extern const char *
+rte_crypto_cipher_operation_strings[];
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * use to create a session.
+ */
+struct rte_crypto_cipher_xform {
+ enum rte_crypto_cipher_operation op;
+ /**< This parameter determines if the cipher operation is an encrypt or
+ * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+ * only encrypt operations are valid.
+ */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< Cipher algorithm */
+
+ struct {
+ const uint8_t *data; /**< pointer to key data */
+ uint16_t length; /**< key length in bytes */
+ } key;
+ /**< Cipher key
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+ * point to a concatenation of the AES encryption key followed by a
+ * keymask. As per RFC3711, the keymask should be padded with trailing
+ * bytes to match the length of the encryption key used.
+ *
+ * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+ * 192 bits (24 bytes) or 256 bits (32 bytes).
+ *
+ * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+ * should be set to the combined length of the encryption key and the
+ * keymask. Since the keymask and the encryption key are the same size,
+ * key.length should be set to 2 x the AES encryption key length.
+ *
+ * For the AES-XTS mode of operation:
+ * - Two keys must be provided and key.length refers to total length of
+ * the two keys.
+ * - key.data must point to the two keys concatenated together
+ * (key1 || key2).
+ * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+ * - Both keys must have the same size.
+ **/
+ struct {
+ uint16_t offset;
+ /**< Starting point for Initialisation Vector or Counter,
+ * specified as number of bytes from start of crypto
+ * operation (rte_crypto_op).
+ *
+ * - For block ciphers in CBC or F8 mode, or for KASUMI
+ * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this. Note that the PMDs may
+ * modify the memory reserved (the first byte and the
+ * final padding)
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for KASUMI
+ * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+};
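
As an example, an AES-128-CBC encrypt transform with a 16-byte IV stored right after the op structure might look like this (a sketch; key_data and IV_OFFSET are application choices, IV_OFFSET commonly being sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op)):

	struct rte_crypto_cipher_xform cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.key = { .data = key_data, .length = 16 },
		.iv = { .offset = IV_OFFSET, .length = 16 },
	};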
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_NULL = 1,
+ /**< NULL hash algorithm. */
+
+ RTE_CRYPTO_AUTH_AES_CBC_MAC,
+ /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+ RTE_CRYPTO_AUTH_AES_CMAC,
+ /**< AES CMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_GMAC,
+ /**< AES GMAC algorithm. */
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ /**< AES XCBC algorithm. */
+
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ /**< KASUMI algorithm in F9 mode. */
+
+ RTE_CRYPTO_AUTH_MD5,
+ /**< MD5 algorithm */
+ RTE_CRYPTO_AUTH_MD5_HMAC,
+ /**< HMAC using MD5 algorithm */
+
+ RTE_CRYPTO_AUTH_SHA1,
+ /**< 160 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA1_HMAC,
+ /**< HMAC using 160 bit SHA algorithm.
+ * HMAC-SHA-1-96 can be generated by setting
+ * digest_length to 12 bytes in auth/aead xforms.
+ */
+ RTE_CRYPTO_AUTH_SHA224,
+ /**< 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA224_HMAC,
+ /**< HMAC using 224 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256,
+ /**< 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA256_HMAC,
+ /**< HMAC using 256 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384,
+ /**< 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA384_HMAC,
+ /**< HMAC using 384 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512,
+ /**< 512 bit SHA algorithm. */
+ RTE_CRYPTO_AUTH_SHA512_HMAC,
+ /**< HMAC using 512 bit SHA algorithm. */
+
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ /**< SNOW 3G algorithm in UIA2 mode. */
+
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+
+ RTE_CRYPTO_AUTH_SHA3_224,
+ /**< 224 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+ /**< HMAC using 224 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_256,
+ /**< 256 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+ /**< HMAC using 256 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_384,
+ /**< 384 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+ /**< HMAC using 384 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_512,
+ /**< 512 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+ /**< HMAC using 512 bit SHA3 algorithm. */
+
+ RTE_CRYPTO_AUTH_LIST_END
+};
+
+/** Authentication algorithm name strings */
+extern const char *
+rte_crypto_auth_algorithm_strings[];
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+ RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
+ RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
+};
+
+/** Authentication operation name strings */
+extern const char *
+rte_crypto_auth_operation_strings[];
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transform. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+ enum rte_crypto_auth_operation op;
+ /**< Authentication operation type */
+ enum rte_crypto_auth_algorithm algo;
+ /**< Authentication algorithm selection */
+
+ struct {
+ const uint8_t *data; /**< pointer to key data */
+ uint16_t length; /**< key length in bytes */
+ } key;
+ /**< Authentication key data.
+ * The authentication key length MUST be less than or equal to the
+ * block size of the algorithm. It is the caller's responsibility to
+ * ensure that the key length is compliant with the standard being used
+ * (for example RFC 2104, FIPS 198a).
+ */
+
+ struct {
+ uint16_t offset;
+ /**< Starting point for Initialisation Vector or Counter,
+ * specified as number of bytes from start of crypto
+ * operation (rte_crypto_op).
+ *
+ * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode
+ * this is the authentication Initialisation Vector
+ * (IV) value. For AES-GMAC IV description please refer
+ * to the field `length` in iv struct.
+ *
+ * - For KASUMI in F9 mode and other authentication
+ * algorithms, this field is not used.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For SNOW3G in UIA2 mode, for ZUC in EIA3 mode and
+ * for AES-GMAC, this is the length of the IV.
+ *
+ * - For KASUMI in F9 mode and other authentication
+ * algorithms, this field is not used.
+ *
+ * - For GMAC mode, this is either:
+ * 1) Number greater or equal to one, which means that IV
+ * is used and J0 will be computed internally, a minimum
+ * of 16 bytes must be allocated.
+ * 2) Zero, in which case data points to J0. In this case
+ * 16 bytes of J0 should be passed where J0 is defined
+ * by NIST SP800-38D.
+ *
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ uint16_t digest_length;
+ /**< Length of the digest to be returned. If the verify option is set,
+ * this specifies the length of the digest to be compared for the
+ * session.
+ *
+ * It is the caller's responsibility to ensure that the
+ * digest length is compliant with the hash algorithm being used.
+ * If the value is less than the maximum length allowed by the hash,
+ * the result shall be truncated.
+ */
+};
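
Likewise, a SHA1-HMAC digest-generation transform could be set up as below (a sketch; hmac_key is application-provided, and HMAC-SHA-1-96 would use digest_length = 12 as noted above):

	struct rte_crypto_auth_xform auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.key = { .data = hmac_key, .length = 20 },
		.digest_length = 20,
	};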
+
+
+/** Symmetric AEAD Algorithms */
+enum rte_crypto_aead_algorithm {
+ RTE_CRYPTO_AEAD_AES_CCM = 1,
+ /**< AES algorithm in CCM mode. */
+ RTE_CRYPTO_AEAD_AES_GCM,
+ /**< AES algorithm in GCM mode. */
+ RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+ /**< Chacha20 cipher with poly1305 authenticator */
+ RTE_CRYPTO_AEAD_LIST_END
+};
+
+/** AEAD algorithm name strings */
+extern const char *
+rte_crypto_aead_algorithm_strings[];
+
+/** Symmetric AEAD Operations */
+enum rte_crypto_aead_operation {
+ RTE_CRYPTO_AEAD_OP_ENCRYPT,
+ /**< Encrypt and generate digest */
+ RTE_CRYPTO_AEAD_OP_DECRYPT
+ /**< Verify digest and decrypt */
+};
+
+/** Authentication operation name strings */
+extern const char *
+rte_crypto_aead_operation_strings[];
+
+struct rte_crypto_aead_xform {
+ enum rte_crypto_aead_operation op;
+ /**< AEAD operation type */
+ enum rte_crypto_aead_algorithm algo;
+ /**< AEAD algorithm selection */
+
+ struct {
+ const uint8_t *data; /**< pointer to key data */
+ uint16_t length; /**< key length in bytes */
+ } key;
+
+ struct {
+ uint16_t offset;
+ /**< Starting point for Initialisation Vector or Counter,
+ * specified as number of bytes from start of crypto
+ * operation (rte_crypto_op).
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For Chacha20-Poly1305 it is 96-bit nonce.
+ * PMD sets initial counter for Poly1305 key generation
+ * part to 0 and for Chacha20 encryption to 1 as per
+ * rfc8439 2.8. AEAD construction.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For GCM mode, this is either:
+ * 1) Number greater or equal to one, which means that IV
+ * is used and J0 will be computed internally, a minimum
+ * of 16 bytes must be allocated.
+ * 2) Zero, in which case data points to J0. In this case
+ * 16 bytes of J0 should be passed where J0 is defined
+ * by NIST SP800-38D.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ *
+ * - For Chacha20-Poly1305 this field is always 12.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ uint16_t digest_length;
+ /**< Length of the digest to be returned, in bytes. */
+
+ uint16_t aad_length;
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * For CCM mode, this is the length of the actual AAD, even though
+ * the caller is required to reserve 18 bytes before the AAD and pad
+ * the end of it so that a multiple of 16 bytes is allocated.
+ */
+};
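
An AEAD transform replaces a separate cipher/auth pair; an AES-128-GCM encrypt setup might look like this (a sketch; key_data, IV_OFFSET and aad_len are application choices):

	struct rte_crypto_aead_xform aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = key_data, .length = 16 },
		.iv = { .offset = IV_OFFSET, .length = 12 },
		.digest_length = 16,
		.aad_length = aad_len,
	};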
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+ RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
+ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER, /**< Cipher xform */
+ RTE_CRYPTO_SYM_XFORM_AEAD /**< AEAD xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required. Multiple transforms
+ * can be chained together to specify a chain of transforms such as
+ * authentication then cipher, or cipher then authentication. Each transform
+ * structure can hold a single transform, with the type field specifying which
+ * transform is contained within the union.
+ */
+struct rte_crypto_sym_xform {
+ struct rte_crypto_sym_xform *next;
+ /**< next xform in chain */
+ enum rte_crypto_sym_xform_type type; /**< xform type */
+ RTE_STD_C11
+ union {
+ struct rte_crypto_auth_xform auth;
+ /**< Authentication / hash xform */
+ struct rte_crypto_cipher_xform cipher;
+ /**< Cipher xform */
+ struct rte_crypto_aead_xform aead;
+ /**< AEAD xform */
+ };
+};
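
Continuing the cipher and auth sketches above, an encrypt-then-generate-digest chain is built by linking two of these structures via next (a sketch):

	struct rte_crypto_sym_xform auth_xform = {
		.next = NULL,
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = auth,
	};
	struct rte_crypto_sym_xform cipher_xform = {
		.next = &auth_xform,	/* cipher first, then auth */
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = cipher,
	};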
+
+struct rte_cryptodev_sym_session;
+
+/**
+ * Symmetric Cryptographic Operation.
+ *
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
+ *
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
+ * which contains the source data which the crypto operation is to be performed
+ * on.
+ * While the mbuf is in use by a crypto operation no part of the mbuf should be
+ * changed by the application as the device may read or write to any part of the
+ * mbuf. In the case of hardware crypto devices some or all of the mbuf
+ * may be DMAed in and out of the device, writing over the original data,
+ * though only the part specified by the rte_crypto_sym_op for transformation
+ * will actually be changed.
+ * Out-of-place (OOP) operation, where the source mbuf is different to the
+ * destination mbuf, is a special case. Data will be copied from m_src to m_dst.
+ * The part copied includes all the parts of the source mbuf that will be
+ * operated on, based on the cipher.data.offset+cipher.data.length and
+ * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
+ * indicated by the cipher parameters will be transformed, any extra data around
+ * this indicated by the auth parameters will be copied unchanged from source to
+ * destination mbuf.
+ * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
+ * both source and destination mbufs. As these offsets are relative to the
+ * data_off parameter in each mbuf this can result in the data written to the
+ * destination buffer being at a different alignment, relative to buffer start,
+ * to the data in the source buffer.
+ */
+struct rte_crypto_sym_op {
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
+
+ RTE_STD_C11
+ union {
+ struct rte_cryptodev_sym_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_crypto_sym_xform *xform;
+ /**< Session-less API crypto operation parameters */
+ struct rte_security_session *sec_session;
+ /**< Handle for the initialised security session context */
+ };
+
+ RTE_STD_C11
+ union {
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for AEAD processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ */
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ */
+ } data; /**< Data offsets and length for AEAD */
+ struct {
+ uint8_t *data;
+ /**< This points to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ */
+ rte_iova_t phys_addr;
+ /**< Physical address of digest */
+ } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM)
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the first block (16 bytes)
+ * and the length encoding in the first two bytes of the
+ * second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * - Note that PMDs may modify the memory reserved
+ * (first 18 bytes and the final padding).
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ */
+ rte_iova_t phys_addr; /**< physical address */
+ } aad;
+ /**< Additional authentication parameters */
+ } aead;
+
+ struct {
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing,
+ * specified as number of bytes from start
+ * of data in the source buffer.
+ * The result of the cipher operation will be
+ * written back into the output buffer
+ * starting at this location.
+ *
+ * @note
+ * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
+ * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ * this field should be in bits. For
+ * digest-encrypted cases this must be
+ * an 8-bit multiple.
+ */
+ uint32_t length;
+ /**< The message length, in bytes, of the
+ * source buffer on which the cryptographic
+ * operation will be computed.
+ * This must be a multiple of the block size
+ * if a block cipher is being used. This is
+ * also the same as the result length.
+ *
+ * @note
+ * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
+ * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ * this field should be in bits. For
+ * digest-encrypted cases this must be
+ * an 8-bit multiple.
+ */
+ } data; /**< Data offsets and length for ciphering */
+ } cipher;
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing,
+ * specified as number of bytes from start of
+ * packet in source buffer.
+ *
+ * @note
+ * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
+ * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ * this field should be in bits. For
+ * digest-encrypted cases this must be
+ * an 8-bit multiple.
+ *
+ * @note
+ * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
+ * this offset should be such that
+ * data to authenticate starts at COUNT.
+ */
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
+ * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ * this field should be in bits. For
+ * digest-encrypted cases this must be
+ * an 8-bit multiple.
+ *
+ * @note
+ * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
+ * the length should include the COUNT,
+ * FRESH, message, direction bit and padding
+ * (to be multiple of 8 bits).
+ */
+ } data;
+ /**< Data offsets and length for authentication */
+
+ struct {
+ uint8_t *data;
+ /**< This points to the location where
+ * the digest result should be inserted
+ * (in the case of digest generation)
+ * or where the purported digest exists
+ * (in the case of digest verification).
+ *
+ * At session creation time, the client
+ * specified the digest result length with
+ * the digest_length member of the
+ * @ref rte_crypto_auth_xform structure.
+ * For physical crypto devices the caller
+ * must allocate at least digest_length of
+ * physically contiguous memory at this
+ * location.
+ *
+ * For digest generation, the digest result
+ * will overwrite any data at this location.
+ *
+ * @note
+ * Digest-encrypted case.
+ * Digest can be generated, appended to
+ * the end of raw data and encrypted
+ * together using chained digest
+ * generation
+ * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
+ * and encryption
+ * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ * xforms. Similarly, authentication
+ * of the raw data against appended,
+ * decrypted digest, can be performed
+ * using decryption
+ * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
+ * and digest verification
+ * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
+ * chained xforms.
+ * To perform those operations, a few
+ * additional conditions must be met:
+ * - caller must allocate at least
+ * digest_length of memory at the end of
+ * source and (in case of out-of-place
+ * operations) destination buffer; those
+ * buffers can be linear or split using
+ * scatter-gather lists,
+ * - digest data pointer must point to
+ * the end of source or (in case of
+ * out-of-place operations) destination
+ * data, which is pointer to the
+ * data buffer + auth.data.offset +
+ * auth.data.length,
+ * - cipher.data.offset +
+ * cipher.data.length must be greater
+ * than auth.data.offset +
+ * auth.data.length and is typically
+ * equal to auth.data.offset +
+ * auth.data.length + digest_length.
+ * - for wireless algorithms, i.e.
+ * SNOW 3G, KASUMI and ZUC, as the
+ * cipher.data.length,
+ * cipher.data.offset,
+ * auth.data.length and
+ * auth.data.offset are in bits, they
+ * must be 8-bit multiples.
+ *
+ * Note that, for security reasons, it
+ * is the PMD's responsibility not to
+ * leave an unencrypted digest in any
+ * buffer after performing auth-cipher
+ * operations.
+ *
+ */
+ rte_iova_t phys_addr;
+ /**< Physical address of digest */
+ } digest; /**< Digest parameters */
+ } auth;
+ };
+ };
+};
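+
+/**
+ * Example: populating the AEAD branch of a symmetric operation for an
+ * in-place AES-GCM request. Illustrative sketch only, not part of the
+ * API; op, m_src, sess, aad_buf, aad_iova and PAYLOAD_LEN are
+ * hypothetical application-side values.
+ *
+ * @code
+ * struct rte_crypto_sym_op *sym = op->sym;
+ *
+ * sym->m_src = m_src; // in-place operation, m_dst left NULL
+ * rte_crypto_op_attach_sym_session(op, sess);
+ *
+ * sym->aead.data.offset = 0;
+ * sym->aead.data.length = PAYLOAD_LEN;
+ * // digest placed immediately after the payload in the mbuf
+ * sym->aead.digest.data =
+ *	rte_pktmbuf_mtod_offset(m_src, uint8_t *, PAYLOAD_LEN);
+ * sym->aead.digest.phys_addr =
+ *	rte_pktmbuf_iova_offset(m_src, PAYLOAD_LEN);
+ * // for GCM, the AAD is written from byte 0 of the AAD buffer
+ * sym->aead.aad.data = aad_buf;
+ * sym->aead.aad.phys_addr = aad_iova;
+ * @endcode
+ */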
+
+
+/**
+ * Reset the fields of a symmetric operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+ memset(op, 0, sizeof(*op));
+}
+
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation.
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+ void *priv_data, uint8_t nb_xforms)
+{
+ struct rte_crypto_sym_xform *xform;
+
+ sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
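+	/*
+	 * Build the chain in place: each xform in the contiguous
+	 * private-data array is initialised to "not specified" and linked
+	 * to the next element, with the last one NULL-terminating the list.
+	 */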
+ do {
+ xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return sym_op->xform;
+}
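+
+/**
+ * Example: building a cipher-then-auth chain in session-less mode, with
+ * both xforms laid out contiguously in the operation's private data.
+ * Illustrative sketch only; op and priv are hypothetical, and priv must
+ * be large enough to hold two xforms.
+ *
+ * @code
+ * struct rte_crypto_sym_xform *xf =
+ *	__rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv, 2);
+ *
+ * xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;	// first transform
+ * xf->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;	// chained second
+ * // xf->next->next == NULL terminates the chain
+ * @endcode
+ */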
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param sym_op crypto operation
+ * @param sess cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ sym_op->session = sess;
+
+ return 0;
+}
+
+/**
+ * Converts a portion of mbuf data into a vector representation.
+ * Each segment will be represented as a separate entry in the *vec* array.
+ * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
+ * @param mb
+ * Pointer to the *rte_mbuf* object.
+ * @param ofs
+ * Offset within mbuf data to start with.
+ * @param len
+ * Length of data to represent.
+ * @param vec
+ * Pointer to an output array of IO vectors.
+ * @param num
+ * Size of an output array.
+ * @return
+ * - on success, the number of entries filled in the *vec* array.
+ * - on failure, a negative number whose absolute value is the number of
+ * entries required in the *vec* array.
+ */
+__rte_experimental
+static inline int
+rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
+ struct rte_crypto_vec vec[], uint32_t num)
+{
+ uint32_t i;
+ struct rte_mbuf *nseg;
+ uint32_t left;
+ uint32_t seglen;
+
+ /* assuming that requested data starts in the first segment */
+ RTE_ASSERT(mb->data_len > ofs);
+
+ if (mb->nb_segs > num)
+ return -mb->nb_segs;
+
+ vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
+ vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
+
+ /* whole data lies in the first segment */
+ seglen = mb->data_len - ofs;
+ if (len <= seglen) {
+ vec[0].len = len;
+ return 1;
+ }
+
+ /* data spread across segments */
+ vec[0].len = seglen;
+ left = len - seglen;
+ for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
+
+ vec[i].base = rte_pktmbuf_mtod(nseg, void *);
+ vec[i].iova = rte_pktmbuf_iova(nseg);
+
+ seglen = nseg->data_len;
+ if (left <= seglen) {
+ /* whole requested data is completed */
+ vec[i].len = left;
+ left = 0;
+ break;
+ }
+
+ /* use whole segment */
+ vec[i].len = seglen;
+ left -= seglen;
+ }
+
+ RTE_ASSERT(left == 0);
+ return i + 1;
+}
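+
+/**
+ * Example: gathering a region of a possibly segmented mbuf into IO
+ * vectors, with the caller reacting to an undersized array.
+ * Illustrative sketch only; mb, ofs and len are hypothetical values.
+ *
+ * @code
+ * struct rte_crypto_vec vec[8];
+ * int n = rte_crypto_mbuf_to_vec(mb, ofs, len, vec, RTE_DIM(vec));
+ *
+ * if (n < 0)
+ *	return -ENOSPC; // the mbuf needs -n vector entries
+ * // vec[0] .. vec[n - 1] now describe the requested region
+ * @endcode
+ */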
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev.c b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev.c
new file mode 100644
index 000000000..e37b83afd
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev.c
@@ -0,0 +1,2014 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020 Intel Corporation
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_interrupts.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_compat.h>
+#include <rte_function_versioning.h>
+
+#include "rte_crypto.h"
+#include "rte_cryptodev.h"
+#include "rte_cryptodev_pmd.h"
+#include "rte_cryptodev_trace.h"
+
+static uint8_t nb_drivers;
+
+static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
+
+struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
+
+static struct rte_cryptodev_global cryptodev_globals = {
+ .devs = rte_crypto_devices,
+ .data = { NULL },
+ .nb_devs = 0
+};
+
+/* spinlock for crypto device callbacks */
+static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+static const struct rte_cryptodev_capabilities
+ cryptodev_undefined_capabilities[] = {
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static struct rte_cryptodev_capabilities
+ *capability_copy[RTE_CRYPTO_MAX_DEVS];
+static uint8_t is_capability_checked[RTE_CRYPTO_MAX_DEVS];
+
+/**
+ * The user application callback description.
+ *
+ * It contains the callback address to be registered by the user application,
+ * a pointer to the callback's parameters, and the event type.
+ */
+struct rte_cryptodev_callback {
+ TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
+ rte_cryptodev_cb_fn cb_fn; /**< Callback address */
+ void *cb_arg; /**< Parameter for callback */
+ enum rte_cryptodev_event_type event; /**< Interrupt event type */
+ uint32_t active; /**< Callback is executing */
+};
+
+/**
+ * The crypto cipher algorithm string identifiers.
+ * These can be used in application command lines.
+ */
+const char *
+rte_crypto_cipher_algorithm_strings[] = {
+ [RTE_CRYPTO_CIPHER_3DES_CBC] = "3des-cbc",
+ [RTE_CRYPTO_CIPHER_3DES_ECB] = "3des-ecb",
+ [RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr",
+
+ [RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc",
+ [RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr",
+ [RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi",
+ [RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb",
+ [RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8",
+ [RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts",
+
+ [RTE_CRYPTO_CIPHER_ARC4] = "arc4",
+
+ [RTE_CRYPTO_CIPHER_DES_CBC] = "des-cbc",
+ [RTE_CRYPTO_CIPHER_DES_DOCSISBPI] = "des-docsisbpi",
+
+ [RTE_CRYPTO_CIPHER_NULL] = "null",
+
+ [RTE_CRYPTO_CIPHER_KASUMI_F8] = "kasumi-f8",
+ [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
+ [RTE_CRYPTO_CIPHER_ZUC_EEA3] = "zuc-eea3"
+};
+
+/**
+ * The crypto cipher operation string identifiers.
+ * These can be used in application command lines.
+ */
+const char *
+rte_crypto_cipher_operation_strings[] = {
+ [RTE_CRYPTO_CIPHER_OP_ENCRYPT] = "encrypt",
+ [RTE_CRYPTO_CIPHER_OP_DECRYPT] = "decrypt"
+};
+
+/**
+ * The crypto auth algorithm string identifiers.
+ * These can be used in application command lines.
+ */
+const char *
+rte_crypto_auth_algorithm_strings[] = {
+ [RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac",
+ [RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac",
+ [RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac",
+ [RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac",
+
+ [RTE_CRYPTO_AUTH_MD5] = "md5",
+ [RTE_CRYPTO_AUTH_MD5_HMAC] = "md5-hmac",
+
+ [RTE_CRYPTO_AUTH_NULL] = "null",
+
+ [RTE_CRYPTO_AUTH_SHA1] = "sha1",
+ [RTE_CRYPTO_AUTH_SHA1_HMAC] = "sha1-hmac",
+
+ [RTE_CRYPTO_AUTH_SHA224] = "sha2-224",
+ [RTE_CRYPTO_AUTH_SHA224_HMAC] = "sha2-224-hmac",
+ [RTE_CRYPTO_AUTH_SHA256] = "sha2-256",
+ [RTE_CRYPTO_AUTH_SHA256_HMAC] = "sha2-256-hmac",
+ [RTE_CRYPTO_AUTH_SHA384] = "sha2-384",
+ [RTE_CRYPTO_AUTH_SHA384_HMAC] = "sha2-384-hmac",
+ [RTE_CRYPTO_AUTH_SHA512] = "sha2-512",
+ [RTE_CRYPTO_AUTH_SHA512_HMAC] = "sha2-512-hmac",
+
+ [RTE_CRYPTO_AUTH_KASUMI_F9] = "kasumi-f9",
+ [RTE_CRYPTO_AUTH_SNOW3G_UIA2] = "snow3g-uia2",
+ [RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3"
+};
+
+/**
+ * The crypto AEAD algorithm string identifiers.
+ * These can be used in application command lines.
+ */
+const char *
+rte_crypto_aead_algorithm_strings[] = {
+ [RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm",
+ [RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm",
+ [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
+};
+
+/**
+ * The crypto AEAD operation string identifiers.
+ * These can be used in application command lines.
+ */
+const char *
+rte_crypto_aead_operation_strings[] = {
+ [RTE_CRYPTO_AEAD_OP_ENCRYPT] = "encrypt",
+ [RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt"
+};
+
+/**
+ * Asymmetric crypto transform operation string identifiers.
+ */
+const char *rte_crypto_asym_xform_strings[] = {
+ [RTE_CRYPTO_ASYM_XFORM_NONE] = "none",
+ [RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa",
+ [RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp",
+ [RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv",
+ [RTE_CRYPTO_ASYM_XFORM_DH] = "dh",
+ [RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa",
+ [RTE_CRYPTO_ASYM_XFORM_ECDSA] = "ecdsa",
+ [RTE_CRYPTO_ASYM_XFORM_ECPM] = "ecpm",
+};
+
+/**
+ * Asymmetric crypto operation string identifiers.
+ */
+const char *rte_crypto_asym_op_strings[] = {
+ [RTE_CRYPTO_ASYM_OP_ENCRYPT] = "encrypt",
+ [RTE_CRYPTO_ASYM_OP_DECRYPT] = "decrypt",
+ [RTE_CRYPTO_ASYM_OP_SIGN] = "sign",
+ [RTE_CRYPTO_ASYM_OP_VERIFY] = "verify",
+ [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE] = "priv_key_generate",
+ [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
+ [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
+};
+
+/**
+ * The private data structure stored in a session mempool's private data area.
+ */
+struct rte_cryptodev_sym_session_pool_private_data {
+ uint16_t nb_drivers;
+ /**< number of elements in sess_data array */
+ uint16_t user_data_sz;
+ /**< session user data will be placed after sess_data */
+};
+
+int
+rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
+ const char *algo_string)
+{
+ unsigned int i;
+
+ for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
+ if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
+ *algo_enum = (enum rte_crypto_cipher_algorithm) i;
+ return 0;
+ }
+ }
+
+ /* Invalid string */
+ return -1;
+}
+
+int
+rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
+ const char *algo_string)
+{
+ unsigned int i;
+
+ for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
+ if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
+ *algo_enum = (enum rte_crypto_auth_algorithm) i;
+ return 0;
+ }
+ }
+
+ /* Invalid string */
+ return -1;
+}
+
+int
+rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
+ const char *algo_string)
+{
+ unsigned int i;
+
+ for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
+ if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
+ *algo_enum = (enum rte_crypto_aead_algorithm) i;
+ return 0;
+ }
+ }
+
+ /* Invalid string */
+ return -1;
+}
+
+int
+rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
+ const char *xform_string)
+{
+ unsigned int i;
+
+ for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
+ if (strcmp(xform_string,
+ rte_crypto_asym_xform_strings[i]) == 0) {
+ *xform_enum = (enum rte_crypto_asym_xform_type) i;
+ return 0;
+ }
+ }
+
+ /* Invalid string */
+ return -1;
+}
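+
+/**
+ * Example: resolving command-line algorithm names with the lookup
+ * helpers above. Illustrative sketch only; the string literals are
+ * arbitrary sample inputs.
+ *
+ * @code
+ * enum rte_crypto_aead_algorithm aead;
+ * enum rte_crypto_asym_xform_type xform;
+ *
+ * if (rte_cryptodev_get_aead_algo_enum(&aead, "aes-gcm") != 0)
+ *	printf("unknown AEAD algorithm\n");
+ * if (rte_cryptodev_asym_get_xform_enum(&xform, "modexp") != 0)
+ *	printf("unknown asymmetric xform\n");
+ * @endcode
+ */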
+
+/**
+ * The crypto auth operation string identifiers.
+ * These can be used in application command lines.
+ */
+const char *
+rte_crypto_auth_operation_strings[] = {
+ [RTE_CRYPTO_AUTH_OP_VERIFY] = "verify",
+ [RTE_CRYPTO_AUTH_OP_GENERATE] = "generate"
+};
+
+const struct rte_cryptodev_symmetric_capability __vsym *
+rte_cryptodev_sym_capability_get_v20(uint8_t dev_id,
+ const struct rte_cryptodev_sym_capability_idx *idx)
+{
+ const struct rte_cryptodev_capabilities *capability;
+ struct rte_cryptodev_info dev_info;
+ int i = 0;
+
+ rte_cryptodev_info_get_v20(dev_id, &dev_info);
+
+ while ((capability = &dev_info.capabilities[i++])->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != idx->type)
+ continue;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ capability->sym.auth.algo == idx->algo.auth)
+ return &capability->sym;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ capability->sym.cipher.algo == idx->algo.cipher)
+ return &capability->sym;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ capability->sym.aead.algo == idx->algo.aead)
+ return &capability->sym;
+ }
+
+ return NULL;
+}
+VERSION_SYMBOL(rte_cryptodev_sym_capability_get, _v20, 20.0);
+
+const struct rte_cryptodev_symmetric_capability __vsym *
+rte_cryptodev_sym_capability_get_v21(uint8_t dev_id,
+ const struct rte_cryptodev_sym_capability_idx *idx)
+{
+ const struct rte_cryptodev_capabilities *capability;
+ struct rte_cryptodev_info dev_info;
+ int i = 0;
+
+ rte_cryptodev_info_get(dev_id, &dev_info);
+
+ while ((capability = &dev_info.capabilities[i++])->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != idx->type)
+ continue;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ capability->sym.auth.algo == idx->algo.auth)
+ return &capability->sym;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ capability->sym.cipher.algo == idx->algo.cipher)
+ return &capability->sym;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ capability->sym.aead.algo == idx->algo.aead)
+ return &capability->sym;
+ }
+
+ return NULL;
+}
+MAP_STATIC_SYMBOL(const struct rte_cryptodev_symmetric_capability *
+ rte_cryptodev_sym_capability_get(uint8_t dev_id,
+ const struct rte_cryptodev_sym_capability_idx *idx),
+ rte_cryptodev_sym_capability_get_v21);
+BIND_DEFAULT_SYMBOL(rte_cryptodev_sym_capability_get, _v21, 21);
+
+static int
+param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
+{
+ unsigned int next_size;
+
+ /* Check lower/upper bounds */
+ if (size < range->min)
+ return -1;
+
+ if (size > range->max)
+ return -1;
+
+ /* If range is actually only one value, size is correct */
+ if (range->increment == 0)
+ return 0;
+
+ /* Check if value is one of the supported sizes */
+ for (next_size = range->min; next_size <= range->max;
+ next_size += range->increment)
+ if (size == next_size)
+ return 0;
+
+ return -1;
+}
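+
+/**
+ * Example: with a range of {min = 16, max = 32, increment = 8} the
+ * sizes 16, 24 and 32 pass while 20 fails; with increment 0 any size
+ * within the bounds passes (typically min == max). Illustrative
+ * sketch only:
+ *
+ * @code
+ * const struct rte_crypto_param_range r = {
+ *	.min = 16, .max = 32, .increment = 8 };
+ *
+ * param_range_check(24, &r); // returns 0: supported size
+ * param_range_check(20, &r); // returns -1: not on an increment
+ * @endcode
+ */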
+
+const struct rte_cryptodev_asymmetric_xform_capability *
+rte_cryptodev_asym_capability_get(uint8_t dev_id,
+ const struct rte_cryptodev_asym_capability_idx *idx)
+{
+ const struct rte_cryptodev_capabilities *capability;
+ struct rte_cryptodev_info dev_info;
+ unsigned int i = 0;
+
+ memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
+ rte_cryptodev_info_get(dev_id, &dev_info);
+
+ while ((capability = &dev_info.capabilities[i++])->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
+ continue;
+
+ if (capability->asym.xform_capa.xform_type == idx->type)
+ return &capability->asym.xform_capa;
+ }
+ return NULL;
+}
+
+int
+rte_cryptodev_sym_capability_check_cipher(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t iv_size)
+{
+ if (param_range_check(key_size, &capability->cipher.key_size) != 0)
+ return -1;
+
+ if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
+ return -1;
+
+ return 0;
+}
+
+int
+rte_cryptodev_sym_capability_check_auth(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
+{
+ if (param_range_check(key_size, &capability->auth.key_size) != 0)
+ return -1;
+
+ if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
+ return -1;
+
+ if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
+ return -1;
+
+ return 0;
+}
+
+int
+rte_cryptodev_sym_capability_check_aead(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
+ uint16_t iv_size)
+{
+ if (param_range_check(key_size, &capability->aead.key_size) != 0)
+ return -1;
+
+ if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
+ return -1;
+
+ if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
+ return -1;
+
+ if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
+ return -1;
+
+ return 0;
+}
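+
+/**
+ * Example: querying a device's AES-GCM capability and validating the
+ * intended parameters against it with the check helpers above.
+ * Illustrative sketch only; dev_id is a hypothetical configured device.
+ *
+ * @code
+ * struct rte_cryptodev_sym_capability_idx idx = {
+ *	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ *	.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
+ * };
+ * const struct rte_cryptodev_symmetric_capability *cap =
+ *	rte_cryptodev_sym_capability_get(dev_id, &idx);
+ *
+ * // key, digest, AAD and IV sizes for AES-128-GCM
+ * if (cap == NULL ||
+ *     rte_cryptodev_sym_capability_check_aead(cap, 16, 16, 16, 12) != 0)
+ *	printf("AES-128-GCM parameters not supported\n");
+ * @endcode
+ */
+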
+int
+rte_cryptodev_asym_xform_capability_check_optype(
+ const struct rte_cryptodev_asymmetric_xform_capability *capability,
+ enum rte_crypto_asym_op_type op_type)
+{
+ if (capability->op_types & (1 << op_type))
+ return 1;
+
+ return 0;
+}
+
+int
+rte_cryptodev_asym_xform_capability_check_modlen(
+ const struct rte_cryptodev_asymmetric_xform_capability *capability,
+ uint16_t modlen)
+{
+	/* no need to check limits if min or max is 0 */
+ if (capability->modlen.min != 0) {
+ if (modlen < capability->modlen.min)
+ return -1;
+ }
+
+ if (capability->modlen.max != 0) {
+ if (modlen > capability->modlen.max)
+ return -1;
+ }
+
+	/* in any case, check that the given modlen is a multiple of the increment */
+ if (capability->modlen.increment != 0) {
+ if (modlen % (capability->modlen.increment))
+ return -1;
+ }
+
+ return 0;
+}
+
+
+const char *
+rte_cryptodev_get_feature_name(uint64_t flag)
+{
+ switch (flag) {
+ case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
+ return "SYMMETRIC_CRYPTO";
+ case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
+ return "ASYMMETRIC_CRYPTO";
+ case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
+ return "SYM_OPERATION_CHAINING";
+ case RTE_CRYPTODEV_FF_CPU_SSE:
+ return "CPU_SSE";
+ case RTE_CRYPTODEV_FF_CPU_AVX:
+ return "CPU_AVX";
+ case RTE_CRYPTODEV_FF_CPU_AVX2:
+ return "CPU_AVX2";
+ case RTE_CRYPTODEV_FF_CPU_AVX512:
+ return "CPU_AVX512";
+ case RTE_CRYPTODEV_FF_CPU_AESNI:
+ return "CPU_AESNI";
+ case RTE_CRYPTODEV_FF_HW_ACCELERATED:
+ return "HW_ACCELERATED";
+ case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
+ return "IN_PLACE_SGL";
+ case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
+ return "OOP_SGL_IN_SGL_OUT";
+ case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
+ return "OOP_SGL_IN_LB_OUT";
+ case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
+ return "OOP_LB_IN_SGL_OUT";
+ case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
+ return "OOP_LB_IN_LB_OUT";
+ case RTE_CRYPTODEV_FF_CPU_NEON:
+ return "CPU_NEON";
+ case RTE_CRYPTODEV_FF_CPU_ARM_CE:
+ return "CPU_ARM_CE";
+ case RTE_CRYPTODEV_FF_SECURITY:
+ return "SECURITY_PROTOCOL";
+ case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
+ return "RSA_PRIV_OP_KEY_EXP";
+ case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
+ return "RSA_PRIV_OP_KEY_QT";
+ case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
+ return "DIGEST_ENCRYPTED";
+ case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
+ return "SYM_CPU_CRYPTO";
+ case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
+ return "ASYM_SESSIONLESS";
+ case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
+ return "SYM_SESSIONLESS";
+ case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
+ return "NON_BYTE_ALIGNED_DATA";
+ default:
+ return NULL;
+ }
+}
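+
+/**
+ * Example: printing every feature name a device advertises by walking
+ * the feature flag bits. Illustrative sketch only; dev_id is a
+ * hypothetical valid device identifier.
+ *
+ * @code
+ * struct rte_cryptodev_info info;
+ * uint64_t flag;
+ *
+ * rte_cryptodev_info_get(dev_id, &info);
+ * for (flag = 1; flag != 0; flag <<= 1) {
+ *	const char *name =
+ *		rte_cryptodev_get_feature_name(info.feature_flags & flag);
+ *	if (name != NULL)
+ *		printf("%s\n", name);
+ * }
+ * @endcode
+ */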
+
+struct rte_cryptodev *
+rte_cryptodev_pmd_get_dev(uint8_t dev_id)
+{
+ return &cryptodev_globals.devs[dev_id];
+}
+
+struct rte_cryptodev *
+rte_cryptodev_pmd_get_named_dev(const char *name)
+{
+ struct rte_cryptodev *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
+ dev = &cryptodev_globals.devs[i];
+
+ if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
+ (strcmp(dev->data->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static inline uint8_t
+rte_cryptodev_is_valid_device_data(uint8_t dev_id)
+{
+ if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
+ rte_crypto_devices[dev_id].data == NULL)
+ return 0;
+
+ return 1;
+}
+
+unsigned int
+rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev = NULL;
+
+ if (!rte_cryptodev_is_valid_device_data(dev_id))
+ return 0;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (dev->attached != RTE_CRYPTODEV_ATTACHED)
+ return 0;
+ else
+ return 1;
+}
+
+
+int
+rte_cryptodev_get_dev_id(const char *name)
+{
+ unsigned i;
+
+ if (name == NULL)
+ return -1;
+
+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
+ if (!rte_cryptodev_is_valid_device_data(i))
+ continue;
+ if ((strcmp(cryptodev_globals.devs[i].data->name, name)
+ == 0) &&
+ (cryptodev_globals.devs[i].attached ==
+ RTE_CRYPTODEV_ATTACHED))
+ return i;
+ }
+
+ return -1;
+}
+
+uint8_t
+rte_cryptodev_count(void)
+{
+ return cryptodev_globals.nb_devs;
+}
+
+uint8_t
+rte_cryptodev_device_count_by_driver(uint8_t driver_id)
+{
+ uint8_t i, dev_count = 0;
+
+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
+ if (cryptodev_globals.devs[i].driver_id == driver_id &&
+ cryptodev_globals.devs[i].attached ==
+ RTE_CRYPTODEV_ATTACHED)
+ dev_count++;
+
+ return dev_count;
+}
+
+uint8_t
+rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
+ uint8_t nb_devices)
+{
+ uint8_t i, count = 0;
+ struct rte_cryptodev *devs = cryptodev_globals.devs;
+
+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
+ if (!rte_cryptodev_is_valid_device_data(i))
+ continue;
+
+ if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
+ int cmp;
+
+ cmp = strncmp(devs[i].device->driver->name,
+ driver_name,
+ strlen(driver_name) + 1);
+
+ if (cmp == 0)
+ devices[count++] = devs[i].data->dev_id;
+ }
+ }
+
+ return count;
+}
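+
+/**
+ * Example: collecting the identifiers of all attached devices bound to
+ * a given driver. Illustrative sketch only; the driver name is an
+ * arbitrary sample.
+ *
+ * @code
+ * uint8_t devs[RTE_CRYPTO_MAX_DEVS];
+ * uint8_t n = rte_cryptodev_devices_get("crypto_aesni_mb", devs,
+ *		RTE_DIM(devs));
+ *
+ * while (n-- > 0)
+ *	printf("dev %u: %s\n", devs[n], rte_cryptodev_name_get(devs[n]));
+ * @endcode
+ */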
+
+void *
+rte_cryptodev_get_sec_ctx(uint8_t dev_id)
+{
+ if (dev_id < RTE_CRYPTO_MAX_DEVS &&
+ (rte_crypto_devices[dev_id].feature_flags &
+ RTE_CRYPTODEV_FF_SECURITY))
+ return rte_crypto_devices[dev_id].security_ctx;
+
+ return NULL;
+}
+
+int
+rte_cryptodev_socket_id(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+ return -1;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ return dev->data->socket_id;
+}
+
+static inline int
+rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
+ int socket_id)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ int n;
+
+ /* generate memzone name */
+ n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
+ if (n >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_cryptodev_data),
+ socket_id, 0);
+ } else
+ mz = rte_memzone_lookup(mz_name);
+
+ if (mz == NULL)
+ return -ENOMEM;
+
+ *data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(*data, 0, sizeof(struct rte_cryptodev_data));
+
+ return 0;
+}
+
+static inline int
+rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ int n;
+
+ /* generate memzone name */
+ n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
+ if (n >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL)
+ return -ENOMEM;
+
+ RTE_ASSERT(*data == mz->addr);
+ *data = NULL;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return rte_memzone_free(mz);
+
+ return 0;
+}
+
+static uint8_t
+rte_cryptodev_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
+ if (rte_crypto_devices[dev_id].attached ==
+ RTE_CRYPTODEV_DETACHED)
+ return dev_id;
+ }
+ return RTE_CRYPTO_MAX_DEVS;
+}
+
+struct rte_cryptodev *
+rte_cryptodev_pmd_allocate(const char *name, int socket_id)
+{
+ struct rte_cryptodev *cryptodev;
+ uint8_t dev_id;
+
+ if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
+ CDEV_LOG_ERR("Crypto device with name %s already "
+ "allocated!", name);
+ return NULL;
+ }
+
+ dev_id = rte_cryptodev_find_free_device_index();
+ if (dev_id == RTE_CRYPTO_MAX_DEVS) {
+ CDEV_LOG_ERR("Reached maximum number of crypto devices");
+ return NULL;
+ }
+
+ cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (cryptodev->data == NULL) {
+ struct rte_cryptodev_data **cryptodev_data =
+ &cryptodev_globals.data[dev_id];
+
+ int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
+ socket_id);
+
+ if (retval < 0 || *cryptodev_data == NULL)
+ return NULL;
+
+ cryptodev->data = *cryptodev_data;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ strlcpy(cryptodev->data->name, name,
+ RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ cryptodev->data->dev_id = dev_id;
+ cryptodev->data->socket_id = socket_id;
+ cryptodev->data->dev_started = 0;
+ }
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
+
+ cryptodev_globals.nb_devs++;
+ }
+
+ return cryptodev;
+}
+
+int
+rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
+{
+ int ret;
+ uint8_t dev_id;
+
+ if (cryptodev == NULL)
+ return -EINVAL;
+
+ dev_id = cryptodev->data->dev_id;
+
+ /* Close device only if device operations have been set */
+ if (cryptodev->dev_ops) {
+ ret = rte_cryptodev_close(dev_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
+ if (ret < 0)
+ return ret;
+
+ cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+ cryptodev_globals.nb_devs--;
+ return 0;
+}
+
+uint16_t
+rte_cryptodev_queue_pair_count(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_is_valid_device_data(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return 0;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ return dev->data->nb_queue_pairs;
+}
+
+static int
+rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
+ int socket_id)
+{
+ struct rte_cryptodev_info dev_info;
+ void **qp;
+ unsigned i;
+
+ if ((dev == NULL) || (nb_qpairs < 1)) {
+ CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
+ dev, nb_qpairs);
+ return -EINVAL;
+ }
+
+ CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
+ nb_qpairs, dev->data->dev_id);
+
+ memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+
+ if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
+ CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
+ nb_qpairs, dev->data->dev_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->queue_pairs == NULL) { /* first time configuration */
+ dev->data->queue_pairs = rte_zmalloc_socket(
+ "cryptodev->queue_pairs",
+ sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (dev->data->queue_pairs == NULL) {
+ dev->data->nb_queue_pairs = 0;
+ CDEV_LOG_ERR("failed to get memory for qp meta data, "
+ "nb_queues %u",
+ nb_qpairs);
+ return -(ENOMEM);
+ }
+ } else { /* re-configure */
+ int ret;
+ uint16_t old_nb_queues = dev->data->nb_queue_pairs;
+
+ qp = dev->data->queue_pairs;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
+ -ENOTSUP);
+
+ for (i = nb_qpairs; i < old_nb_queues; i++) {
+ ret = (*dev->dev_ops->queue_pair_release)(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
+ RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ CDEV_LOG_ERR("failed to realloc qp meta data,"
+ " nb_queues %u", nb_qpairs);
+ return -(ENOMEM);
+ }
+
+ if (nb_qpairs > old_nb_queues) {
+ uint16_t new_qs = nb_qpairs - old_nb_queues;
+
+ memset(qp + old_nb_queues, 0,
+ sizeof(qp[0]) * new_qs);
+ }
+
+ dev->data->queue_pairs = qp;
+
+ }
+ dev->data->nb_queue_pairs = nb_qpairs;
+ return 0;
+}
+
+int
+rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
+{
+ struct rte_cryptodev *dev;
+ int diag;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+
+ if (dev->data->dev_started) {
+ CDEV_LOG_ERR(
+ "device %d must be stopped to allow configuration", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+	/* Set up the new number of queue pairs and reconfigure the device. */
+ diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
+ config->socket_id);
+ if (diag != 0) {
+ CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
+ dev_id, diag);
+ return diag;
+ }
+
+ rte_cryptodev_trace_configure(dev_id, config);
+ return (*dev->dev_ops->dev_configure)(dev, config);
+}
+
+
+int
+rte_cryptodev_start(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+ int diag;
+
+ CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+ if (dev->data->dev_started != 0) {
+ CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
+ dev_id);
+ return 0;
+ }
+
+ diag = (*dev->dev_ops->dev_start)(dev);
+ rte_cryptodev_trace_start(dev_id, diag);
+ if (diag == 0)
+ dev->data->dev_started = 1;
+ else
+ return diag;
+
+ return 0;
+}
+
+void
+rte_cryptodev_stop(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+ if (dev->data->dev_started == 0) {
+ CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
+ dev_id);
+ return;
+ }
+
+ (*dev->dev_ops->dev_stop)(dev);
+ rte_cryptodev_trace_stop(dev_id);
+ dev->data->dev_started = 0;
+}
+
+int
+rte_cryptodev_close(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+ int retval;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -1;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+
+ /* Device must be stopped before it can be closed */
+ if (dev->data->dev_started == 1) {
+ CDEV_LOG_ERR("Device %u must be stopped before closing",
+ dev_id);
+ return -EBUSY;
+ }
+
+ /* We can't close the device if there are outstanding sessions in use */
+ if (dev->data->session_pool != NULL) {
+ if (!rte_mempool_full(dev->data->session_pool)) {
+ CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
+ "has sessions still in use, free "
+ "all sessions before calling close",
+ (unsigned)dev_id);
+ return -EBUSY;
+ }
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+ retval = (*dev->dev_ops->dev_close)(dev);
+ rte_cryptodev_trace_close(dev_id, retval);
+
+ if (capability_copy[dev_id]) {
+ free(capability_copy[dev_id]);
+ capability_copy[dev_id] = NULL;
+ }
+ is_capability_checked[dev_id] = 0;
+
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+int
+rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
+
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ if (queue_pair_id >= dev->data->nb_queue_pairs) {
+ CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+ return -EINVAL;
+ }
+
+ if (!qp_conf) {
+ CDEV_LOG_ERR("qp_conf cannot be NULL\n");
+ return -EINVAL;
+ }
+
+ if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
+ (!qp_conf->mp_session && qp_conf->mp_session_private)) {
+ CDEV_LOG_ERR("Invalid mempools\n");
+ return -EINVAL;
+ }
+
+ if (qp_conf->mp_session) {
+ struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
+ uint32_t obj_size = qp_conf->mp_session->elt_size;
+ uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
+ struct rte_cryptodev_sym_session s = {0};
+
+ pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
+ if (!pool_priv || qp_conf->mp_session->private_data_size <
+ sizeof(*pool_priv)) {
+ CDEV_LOG_ERR("Invalid mempool\n");
+ return -EINVAL;
+ }
+
+ s.nb_drivers = pool_priv->nb_drivers;
+ s.user_data_sz = pool_priv->user_data_sz;
+
+ if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
+ obj_size) || (s.nb_drivers <= dev->driver_id) ||
+ rte_cryptodev_sym_get_private_session_size(dev_id) >
+ obj_priv_size) {
+ CDEV_LOG_ERR("Invalid mempool\n");
+ return -EINVAL;
+ }
+ }
+
+ if (dev->data->dev_started) {
+ CDEV_LOG_ERR(
+ "device %d must be stopped to allow configuration", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
+
+ rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
+ return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
+ socket_id);
+}
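+
+/**
+ * Example: the minimal bring-up sequence an application follows:
+ * configure the device, set up each queue pair while it is stopped,
+ * then start it. Illustrative sketch only; dev_id, socket and the two
+ * session mempools are hypothetical.
+ *
+ * @code
+ * struct rte_cryptodev_config conf = {
+ *	.socket_id = socket,
+ *	.nb_queue_pairs = 1,
+ * };
+ * struct rte_cryptodev_qp_conf qp_conf = {
+ *	.nb_descriptors = 2048,
+ *	.mp_session = sess_mp,
+ *	.mp_session_private = sess_priv_mp,
+ * };
+ *
+ * if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
+ *     rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, socket) < 0 ||
+ *     rte_cryptodev_start(dev_id) < 0)
+ *	rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
+ * @endcode
+ */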
+
+
+int
+rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+ return -ENODEV;
+ }
+
+ if (stats == NULL) {
+ CDEV_LOG_ERR("Invalid stats ptr");
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ memset(stats, 0, sizeof(*stats));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+ (*dev->dev_ops->stats_get)(dev, stats);
+ return 0;
+}
+
+void
+rte_cryptodev_stats_reset(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+ (*dev->dev_ops->stats_reset)(dev);
+}
+
+static void
+get_v20_capabilities(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+ const struct rte_cryptodev_capabilities *capability;
+ uint8_t found_invalid_capa = 0;
+ uint8_t counter = 0;
+
+ for (capability = dev_info->capabilities;
+ capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ ++capability, ++counter) {
+ if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+ capability->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_AEAD
+ && capability->sym.aead.algo >=
+ RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
+ found_invalid_capa = 1;
+ counter--;
+ }
+ }
+ is_capability_checked[dev_id] = 1;
+ if (!found_invalid_capa)
+ return;
+	/* +1 to preserve room for the end-of-capabilities terminator */
+	capability_copy[dev_id] = malloc((counter + 1) *
+		sizeof(struct rte_cryptodev_capabilities));
+ if (capability_copy[dev_id] == NULL) {
+ /*
+ * error case - no memory to store the trimmed
+ * list, so have to return an empty list
+ */
+ dev_info->capabilities =
+ cryptodev_undefined_capabilities;
+ is_capability_checked[dev_id] = 0;
+ } else {
+ counter = 0;
+ for (capability = dev_info->capabilities;
+ capability->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ capability++) {
+ if (!(capability->op ==
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC
+ && capability->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_AEAD
+ && capability->sym.aead.algo >=
+ RTE_CRYPTO_AEAD_CHACHA20_POLY1305)) {
+ capability_copy[dev_id][counter++] =
+ *capability;
+ }
+		}
+		/* copy the terminating end-of-capabilities entry as well */
+		capability_copy[dev_id][counter] = *capability;
+		dev_info->capabilities =
+			capability_copy[dev_id];
+ }
+}
+
+void __vsym
+rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+ return;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+
+ memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+ (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+ if (capability_copy[dev_id] == NULL) {
+ if (!is_capability_checked[dev_id])
+ get_v20_capabilities(dev_id, dev_info);
+ } else
+ dev_info->capabilities = capability_copy[dev_id];
+
+ dev_info->driver_name = dev->device->driver->name;
+ dev_info->device = dev->device;
+}
+VERSION_SYMBOL(rte_cryptodev_info_get, _v20, 20.0);
+
+void __vsym
+rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+ return;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+
+ memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+ (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+ dev_info->driver_name = dev->device->driver->name;
+ dev_info->device = dev->device;
+}
+MAP_STATIC_SYMBOL(void rte_cryptodev_info_get(uint8_t dev_id,
+ struct rte_cryptodev_info *dev_info), rte_cryptodev_info_get_v21);
+BIND_DEFAULT_SYMBOL(rte_cryptodev_info_get, _v21, 21);
+
+int
+rte_cryptodev_callback_register(uint8_t dev_id,
+ enum rte_cryptodev_event_type event,
+ rte_cryptodev_cb_fn cb_fn, void *cb_arg)
+{
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_callback *user_cb;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ rte_spinlock_lock(&rte_cryptodev_cb_lock);
+
+ TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
+ if (user_cb->cb_fn == cb_fn &&
+ user_cb->cb_arg == cb_arg &&
+ user_cb->event == event) {
+ break;
+ }
+ }
+
+ /* create a new callback. */
+ if (user_cb == NULL) {
+ user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+ sizeof(struct rte_cryptodev_callback), 0);
+ if (user_cb != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ user_cb->event = event;
+ TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
+ }
+ }
+
+ rte_spinlock_unlock(&rte_cryptodev_cb_lock);
+ return (user_cb == NULL) ? -ENOMEM : 0;
+}
+
+int
+rte_cryptodev_callback_unregister(uint8_t dev_id,
+ enum rte_cryptodev_event_type event,
+ rte_cryptodev_cb_fn cb_fn, void *cb_arg)
+{
+ int ret;
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_callback *cb, *next;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ rte_spinlock_lock(&rte_cryptodev_cb_lock);
+
+ ret = 0;
+ for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn != cb_fn || cb->event != event ||
+ (cb->cb_arg != (void *)-1 &&
+ cb->cb_arg != cb_arg))
+ continue;
+
+ /*
+ * if this callback is not executing right now,
+ * then remove it.
+ */
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
+ rte_free(cb);
+ } else {
+ ret = -EAGAIN;
+ }
+ }
+
+ rte_spinlock_unlock(&rte_cryptodev_cb_lock);
+ return ret;
+}
+
+void
+rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
+ enum rte_cryptodev_event_type event)
+{
+ struct rte_cryptodev_callback *cb_lst;
+ struct rte_cryptodev_callback dev_cb;
+
+ rte_spinlock_lock(&rte_cryptodev_cb_lock);
+ TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
+ if (cb_lst->cb_fn == NULL || cb_lst->event != event)
+ continue;
+ dev_cb = *cb_lst;
+ cb_lst->active = 1;
+ rte_spinlock_unlock(&rte_cryptodev_cb_lock);
+ dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
+ dev_cb.cb_arg);
+ rte_spinlock_lock(&rte_cryptodev_cb_lock);
+ cb_lst->active = 0;
+ }
+ rte_spinlock_unlock(&rte_cryptodev_cb_lock);
+}
+
+
+int
+rte_cryptodev_sym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms,
+ struct rte_mempool *mp)
+{
+ struct rte_cryptodev *dev;
+ uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
+ dev_id);
+ uint8_t index;
+ int ret;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (sess == NULL || xforms == NULL || dev == NULL)
+ return -EINVAL;
+
+ if (mp->elt_size < sess_priv_sz)
+ return -EINVAL;
+
+ index = dev->driver_id;
+ if (index >= sess->nb_drivers)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
+
+ if (sess->sess_data[index].refcnt == 0) {
+ ret = dev->dev_ops->sym_session_configure(dev, xforms,
+ sess, mp);
+ if (ret < 0) {
+ CDEV_LOG_ERR(
+ "dev_id %d failed to configure session details",
+ dev_id);
+ return ret;
+ }
+ }
+
+ rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
+ sess->sess_data[index].refcnt++;
+ return 0;
+}
+
+int
+rte_cryptodev_asym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_crypto_asym_xform *xforms,
+ struct rte_mempool *mp)
+{
+ struct rte_cryptodev *dev;
+ uint8_t index;
+ int ret;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (sess == NULL || xforms == NULL || dev == NULL)
+ return -EINVAL;
+
+ index = dev->driver_id;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
+ -ENOTSUP);
+
+ if (sess->sess_private_data[index] == NULL) {
+ ret = dev->dev_ops->asym_session_configure(dev,
+ xforms,
+ sess, mp);
+ if (ret < 0) {
+ CDEV_LOG_ERR(
+ "dev_id %d failed to configure session details",
+ dev_id);
+ return ret;
+ }
+ }
+
+ rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
+ return 0;
+}
+
+struct rte_mempool *
+rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
+ uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
+ int socket_id)
+{
+ struct rte_mempool *mp;
+ struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
+ uint32_t obj_sz;
+
+ obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
+ if (obj_sz > elt_size)
+ CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
+ obj_sz);
+ else
+ obj_sz = elt_size;
+
+ mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
+ (uint32_t)(sizeof(*pool_priv)),
+ NULL, NULL, NULL, NULL,
+ socket_id, 0);
+ if (mp == NULL) {
+ CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
+ __func__, name, rte_errno);
+ return NULL;
+ }
+
+ pool_priv = rte_mempool_get_priv(mp);
+ if (!pool_priv) {
+ CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
+ __func__, name);
+ rte_mempool_free(mp);
+ return NULL;
+ }
+
+ pool_priv->nb_drivers = nb_drivers;
+ pool_priv->user_data_sz = user_data_size;
+
+ rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
+ elt_size, cache_size, user_data_size, mp);
+ return mp;
+}
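+
+/**
+ * Example: creating the pair of mempools a queue pair expects, one for
+ * session headers and one for driver-private session data.
+ * Illustrative sketch only; dev_id, the pool names and the sizes are
+ * hypothetical.
+ *
+ * @code
+ * struct rte_mempool *sess_mp, *sess_priv_mp;
+ *
+ * sess_mp = rte_cryptodev_sym_session_pool_create("sess_hdr_pool",
+ *		1024, 0, 64, 0, rte_socket_id());
+ * sess_priv_mp = rte_mempool_create("sess_priv_pool", 1024,
+ *		rte_cryptodev_sym_get_private_session_size(dev_id),
+ *		64, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+ * @endcode
+ */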
+
+static unsigned int
+rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
+{
+ return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
+ sess->user_data_sz;
+}
+
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(struct rte_mempool *mp)
+{
+ struct rte_cryptodev_sym_session *sess;
+ struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
+
+ if (!mp) {
+ CDEV_LOG_ERR("Invalid mempool\n");
+ return NULL;
+ }
+
+ pool_priv = rte_mempool_get_priv(mp);
+
+ if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
+ CDEV_LOG_ERR("Invalid mempool\n");
+ return NULL;
+ }
+
+ /* Allocate a session structure from the session pool */
+ if (rte_mempool_get(mp, (void **)&sess)) {
+ CDEV_LOG_ERR("couldn't get object from session mempool");
+ return NULL;
+ }
+
+ sess->nb_drivers = pool_priv->nb_drivers;
+ sess->user_data_sz = pool_priv->user_data_sz;
+ sess->opaque_data = 0;
+
+	/* Clear the per-driver session data; this also clears the
+	 * user data area placed after it.
+	 */
+ memset(sess->sess_data, 0,
+ rte_cryptodev_sym_session_data_size(sess));
+
+ rte_cryptodev_trace_sym_session_create(mp, sess);
+ return sess;
+}
+
+struct rte_cryptodev_asym_session *
+rte_cryptodev_asym_session_create(struct rte_mempool *mp)
+{
+ struct rte_cryptodev_asym_session *sess;
+
+ /* Allocate a session structure from the session pool */
+ if (rte_mempool_get(mp, (void **)&sess)) {
+ CDEV_LOG_ERR("couldn't get object from session mempool");
+ return NULL;
+ }
+
+	/* Clear the per-driver session pointers, including the trailing
+	 * flag that indicates presence of private data.
+	 */
+ memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
+
+ rte_cryptodev_trace_asym_session_create(mp, sess);
+ return sess;
+}
+
+int
+rte_cryptodev_sym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct rte_cryptodev *dev;
+ uint8_t driver_id;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (dev == NULL || sess == NULL)
+ return -EINVAL;
+
+ driver_id = dev->driver_id;
+ if (sess->sess_data[driver_id].refcnt == 0)
+ return 0;
+ if (--sess->sess_data[driver_id].refcnt != 0)
+ return -EBUSY;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
+
+ dev->dev_ops->sym_session_clear(dev, sess);
+
+ rte_cryptodev_trace_sym_session_clear(dev_id, sess);
+ return 0;
+}
+
+int
+rte_cryptodev_asym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (dev == NULL || sess == NULL)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
+
+ dev->dev_ops->asym_session_clear(dev, sess);
+
+	rte_cryptodev_trace_asym_session_clear(dev_id, sess);
+ return 0;
+}
+
+int
+rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t i;
+ struct rte_mempool *sess_mp;
+
+ if (sess == NULL)
+ return -EINVAL;
+
+ /* Check that all device private data has been freed */
+ for (i = 0; i < sess->nb_drivers; i++) {
+ if (sess->sess_data[i].refcnt != 0)
+ return -EBUSY;
+ }
+
+ /* Return session to mempool */
+ sess_mp = rte_mempool_from_obj(sess);
+ rte_mempool_put(sess_mp, sess);
+
+ rte_cryptodev_trace_sym_session_free(sess);
+ return 0;
+}
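+
+/**
+ * Example: the full life cycle of a symmetric session: allocate the
+ * header, bind it to a device with a transform chain, then tear it
+ * down in reverse order. Illustrative sketch only; dev_id, xform and
+ * the mempools are hypothetical.
+ *
+ * @code
+ * struct rte_cryptodev_sym_session *sess =
+ *	rte_cryptodev_sym_session_create(sess_mp);
+ *
+ * if (sess == NULL || rte_cryptodev_sym_session_init(dev_id, sess,
+ *		&xform, sess_priv_mp) < 0)
+ *	return -1;
+ * // ... enqueue/dequeue operations referencing sess ...
+ * rte_cryptodev_sym_session_clear(dev_id, sess);
+ * rte_cryptodev_sym_session_free(sess);
+ * @endcode
+ */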
+
+int
+rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
+{
+ uint8_t i;
+ void *sess_priv;
+ struct rte_mempool *sess_mp;
+
+ if (sess == NULL)
+ return -EINVAL;
+
+ /* Check that all device private data has been freed */
+ for (i = 0; i < nb_drivers; i++) {
+ sess_priv = get_asym_session_private_data(sess, i);
+ if (sess_priv != NULL)
+ return -EBUSY;
+ }
+
+ /* Return session to mempool */
+ sess_mp = rte_mempool_from_obj(sess);
+ rte_mempool_put(sess_mp, sess);
+
+ rte_cryptodev_trace_asym_session_free(sess);
+ return 0;
+}
+
+unsigned int
+rte_cryptodev_sym_get_header_session_size(void)
+{
+ /*
+ * Header contains pointers to the private data of all registered
+	 * drivers and all information necessary to safely clear or free
+	 * all sessions.
+ */
+ struct rte_cryptodev_sym_session s = {0};
+
+ s.nb_drivers = nb_drivers;
+
+ return (unsigned int)(sizeof(s) +
+ rte_cryptodev_sym_session_data_size(&s));
+}
+
+unsigned int
+rte_cryptodev_sym_get_existing_header_session_size(
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (!sess)
+ return 0;
+ else
+ return (unsigned int)(sizeof(*sess) +
+ rte_cryptodev_sym_session_data_size(sess));
+}
+
+unsigned int
+rte_cryptodev_asym_get_header_session_size(void)
+{
+ /*
+ * Header contains pointers to the private data
+ * of all registered drivers, and a flag which
+ * indicates presence of private data
+ */
+ return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
+}
+
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+ unsigned int priv_sess_size;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+ return 0;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (*dev->dev_ops->sym_session_get_size == NULL)
+ return 0;
+
+ priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
+
+ return priv_sess_size;
+}
+
+unsigned int
+rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+ unsigned int header_size = sizeof(void *) * nb_drivers;
+ unsigned int priv_sess_size;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+ return 0;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (*dev->dev_ops->asym_session_get_size == NULL)
+ return 0;
+
+ priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
+ if (priv_sess_size < header_size)
+ return header_size;
+
+ return priv_sess_size;
+
+}
+
+int
+rte_cryptodev_sym_session_set_user_data(
+ struct rte_cryptodev_sym_session *sess,
+ void *data,
+ uint16_t size)
+{
+ if (sess == NULL)
+ return -EINVAL;
+
+ if (sess->user_data_sz < size)
+ return -ENOMEM;
+
+ rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
+ return 0;
+}
+
+void *
+rte_cryptodev_sym_session_get_user_data(
+ struct rte_cryptodev_sym_session *sess)
+{
+ if (sess == NULL || sess->user_data_sz == 0)
+ return NULL;
+
+ return (void *)(sess->sess_data + sess->nb_drivers);
+}
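+
+/**
+ * Example: stashing per-session application state in the user data
+ * area reserved at pool creation time. Illustrative sketch only;
+ * struct app_ctx and its flow_id field are hypothetical and must fit
+ * within the pool's user_data_size.
+ *
+ * @code
+ * struct app_ctx ctx = { .flow_id = 7 };
+ *
+ * rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
+ * struct app_ctx *p = rte_cryptodev_sym_session_get_user_data(sess);
+ * @endcode
+ */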
+
+static inline void
+sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
+{
+ uint32_t i;
+ for (i = 0; i < vec->num; i++)
+ vec->status[i] = errnum;
+}
+
+uint32_t
+rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_sym_vec *vec)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ sym_crypto_fill_status(vec, EINVAL);
+ return 0;
+ }
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (*dev->dev_ops->sym_cpu_process == NULL ||
+ !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
+ sym_crypto_fill_status(vec, ENOTSUP);
+ return 0;
+ }
+
+ return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
+}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->phys_addr = rte_mem_virt2iova(_op_data);
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+
+ unsigned elt_size = sizeof(struct rte_crypto_op) +
+ priv_size;
+
+ if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ elt_size += sizeof(struct rte_crypto_sym_op);
+ } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+ elt_size += sizeof(struct rte_crypto_asym_op);
+ } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
+ sizeof(struct rte_crypto_asym_op));
+ } else {
+ CDEV_LOG_ERR("Invalid op_type\n");
+ return NULL;
+ }
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->priv_size < priv_size) {
+ mp = NULL;
+ CDEV_LOG_ERR("Mempool %s already exists but with "
+ "incompatible parameters", name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ CDEV_LOG_ERR("Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
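+
+/**
+ * Example: creating a symmetric operation pool with room for a 16-byte
+ * IV in each operation's private data, then drawing one operation from
+ * it. Illustrative sketch only; the pool name and sizes are
+ * hypothetical.
+ *
+ * @code
+ * struct rte_mempool *op_mp = rte_crypto_op_pool_create("sym_op_pool",
+ *	RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 16, rte_socket_id());
+ *
+ * struct rte_crypto_op *op =
+ *	rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ * // ... fill op->sym, enqueue and dequeue the operation ...
+ * rte_crypto_op_free(op);
+ * @endcode
+ */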
+
+int
+rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
+{
+ struct rte_cryptodev *dev = NULL;
+ uint32_t i = 0;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
+ int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "%s_%u", dev_name_prefix, i);
+
+ if (ret < 0)
+ return ret;
+
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (!dev)
+ return 0;
+ }
+
+ return -1;
+}
+
+TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
+
+static struct cryptodev_driver_list cryptodev_driver_list =
+ TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
+
+int
+rte_cryptodev_driver_id_get(const char *name)
+{
+ struct cryptodev_driver *driver;
+ const char *driver_name;
+
+ if (name == NULL) {
+ RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
+ return -1;
+ }
+
+ TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
+ driver_name = driver->driver->name;
+ if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
+ return driver->id;
+ }
+ return -1;
+}
+
+const char *
+rte_cryptodev_name_get(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_is_valid_device_data(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+ return NULL;
+ }
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (dev == NULL)
+ return NULL;
+
+ return dev->data->name;
+}
+
+const char *
+rte_cryptodev_driver_name_get(uint8_t driver_id)
+{
+ struct cryptodev_driver *driver;
+
+ TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
+ if (driver->id == driver_id)
+ return driver->driver->name;
+ return NULL;
+}
+
+uint8_t
+rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
+ const struct rte_driver *drv)
+{
+ crypto_drv->driver = drv;
+ crypto_drv->id = nb_drivers;
+
+ TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
+
+ return nb_drivers++;
+}
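
A short sketch (editorial, not part of the patch) of how an application resolves a registered driver name to the id assigned above; "crypto_aesni_mb" is only an example and must match a loaded PMD.

#include <stdio.h>
#include <rte_cryptodev.h>

static void
show_driver_id(void)
{
	int id = rte_cryptodev_driver_id_get("crypto_aesni_mb");

	if (id < 0)
		printf("driver not registered\n");
	else
		printf("driver %s has id %d\n",
		       rte_cryptodev_driver_name_get((uint8_t)id), id);
}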
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev.h b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev.h
new file mode 100644
index 000000000..4aaee7330
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev.h
@@ -0,0 +1,1341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020 Intel Corporation.
+ */
+
+#ifndef _RTE_CRYPTODEV_H_
+#define _RTE_CRYPTODEV_H_
+
+/**
+ * @file rte_cryptodev.h
+ *
+ * RTE Cryptographic Device APIs
+ *
+ * Defines RTE Crypto Device APIs for the provisioning of cipher and
+ * authentication operations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_kvargs.h"
+#include "rte_crypto.h"
+#include "rte_dev.h"
+#include <rte_common.h>
+#include <rte_config.h>
+
+#include "rte_cryptodev_trace_fp.h"
+
+extern const char **rte_cyptodev_names;
+
+/* Logging Macros */
+
+#define CDEV_LOG_ERR(...) \
+ RTE_LOG(ERR, CRYPTODEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define CDEV_LOG_INFO(...) \
+ RTE_LOG(INFO, CRYPTODEV, \
+ RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define CDEV_LOG_DEBUG(...) \
+ RTE_LOG(DEBUG, CRYPTODEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define CDEV_PMD_TRACE(...) \
+ RTE_LOG(DEBUG, CRYPTODEV, \
+ RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+/**
+ * A macro that returns a pointer to an offset from the start
+ * of the crypto operation structure (rte_crypto_op)
+ *
+ * The returned pointer is cast to type t.
+ *
+ * @param c
+ * The crypto operation.
+ * @param o
+ * The offset from the start of the crypto operation.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_crypto_op_ctod_offset(c, t, o) \
+ ((t)((char *)(c) + (o)))
+
+/**
+ * A macro that returns the physical address that points
+ * to an offset from the start of the crypto operation
+ * (rte_crypto_op)
+ *
+ * @param c
+ * The crypto operation.
+ * @param o
+ * The offset from the start of the crypto operation
+ * to calculate address from.
+ */
+#define rte_crypto_op_ctophys_offset(c, o) \
+ (rte_iova_t)((c)->phys_addr + (o))
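
A sketch of the common pattern these two macros support (editorial assumption, not from the patch): the per-op private area following struct rte_crypto_sym_op carries the cipher IV, and both its virtual and IOVA addresses are derived from the op. IV_OFFSET assumes the op pool was created with enough private space.

#include <rte_crypto.h>

#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

static void
point_at_iv(struct rte_crypto_op *op)
{
	/* virtual and IOVA views of the same per-op IV area */
	uint8_t *iv_va = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	rte_iova_t iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);

	(void)iv_va;
	(void)iv_iova;
}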
+
+/**
+ * Crypto parameters range description
+ */
+struct rte_crypto_param_range {
+ uint16_t min; /**< minimum size */
+ uint16_t max; /**< maximum size */
+ uint16_t increment;
+ /**< if a range of sizes is supported,
+ * this parameter is used to indicate
+ * increments in byte size that are supported
+ * between the minimum and maximum
+ */
+};
+
+/**
+ * Symmetric Crypto Capability
+ */
+struct rte_cryptodev_symmetric_capability {
+ enum rte_crypto_sym_xform_type xform_type;
+ /**< Transform type : Authentication / Cipher / AEAD */
+ RTE_STD_C11
+ union {
+ struct {
+ enum rte_crypto_auth_algorithm algo;
+ /**< authentication algorithm */
+ uint16_t block_size;
+ /**< algorithm block size */
+ struct rte_crypto_param_range key_size;
+ /**< auth key size range */
+ struct rte_crypto_param_range digest_size;
+ /**< digest size range */
+ struct rte_crypto_param_range aad_size;
+ /**< Additional authentication data size range */
+ struct rte_crypto_param_range iv_size;
+ /**< Initialisation vector data size range */
+ } auth;
+ /**< Symmetric Authentication transform capabilities */
+ struct {
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+ uint16_t block_size;
+ /**< algorithm block size */
+ struct rte_crypto_param_range key_size;
+ /**< cipher key size range */
+ struct rte_crypto_param_range iv_size;
+ /**< Initialisation vector data size range */
+ } cipher;
+ /**< Symmetric Cipher transform capabilities */
+ struct {
+ enum rte_crypto_aead_algorithm algo;
+ /**< AEAD algorithm */
+ uint16_t block_size;
+ /**< algorithm block size */
+ struct rte_crypto_param_range key_size;
+ /**< AEAD key size range */
+ struct rte_crypto_param_range digest_size;
+ /**< digest size range */
+ struct rte_crypto_param_range aad_size;
+ /**< Additional authentication data size range */
+ struct rte_crypto_param_range iv_size;
+ /**< Initialisation vector data size range */
+ } aead;
+ };
+};
+
+/**
+ * Asymmetric Xform Crypto Capability
+ *
+ */
+struct rte_cryptodev_asymmetric_xform_capability {
+ enum rte_crypto_asym_xform_type xform_type;
+ /**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
+
+ uint32_t op_types;
+ /**< bitmask for supported rte_crypto_asym_op_type */
+
+ __extension__
+ union {
+ struct rte_crypto_param_range modlen;
+ /**< Range of modulus length supported by modulus based xform.
+ * Value 0 means implementation default
+ */
+ };
+};
+
+/**
+ * Asymmetric Crypto Capability
+ *
+ */
+struct rte_cryptodev_asymmetric_capability {
+ struct rte_cryptodev_asymmetric_xform_capability xform_capa;
+};
+
+
+/** Structure used to capture a capability of a crypto device */
+struct rte_cryptodev_capabilities {
+ enum rte_crypto_op_type op;
+ /**< Operation type */
+
+ RTE_STD_C11
+ union {
+ struct rte_cryptodev_symmetric_capability sym;
+ /**< Symmetric operation capability parameters */
+ struct rte_cryptodev_asymmetric_capability asym;
+ /**< Asymmetric operation capability parameters */
+ };
+};
+
+/** Structure used to describe crypto algorithms */
+struct rte_cryptodev_sym_capability_idx {
+ enum rte_crypto_sym_xform_type type;
+ union {
+ enum rte_crypto_cipher_algorithm cipher;
+ enum rte_crypto_auth_algorithm auth;
+ enum rte_crypto_aead_algorithm aead;
+ } algo;
+};
+
+/**
+ * Structure used to describe asymmetric crypto xforms
+ * Each xform maps to one asym algorithm.
+ *
+ */
+struct rte_cryptodev_asym_capability_idx {
+ enum rte_crypto_asym_xform_type type;
+ /**< Asymmetric xform (algo) type */
+};
+
+/**
+ * Provide capabilities available for a given device and algorithm
+ *
+ * @param dev_id The identifier of the device.
+ * @param idx Description of crypto algorithms.
+ *
+ * @return
+ * - Return description of the symmetric crypto capability if it exists.
+ * - Return NULL if the capability does not exist.
+ */
+const struct rte_cryptodev_symmetric_capability *
+rte_cryptodev_sym_capability_get_v20(uint8_t dev_id,
+ const struct rte_cryptodev_sym_capability_idx *idx);
+
+const struct rte_cryptodev_symmetric_capability *
+rte_cryptodev_sym_capability_get_v21(uint8_t dev_id,
+ const struct rte_cryptodev_sym_capability_idx *idx);
+
+const struct rte_cryptodev_symmetric_capability *
+rte_cryptodev_sym_capability_get(uint8_t dev_id,
+ const struct rte_cryptodev_sym_capability_idx *idx);
+
+/**
+ * Provide capabilities available for a given device and xform
+ *
+ * @param dev_id The identifier of the device.
+ * @param idx Description of asym crypto xform.
+ *
+ * @return
+ * - Return description of the asymmetric crypto capability if it exists.
+ * - Return NULL if the capability does not exist.
+ */
+__rte_experimental
+const struct rte_cryptodev_asymmetric_xform_capability *
+rte_cryptodev_asym_capability_get(uint8_t dev_id,
+ const struct rte_cryptodev_asym_capability_idx *idx);
+
+/**
+ * Check if key size and initial vector are supported
+ * in crypto cipher capability
+ *
+ * @param capability Description of the symmetric crypto capability.
+ * @param key_size Cipher key size.
+ * @param iv_size Cipher initial vector size.
+ *
+ * @return
+ * - Return 0 if the parameters are in range of the capability.
+ * - Return -1 if the parameters are out of range of the capability.
+ */
+int
+rte_cryptodev_sym_capability_check_cipher(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t iv_size);
+
+/**
+ * Check if key size and initial vector are supported
+ * in crypto auth capability
+ *
+ * @param capability Description of the symmetric crypto capability.
+ * @param key_size Auth key size.
+ * @param digest_size Auth digest size.
+ * @param iv_size Auth initial vector size.
+ *
+ * @return
+ * - Return 0 if the parameters are in range of the capability.
+ * - Return -1 if the parameters are out of range of the capability.
+ */
+int
+rte_cryptodev_sym_capability_check_auth(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
+
+/**
+ * Check if key, digest, AAD and initial vector sizes are supported
+ * in crypto AEAD capability
+ *
+ * @param capability Description of the symmetric crypto capability.
+ * @param key_size AEAD key size.
+ * @param digest_size AEAD digest size.
+ * @param aad_size AEAD AAD size.
+ * @param iv_size AEAD IV size.
+ *
+ * @return
+ * - Return 0 if the parameters are in range of the capability.
+ * - Return -1 if the parameters are out of range of the capability.
+ */
+int
+rte_cryptodev_sym_capability_check_aead(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
+ uint16_t iv_size);
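
A sketch (editorial, not part of the patch) of the intended query-then-check flow, probing device 0 for AES-CBC with a 128-bit key and 16-byte IV; the device id is an assumption.

#include <rte_cryptodev.h>

static int
dev0_supports_aes_cbc_128(void)
{
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
	};
	const struct rte_cryptodev_symmetric_capability *cap =
		rte_cryptodev_sym_capability_get(0, &idx);

	/* NULL means the algorithm is not supported at all */
	return cap != NULL &&
		rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0;
}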
+
+/**
+ * Check if op type is supported
+ *
+ * @param capability Description of the asymmetric crypto capability.
+ * @param op_type op type
+ *
+ * @return
+ * - Return 1 if the op type is supported
+ * - Return 0 if unsupported
+ */
+__rte_experimental
+int
+rte_cryptodev_asym_xform_capability_check_optype(
+ const struct rte_cryptodev_asymmetric_xform_capability *capability,
+ enum rte_crypto_asym_op_type op_type);
+
+/**
+ * Check if modulus length is in supported range
+ *
+ * @param capability Description of the asymmetric crypto capability.
+ * @param modlen modulus length.
+ *
+ * @return
+ * - Return 0 if the parameters are in range of the capability.
+ * - Return -1 if the parameters are out of range of the capability.
+ */
+__rte_experimental
+int
+rte_cryptodev_asym_xform_capability_check_modlen(
+ const struct rte_cryptodev_asymmetric_xform_capability *capability,
+ uint16_t modlen);
+
+/**
+ * Provide the cipher algorithm enum, given an algorithm string
+ *
+ * @param algo_enum A pointer to the cipher algorithm
+ * enum to be filled
+ * @param algo_string Cipher algorithm string
+ *
+ * @return
+ * - Return -1 if string is not valid
+ * - Return 0 if the string is valid
+ */
+int
+rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
+ const char *algo_string);
+
+/**
+ * Provide the authentication algorithm enum, given an algorithm string
+ *
+ * @param algo_enum A pointer to the authentication algorithm
+ * enum to be filled
+ * @param algo_string Authentication algo string
+ *
+ * @return
+ * - Return -1 if string is not valid
+ * - Return 0 if the string is valid
+ */
+int
+rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
+ const char *algo_string);
+
+/**
+ * Provide the AEAD algorithm enum, given an algorithm string
+ *
+ * @param algo_enum A pointer to the AEAD algorithm
+ * enum to be filled
+ * @param algo_string AEAD algorithm string
+ *
+ * @return
+ * - Return -1 if string is not valid
+ * - Return 0 if the string is valid
+ */
+int
+rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
+ const char *algo_string);
+
+/**
+ * Provide the Asymmetric xform enum, given an xform string
+ *
+ * @param xform_enum A pointer to the xform type
+ * enum to be filled
+ * @param xform_string xform string
+ *
+ * @return
+ * - Return -1 if string is not valid
+ * - Return 0 if the string is valid
+ */
+__rte_experimental
+int
+rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
+ const char *xform_string);
+
+
+/** Macro used at end of crypto PMD list */
+#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
+ { RTE_CRYPTO_OP_TYPE_UNDEFINED }
+
+
+/**
+ * Crypto device supported feature flags
+ *
+ * Note:
+ * New feature flags should be added to the end of the list
+ *
+ * Keep these flags synchronised with rte_cryptodev_get_feature_name()
+ */
+#define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
+/**< Symmetric crypto operations are supported */
+#define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
+/**< Asymmetric crypto operations are supported */
+#define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
+/**< Chaining symmetric crypto operations are supported */
+#define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
+/**< Utilises CPU SIMD SSE instructions */
+#define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
+/**< Utilises CPU SIMD AVX instructions */
+#define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
+/**< Utilises CPU SIMD AVX2 instructions */
+#define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
+/**< Utilises CPU AES-NI instructions */
+#define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
+/**< Operations are off-loaded to an
+ * external hardware accelerator
+ */
+#define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
+/**< Utilises CPU SIMD AVX512 instructions */
+#define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9)
+/**< In-place Scatter-gather (SGL) buffers, with multiple segments,
+ * are supported
+ */
+#define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10)
+/**< Out-of-place Scatter-gather (SGL) buffers are
+ * supported in input and output
+ */
+#define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11)
+/**< Out-of-place Scatter-gather (SGL) buffers are supported
+ * in input, combined with linear buffers (LB), with a
+ * single segment in output
+ */
+#define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12)
+/**< Out-of-place Scatter-gather (SGL) buffers are supported
+ * in output, combined with linear buffers (LB) in input
+ */
+#define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13)
+/**< Out-of-place linear buffers (LB) are supported in input and output */
+#define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14)
+/**< Utilises CPU NEON instructions */
+#define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15)
+/**< Utilises ARM CPU Cryptographic Extensions */
+#define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16)
+/**< Support Security Protocol Processing */
+#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP (1ULL << 17)
+/**< Support RSA Private Key OP with exponent */
+#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT (1ULL << 18)
+/**< Support RSA Private Key OP with CRT (quintuple) Keys */
+#define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED (1ULL << 19)
+/**< Support encrypted-digest operations where digest is appended to data */
+#define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20)
+/**< Support asymmetric session-less operations */
+#define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21)
+/**< Support symmetric cpu-crypto processing */
+#define RTE_CRYPTODEV_FF_SYM_SESSIONLESS (1ULL << 22)
+/**< Support symmetric session-less operations */
+#define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
+/**< Support operations on data which is not byte aligned */
+
+
+/**
+ * Get the name of a crypto device feature flag
+ *
+ * @param flag The mask describing the flag.
+ *
+ * @return
+ * The name of this flag, or NULL if it's not a valid feature flag.
+ */
+
+extern const char *
+rte_cryptodev_get_feature_name(uint64_t flag);
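
A sketch (editorial) of walking the flag space with this helper; rte_cryptodev_info and rte_cryptodev_info_get() are declared just below, and the printing policy is an assumption.

#include <stdio.h>
#include <rte_cryptodev.h>

static void
print_features(uint8_t dev_id)
{
	struct rte_cryptodev_info info;
	uint64_t flag;

	rte_cryptodev_info_get(dev_id, &info);
	/* walk every bit; the loop ends when the shift overflows to 0 */
	for (flag = 1; flag != 0; flag <<= 1) {
		const char *name = rte_cryptodev_get_feature_name(flag);

		if ((info.feature_flags & flag) && name != NULL)
			printf("  %s\n", name);
	}
}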
+
+/** Crypto device information */
+struct rte_cryptodev_info {
+ const char *driver_name; /**< Driver name. */
+ uint8_t driver_id; /**< Driver identifier */
+ struct rte_device *device; /**< Generic device information. */
+
+ uint64_t feature_flags;
+ /**< Feature flags exposes HW/SW features for the given device */
+
+ const struct rte_cryptodev_capabilities *capabilities;
+ /**< Array of devices supported capabilities */
+
+ unsigned max_nb_queue_pairs;
+ /**< Maximum number of queue pairs supported by device. */
+
+ uint16_t min_mbuf_headroom_req;
+ /**< Minimum mbuf headroom required by device */
+
+ uint16_t min_mbuf_tailroom_req;
+ /**< Minimum mbuf tailroom required by device */
+
+ struct {
+ unsigned max_nb_sessions;
+ /**< Maximum number of sessions supported by device.
+ * If 0, the device does not have any limitation in
+ * number of sessions that can be used.
+ */
+ } sym;
+};
+
+#define RTE_CRYPTODEV_DETACHED (0)
+#define RTE_CRYPTODEV_ATTACHED (1)
+
+/** Definitions of Crypto device event types */
+enum rte_cryptodev_event_type {
+ RTE_CRYPTODEV_EVENT_UNKNOWN, /**< unknown event type */
+ RTE_CRYPTODEV_EVENT_ERROR, /**< error interrupt event */
+ RTE_CRYPTODEV_EVENT_MAX /**< max value of this enum */
+};
+
+/** Crypto device queue pair configuration structure. */
+struct rte_cryptodev_qp_conf {
+ uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
+ struct rte_mempool *mp_session;
+ /**< The mempool for creating session in sessionless mode */
+ struct rte_mempool *mp_session_private;
+ /**< The mempool for creating sess private data in sessionless mode */
+};
+
+/**
+ * Typedef for application callback function to be registered by application
+ * software for notification of device events
+ *
+ * @param dev_id Crypto device identifier
+ * @param event Crypto device event to register for notification of.
+ * @param cb_arg User specified parameter to be passed to the
+ * user's callback function.
+ */
+typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
+ enum rte_cryptodev_event_type event, void *cb_arg);
+
+
+/** Crypto Device statistics */
+struct rte_cryptodev_stats {
+ uint64_t enqueued_count;
+ /**< Count of all operations enqueued */
+ uint64_t dequeued_count;
+ /**< Count of all operations dequeued */
+
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations enqueued */
+ uint64_t dequeue_err_count;
+ /**< Total error count on operations dequeued */
+};
+
+#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
+/**< Max length of name of crypto PMD */
+
+/**
+ * Get the device identifier for the named crypto device.
+ *
+ * @param name device name to select the device structure.
+ *
+ * @return
+ * - Returns crypto device identifier on success.
+ * - Return -1 on failure to find named crypto device.
+ */
+extern int
+rte_cryptodev_get_dev_id(const char *name);
+
+/**
+ * Get the crypto device name given a device identifier.
+ *
+ * @param dev_id
+ * The identifier of the device
+ *
+ * @return
+ * - Returns crypto device name.
+ * - Returns NULL if crypto device is not present.
+ */
+extern const char *
+rte_cryptodev_name_get(uint8_t dev_id);
+
+/**
+ * Get the total number of crypto devices that have been successfully
+ * initialised.
+ *
+ * @return
+ * - The total number of usable crypto devices.
+ */
+extern uint8_t
+rte_cryptodev_count(void);
+
+/**
+ * Get the number of crypto devices that use a given driver.
+ *
+ * @param driver_id driver identifier.
+ *
+ * @return
+ * Returns the number of crypto devices.
+ */
+extern uint8_t
+rte_cryptodev_device_count_by_driver(uint8_t driver_id);
+
+/**
+ * Get number and identifiers of attached crypto devices that
+ * use the same crypto driver.
+ *
+ * @param driver_name driver name.
+ * @param devices output device identifiers.
+ * @param nb_devices maximum number of devices.
+ *
+ * @return
+ * Returns the number of attached crypto devices.
+ */
+uint8_t
+rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
+ uint8_t nb_devices);
+/**
+ * Return the NUMA socket to which a device is connected
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @return
+ * The NUMA socket id to which the device is connected or
+ * a default of zero if the socket could not be determined.
+ * -1 if the dev_id value is out of range.
+ */
+extern int
+rte_cryptodev_socket_id(uint8_t dev_id);
+
+/** Crypto device configuration structure */
+struct rte_cryptodev_config {
+ int socket_id; /**< Socket to allocate resources on */
+ uint16_t nb_queue_pairs;
+ /**< Number of queue pairs to configure on device */
+ uint64_t ff_disable;
+ /**< Feature flags to be disabled. Only the following features are
+ * allowed to be disabled,
+ * - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
+ * - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
+ * - RTE_CRYPTODEV_FF_SECURITY
+ */
+};
+
+/**
+ * Configure a device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id The identifier of the device to configure.
+ * @param config The crypto device configuration structure.
+ *
+ * @return
+ * - 0: Success, device configured.
+ * - <0: Error code returned by the driver configuration function.
+ */
+extern int
+rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
+
+/**
+ * Start a device.
+ *
+ * The device start step is the last one and consists of setting the configured
+ * offload features and starting the transmit and the receive units of the
+ * device.
+ * On success, all basic functions exported by the API (link status,
+ * receive/transmit, and so on) can be invoked.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @return
+ * - 0: Success, device started.
+ * - <0: Error code of the driver device start function.
+ */
+extern int
+rte_cryptodev_start(uint8_t dev_id);
+
+/**
+ * Stop a device. The device can be restarted with a call to
+ * rte_cryptodev_start()
+ *
+ * @param dev_id The identifier of the device.
+ */
+extern void
+rte_cryptodev_stop(uint8_t dev_id);
+
+/**
+ * Close a device. The device cannot be restarted!
+ *
+ * @param dev_id The identifier of the device.
+ *
+ * @return
+ * - 0 on successfully closing device
+ * - <0 on failure to close device
+ */
+extern int
+rte_cryptodev_close(uint8_t dev_id);
+
+/**
+ * Allocate and set up a receive queue pair for a device.
+ *
+ *
+ * @param dev_id The identifier of the device.
+ * @param queue_pair_id The index of the queue pairs to set up. The
+ * value must be in the range [0, nb_queue_pair
+ * - 1] previously supplied to
+ * rte_cryptodev_configure().
+ * @param qp_conf The pointer to the configuration data to be
+ * used for the queue pair.
+ * @param socket_id The *socket_id* argument is the socket
+ * identifier in case of NUMA. The value can be
+ * *SOCKET_ID_ANY* if there is no NUMA constraint
+ * for the DMA memory allocated for the receive
+ * queue pair.
+ *
+ * @return
+ * - 0: Success, queue pair correctly set up.
+ * - <0: Queue pair configuration failed
+ */
+extern int
+rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
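
A sketch (editorial) of the bring-up order the comments above imply: configure first, then each queue pair, then start. The two session mempools, descriptor count and single queue pair are assumptions.

#include <rte_cryptodev.h>

static int
bring_up(uint8_t dev_id, struct rte_mempool *sess_pool,
	 struct rte_mempool *sess_priv_pool)
{
	struct rte_cryptodev_config conf = {
		.socket_id = rte_cryptodev_socket_id(dev_id),
		.nb_queue_pairs = 1,
		.ff_disable = 0,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = sess_pool,
		.mp_session_private = sess_priv_pool,
	};

	if (rte_cryptodev_configure(dev_id, &conf) < 0)
		return -1;
	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
			conf.socket_id) < 0)
		return -1;
	return rte_cryptodev_start(dev_id);
}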
+
+/**
+ * Get the number of queue pairs on a specific crypto device
+ *
+ * @param dev_id Crypto device identifier.
+ * @return
+ * - The number of configured queue pairs.
+ */
+extern uint16_t
+rte_cryptodev_queue_pair_count(uint8_t dev_id);
+
+
+/**
+ * Retrieve the general I/O statistics of a device.
+ *
+ * @param dev_id The identifier of the device.
+ * @param stats A pointer to a structure of type
+ * *rte_cryptodev_stats* to be filled with the
+ * values of device counters.
+ * @return
+ * - Zero if successful.
+ * - Non-zero otherwise.
+ */
+extern int
+rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
+
+/**
+ * Reset the general I/O statistics of a device.
+ *
+ * @param dev_id The identifier of the device.
+ */
+extern void
+rte_cryptodev_stats_reset(uint8_t dev_id);
+
+/**
+ * Retrieve the contextual information of a device.
+ *
+ * @param dev_id The identifier of the device.
+ * @param dev_info A pointer to a structure of type
+ * *rte_cryptodev_info* to be filled with the
+ * contextual information of the device.
+ *
+ * @note The capabilities field of dev_info is set to point to the first
+ * element of an array of struct rte_cryptodev_capabilities. The element after
+ * the last valid element has its op field set to
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED.
+ */
+
+void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
+
+/* An extra element RTE_CRYPTO_AEAD_CHACHA20_POLY1305 is added
+ * to enum rte_crypto_aead_algorithm, also changing the value of
+ * RTE_CRYPTO_AEAD_LIST_END. To maintain ABI compatibility with applications
+ * which linked against earlier versions, preventing them, for example, from
+ * picking up the new value and using it to index into an array sized too small
+ * for it, it is necessary to have two versions of rte_cryptodev_info_get()
+ * The latest version just returns directly the capabilities retrieved from
+ * the device. The compatible version inspects the capabilities retrieved
+ * from the device, but only returns them directly if the new value
+ * is not included. If the new value is included, it allocates space
+ * for a copy of the device capabilities, trims the new value from this
+ * and returns this copy. It only needs to do this once per device.
+ * For the corner case where the allocation may fail,
+ * an empty capability list is returned, as there is no mechanism to return
+ * an error and adding such a mechanism would itself be an ABI breakage.
+ * The compatible version can be removed after the next major ABI release.
+ */
+
+void
+rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
+
+void
+rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
+
+/**
+ * Register a callback function for specific device id.
+ *
+ * @param dev_id Device id.
+ * @param event Event of interest.
+ * @param cb_fn User supplied callback function to be called.
+ * @param cb_arg Pointer to the parameters for the registered
+ * callback.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+extern int
+rte_cryptodev_callback_register(uint8_t dev_id,
+ enum rte_cryptodev_event_type event,
+ rte_cryptodev_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Unregister a callback function for specific device id.
+ *
+ * @param dev_id The device identifier.
+ * @param event Event of interest.
+ * @param cb_fn User supplied callback function to be called.
+ * @param cb_arg Pointer to the parameters for the registered
+ * callback.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+extern int
+rte_cryptodev_callback_unregister(uint8_t dev_id,
+ enum rte_cryptodev_event_type event,
+ rte_cryptodev_cb_fn cb_fn, void *cb_arg);
+
+
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+/**< Dequeue processed packets from queue pair of a device. */
+
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+/**< Enqueue packets for processing on queue pair of a device. */
+
+
+
+
+struct rte_cryptodev_callback;
+
+/** Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
+
+/** The data structure associated with each crypto device. */
+struct rte_cryptodev {
+ dequeue_pkt_burst_t dequeue_burst;
+ /**< Pointer to PMD receive function. */
+ enqueue_pkt_burst_t enqueue_burst;
+ /**< Pointer to PMD transmit function. */
+
+ struct rte_cryptodev_data *data;
+ /**< Pointer to device data */
+ struct rte_cryptodev_ops *dev_ops;
+ /**< Functions exported by PMD */
+ uint64_t feature_flags;
+ /**< Feature flags exposes HW/SW features for the given device */
+ struct rte_device *device;
+ /**< Backing device */
+
+ uint8_t driver_id;
+ /**< Crypto driver identifier*/
+
+ struct rte_cryptodev_cb_list link_intr_cbs;
+ /**< User application callback for interrupts if present */
+
+ void *security_ctx;
+ /**< Context for security ops */
+
+ __extension__
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+} __rte_cache_aligned;
+
+void *
+rte_cryptodev_get_sec_ctx(uint8_t dev_id);
+
+/**
+ *
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_cryptodev_data {
+ uint8_t dev_id;
+ /**< Device ID for this instance */
+ uint8_t socket_id;
+ /**< Socket ID where memory is allocated */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique identifier name */
+
+ __extension__
+ uint8_t dev_started : 1;
+ /**< Device state: STARTED(1)/STOPPED(0) */
+
+ struct rte_mempool *session_pool;
+ /**< Session memory pool */
+ void **queue_pairs;
+ /**< Array of pointers to queue pairs. */
+ uint16_t nb_queue_pairs;
+ /**< Number of device queue pairs. */
+
+ void *dev_private;
+ /**< PMD-specific private data */
+} __rte_cache_aligned;
+
+extern struct rte_cryptodev *rte_cryptodevs;
+/**
+ *
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operations are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
+ *
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
+ *
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the device's output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
+ *
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
+ * notification to avoid the corresponding overhead.
+ *
+ * @param dev_id The symmetric crypto device identifier
+ * @param qp_id The index of the queue pair from which to
+ * retrieve processed packets. The value must be
+ * in the range [0, nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
+ *
+ * @return
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
+ */
+static inline uint16_t
+rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
+
+ rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
+ return nb_ops;
+}
+
+/**
+ * Enqueue a burst of operations for processing on a crypto device.
+ *
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
+ *
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
+ *
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all packets have been enqueued.
+ *
+ * @param dev_id The identifier of the device.
+ * @param qp_id The index of the queue pair which packets are
+ * to be enqueued for processing. The value
+ * must be in the range [0, nb_queue_pairs - 1]
+ * previously supplied to
+ * *rte_cryptodev_configure*.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
+ *
+ * @return
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto device's queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
+ */
+static inline uint16_t
+rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+ rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
+ return (*dev->enqueue_burst)(
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
+}
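
A sketch (editorial) of the enqueue/dequeue pairing these two inline functions are built for; BURST_SIZE, the prepared ops array and the use of queue pair 0 are assumptions, and error handling is app-specific.

#include <rte_crypto.h>
#include <rte_cryptodev.h>

#define BURST_SIZE 32

static void
run_burst(uint8_t dev_id, struct rte_crypto_op **ops, uint16_t n)
{
	struct rte_crypto_op *deq[BURST_SIZE];
	uint16_t enq, nb, i, errs = 0;

	enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, n);
	/* ops[enq..n-1] were refused (queue full) and remain owned by us */

	do {
		nb = rte_cryptodev_dequeue_burst(dev_id, 0, deq, BURST_SIZE);
		for (i = 0; i < nb; i++) {
			/* always check per-op status before using results */
			if (deq[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
				errs++; /* app-specific handling goes here */
			rte_crypto_op_free(deq[i]);
		}
	} while (nb == BURST_SIZE);
	(void)enq;
	(void)errs;
}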
+
+
+/** Cryptodev symmetric crypto session
+ * Each session is derived from a fixed xform chain. Therefore each session
+ * has a fixed algo, key, op-type, digest_len etc.
+ */
+struct rte_cryptodev_sym_session {
+ uint64_t opaque_data;
+ /**< Can be used for external metadata */
+ uint16_t nb_drivers;
+ /**< number of elements in sess_data array */
+ uint16_t user_data_sz;
+ /**< session user data will be placed after sess_data */
+ __extension__ struct {
+ void *data;
+ uint16_t refcnt;
+ } sess_data[0];
+ /**< Driver specific session material, variable size */
+};
+
+/** Cryptodev asymmetric crypto session */
+struct rte_cryptodev_asym_session {
+ __extension__ void *sess_private_data[0];
+ /**< Private asymmetric session material */
+};
+
+/**
+ * Create a symmetric session mempool.
+ *
+ * @param name
+ * The unique mempool name.
+ * @param nb_elts
+ * The number of elements in the mempool.
+ * @param elt_size
+ * The size of the element. This value will be ignored if it is smaller than
+ * the minimum session header size required for the system. For users who
+ * want to use the same mempool for sym sessions and session private data,
+ * it can be the maximum of all existing devices' private data and session
+ * header sizes.
+ * @param cache_size
+ * The number of per-lcore cache elements
+ * @param priv_size
+ * The private data size of each session.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ *
+ * @return
+ * - On success returns the created session mempool pointer
+ * - On failure returns NULL
+ */
+__rte_experimental
+struct rte_mempool *
+rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
+ uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Create symmetric crypto session header (generic with no private data)
+ *
+ * @param mempool Symmetric session mempool to allocate session
+ * objects from
+ * @return
+ * - On success return pointer to sym-session
+ * - On failure returns NULL
+ */
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
+
+/**
+ * Create asymmetric crypto session header (generic with no private data)
+ *
+ * @param mempool mempool to allocate asymmetric session
+ * objects from
+ * @return
+ * - On success return pointer to asym-session
+ * - On failure returns NULL
+ */
+__rte_experimental
+struct rte_cryptodev_asym_session *
+rte_cryptodev_asym_session_create(struct rte_mempool *mempool);
+
+/**
+ * Frees symmetric crypto session header, after checking that all
+ * the device private data has been freed, returning it
+ * to its original mempool.
+ *
+ * @param sess Session header to be freed.
+ *
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if session is NULL.
+ * - -EBUSY if not all device private data has been freed.
+ */
+int
+rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
+
+/**
+ * Frees asymmetric crypto session header, after checking that all
+ * the device private data has been freed, returning it
+ * to its original mempool.
+ *
+ * @param sess Session header to be freed.
+ *
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if session is NULL.
+ * - -EBUSY if not all device private data has been freed.
+ */
+__rte_experimental
+int
+rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess);
+
+/**
+ * Fill out private data for the device id, based on its device type.
+ *
+ * @param dev_id ID of device that we want the session to be used on
+ * @param sess Session where the private data will be attached to
+ * @param xforms Symmetric crypto transform operations to apply on flow
+ * processed with this session
+ * @param mempool Mempool where the private data is allocated.
+ *
+ * @return
+ * - On success, zero.
+ * - -EINVAL if input parameters are invalid.
+ * - -ENOTSUP if crypto device does not support the crypto transform or
+ * does not support symmetric operations.
+ * - -ENOMEM if the private session could not be allocated.
+ */
+int
+rte_cryptodev_sym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms,
+ struct rte_mempool *mempool);
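
A sketch (editorial) of the two-step lifecycle these APIs define: allocate the generic header, then bind device-private data with an xform. Key material, IV offset and the two mempools are assumptions.

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
make_aes_cbc_session(uint8_t dev_id, struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool,
		uint8_t *key, uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = 16 },
			.iv = { .offset = iv_offset, .length = 16 },
		},
	};
	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(sess_pool);

	if (sess == NULL)
		return NULL;
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
			sess_priv_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}

Teardown mirrors this order: rte_cryptodev_sym_session_clear() for each device, then rte_cryptodev_sym_session_free().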
+
+/**
+ * Initialize asymmetric session on a device with specific asymmetric xform
+ *
+ * @param dev_id ID of device that we want the session to be used on
+ * @param sess Session to be set up on a device
+ * @param xforms Asymmetric crypto transform operations to apply on flow
+ * processed with this session
+ * @param mempool Mempool to be used for internal allocation.
+ *
+ * @return
+ * - On success, zero.
+ * - -EINVAL if input parameters are invalid.
+ * - -ENOTSUP if crypto device does not support the crypto transform.
+ * - -ENOMEM if the private session could not be allocated.
+ */
+__rte_experimental
+int
+rte_cryptodev_asym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_crypto_asym_xform *xforms,
+ struct rte_mempool *mempool);
+
+/**
+ * Frees private data for the device id, based on its device type,
+ * returning it to its mempool. It is the application's responsibility
+ * to ensure that private session data is not cleared while there are
+ * still in-flight operations using it.
+ *
+ * @param dev_id ID of device that uses the session.
+ * @param sess Session containing the reference to the private data
+ *
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if device is invalid or session is NULL.
+ * - -ENOTSUP if crypto device does not support symmetric operations.
+ */
+int
+rte_cryptodev_sym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess);
+
+/**
+ * Frees resources held by asymmetric session during
+ * rte_cryptodev_asym_session_init()
+ *
+ * @param dev_id ID of device that uses the asymmetric session.
+ * @param sess Asymmetric session set up on a device using
+ * rte_cryptodev_asym_session_init()
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if device is invalid or session is NULL.
+ */
+__rte_experimental
+int
+rte_cryptodev_asym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess);
+
+/**
+ * Get the size of the session header, for all registered drivers, excluding
+ * the user data size.
+ *
+ * @return
+ * Size of the symmetric session header.
+ */
+unsigned int
+rte_cryptodev_sym_get_header_session_size(void);
+
+/**
+ * Get the size of the session header from a created session.
+ *
+ * @param sess
+ * The sym cryptodev session pointer
+ *
+ * @return
+ * - If sess is not NULL, return the size of the header session including
+ * the private data size defined within sess.
+ * - If sess is NULL, return 0.
+ */
+__rte_experimental
+unsigned int
+rte_cryptodev_sym_get_existing_header_session_size(
+ struct rte_cryptodev_sym_session *sess);
+
+/**
+ * Get the size of the asymmetric session header, for all registered drivers.
+ *
+ * @return
+ * Size of the asymmetric session header.
+ */
+__rte_experimental
+unsigned int
+rte_cryptodev_asym_get_header_session_size(void);
+
+/**
+ * Get the size of the private symmetric session data
+ * for a device.
+ *
+ * @param dev_id The device identifier.
+ *
+ * @return
+ * - Size of the private data, if successful
+ * - 0 if device is invalid or does not have private
+ * symmetric session
+ */
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
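
A sketch (editorial) of sizing the two session pools from the values these functions report; pool names, counts and cache sizes are illustrative.

#include <rte_cryptodev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static int
make_session_pools(uint8_t dev_id, struct rte_mempool **hdr,
		struct rte_mempool **priv)
{
	unsigned int hdr_sz = rte_cryptodev_sym_get_header_session_size();
	unsigned int priv_sz =
		rte_cryptodev_sym_get_private_session_size(dev_id);

	/* header pool for session headers, plain pool for private data */
	*hdr = rte_cryptodev_sym_session_pool_create("sess_hdr_pool",
			1024, hdr_sz, 64, 0, rte_socket_id());
	*priv = rte_mempool_create("sess_priv_pool", 1024, priv_sz, 64, 0,
			NULL, NULL, NULL, NULL, rte_socket_id(), 0);

	return (*hdr != NULL && *priv != NULL) ? 0 : -1;
}

If several devices share the private pool, priv_sz must be the maximum private size across all of them.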
+
+/**
+ * Get the size of the private data for asymmetric session
+ * on device
+ *
+ * @param dev_id The device identifier.
+ *
+ * @return
+ * - Size of the asymmetric private data, if successful
+ * - 0 if device is invalid or does not have private session
+ */
+__rte_experimental
+unsigned int
+rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
+
+/**
+ * Provide driver identifier.
+ *
+ * @param name
+ * The pointer to a driver name.
+ * @return
+ * The driver type identifier or -1 if no driver found
+ */
+int rte_cryptodev_driver_id_get(const char *name);
+
+/**
+ * Provide driver name.
+ *
+ * @param driver_id
+ * The driver identifier.
+ * @return
+ * The driver name or NULL if no driver was found
+ */
+const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
+
+/**
+ * Store user data in a session.
+ *
+ * @param sess Session pointer allocated by
+ * *rte_cryptodev_sym_session_create*.
+ * @param data Pointer to the user data.
+ * @param size Size of the user data.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+__rte_experimental
+int
+rte_cryptodev_sym_session_set_user_data(
+ struct rte_cryptodev_sym_session *sess,
+ void *data,
+ uint16_t size);
+
+/**
+ * Get user data stored in a session.
+ *
+ * @param sess Session pointer allocated by
+ * *rte_cryptodev_sym_session_create*.
+ *
+ * @return
+ * - On success return pointer to user data.
+ * - On failure returns NULL.
+ */
+__rte_experimental
+void *
+rte_cryptodev_sym_session_get_user_data(
+ struct rte_cryptodev_sym_session *sess);
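
A sketch (editorial) of round-tripping application context through a session; struct app_ctx is hypothetical and the session's mempool must have been created with a priv_size of at least sizeof(struct app_ctx).

#include <rte_cryptodev.h>

struct app_ctx { uint32_t flow_id; };	/* hypothetical per-flow context */

static uint32_t
tag_and_read(struct rte_cryptodev_sym_session *sess)
{
	struct app_ctx ctx = { .flow_id = 7 };
	struct app_ctx *p;

	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
			sizeof(ctx)) != 0)
		return 0;
	p = rte_cryptodev_sym_session_get_user_data(sess);
	return p != NULL ? p->flow_id : 0;
}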
+
+/**
+ * Perform actual crypto processing (encrypt/digest or auth/decrypt)
+ * on user provided data.
+ *
+ * @param dev_id The device identifier.
+ * @param sess Cryptodev session structure
+ * @param ofs Start and stop offsets for auth and cipher operations
+ * @param vec Vectorized operation descriptor
+ *
+ * @return
+ * - Returns number of successfully processed packets.
+ */
+__rte_experimental
+uint32_t
+rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_sym_vec *vec);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTODEV_H_ */
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.c b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.c
new file mode 100644
index 000000000..091200412
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.c
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_pmd.h"
+
+/**
+ * Parse name from argument
+ */
+static int
+rte_cryptodev_pmd_parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+ int n;
+
+ n = strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+ if (n >= RTE_CRYPTODEV_NAME_MAX_LEN)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * Parse unsigned integer from argument
+ */
+static int
+rte_cryptodev_pmd_parse_uint_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int i;
+ char *end;
+ errno = 0;
+
+ i = strtol(value, &end, 10);
+ if (*end != 0 || errno != 0 || i < 0)
+ return -EINVAL;
+
+ *((uint32_t *)extra_args) = i;
+ return 0;
+}
+
+int
+rte_cryptodev_pmd_parse_input_args(
+ struct rte_cryptodev_pmd_init_params *params,
+ const char *args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (args) {
+ kvlist = rte_kvargs_parse(args, cryptodev_pmd_valid_params);
+ if (kvlist == NULL)
+ return -EINVAL;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ &rte_cryptodev_pmd_parse_uint_arg,
+ &params->max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ &rte_cryptodev_pmd_parse_uint_arg,
+ &params->socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ &rte_cryptodev_pmd_parse_name_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+struct rte_cryptodev *
+rte_cryptodev_pmd_create(const char *name,
+ struct rte_device *device,
+ struct rte_cryptodev_pmd_init_params *params)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (params->name[0] != '\0') {
+ CDEV_LOG_INFO("User specified device name = %s\n", params->name);
+ name = params->name;
+ }
+
+ CDEV_LOG_INFO("Creating cryptodev %s\n", name);
+
+ CDEV_LOG_INFO("Initialisation parameters - name: %s,"
+ "socket id: %d, max queue pairs: %u",
+ name, params->socket_id, params->max_nb_queue_pairs);
+
+ /* allocate device structure */
+ cryptodev = rte_cryptodev_pmd_allocate(name, params->socket_id);
+ if (cryptodev == NULL) {
+ CDEV_LOG_ERR("Failed to allocate crypto device for %s", name);
+ return NULL;
+ }
+
+ /* allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private =
+ rte_zmalloc_socket("cryptodev device private",
+ params->private_data_size,
+ RTE_CACHE_LINE_SIZE,
+ params->socket_id);
+
+ if (cryptodev->data->dev_private == NULL) {
+ CDEV_LOG_ERR("Cannot allocate memory for cryptodev %s"
+ " private data", name);
+
+ rte_cryptodev_pmd_release_device(cryptodev);
+ return NULL;
+ }
+ }
+
+ cryptodev->device = device;
+
+ /* initialise user call-back tail queue */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ return cryptodev;
+}
+
+int
+rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev)
+{
+ int retval;
+
+ CDEV_LOG_INFO("Closing crypto device %s", cryptodev->device->name);
+
+ /* free crypto device */
+ retval = rte_cryptodev_pmd_release_device(cryptodev);
+ if (retval)
+ return retval;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+
+ cryptodev->device = NULL;
+ cryptodev->data = NULL;
+
+ return 0;
+}
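
A sketch (editorial, not from the patch) of how a vdev PMD typically drives the two helpers above from its probe hook; my_pmd_private, my_pmd_ops, the feature flags and the error handling are all placeholders.

#include <errno.h>
#include <rte_bus_vdev.h>
#include <rte_lcore.h>
#include "rte_cryptodev_pmd.h"

struct my_pmd_private { int dummy; };		/* placeholder */
extern struct rte_cryptodev_ops my_pmd_ops;	/* placeholder */

static int
my_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"", sizeof(struct my_pmd_private), rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name = rte_vdev_device_name(vdev);
	struct rte_cryptodev *dev;

	/* defaults above are overwritten by any parsed device args */
	rte_cryptodev_pmd_parse_input_args(&init_params,
			rte_vdev_device_args(vdev));
	dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params);
	if (dev == NULL)
		return -ENODEV;

	dev->dev_ops = &my_pmd_ops;
	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO;
	return 0;
}

The matching remove hook would call rte_cryptodev_pmd_destroy() on the device.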
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h
new file mode 100644
index 000000000..81975d72b
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020 Intel Corporation.
+ */
+
+#ifndef _RTE_CRYPTODEV_PMD_H_
+#define _RTE_CRYPTODEV_PMD_H_
+
+/** @file
+ * RTE Crypto PMD APIs
+ *
+ * @note
+ * These API are from crypto PMD only and user applications should not call
+ * them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_config.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_log.h>
+#include <rte_common.h>
+
+#include "rte_crypto.h"
+#include "rte_cryptodev.h"
+
+
+#define RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS 8
+
+#define RTE_CRYPTODEV_PMD_NAME_ARG ("name")
+#define RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_PMD_SOCKET_ID_ARG ("socket_id")
+
+
+static const char * const cryptodev_pmd_valid_params[] = {
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG
+};
+
+/**
+ * @internal
+ * Initialisation parameters for crypto devices
+ */
+struct rte_cryptodev_pmd_init_params {
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ size_t private_data_size;
+ int socket_id;
+ unsigned int max_nb_queue_pairs;
+};
+
+/** Global structure used for maintaining state of allocated crypto devices */
+struct rte_cryptodev_global {
+ struct rte_cryptodev *devs; /**< Device information array */
+ struct rte_cryptodev_data *data[RTE_CRYPTO_MAX_DEVS];
+ /**< Device private data */
+ uint8_t nb_devs; /**< Number of devices found */
+};
+
+/* Cryptodev driver, containing the driver ID */
+struct cryptodev_driver {
+ TAILQ_ENTRY(cryptodev_driver) next; /**< Next in list. */
+ const struct rte_driver *driver;
+ uint8_t id;
+};
+
+/**
+ * Get the rte_cryptodev structure device pointer for the device. Assumes a
+ * valid device index.
+ *
+ * @param dev_id Device ID value to select the device structure.
+ *
+ * @return
+ * - The rte_cryptodev structure pointer for the given device ID.
+ */
+struct rte_cryptodev *
+rte_cryptodev_pmd_get_dev(uint8_t dev_id);
+
+/**
+ * Get the rte_cryptodev structure device pointer for the named device.
+ *
+ * @param name device name to select the device structure.
+ *
+ * @return
+ * - The rte_cryptodev structure pointer for the given device ID.
+ */
+struct rte_cryptodev *
+rte_cryptodev_pmd_get_named_dev(const char *name);
+
+/**
+ * Validate that the crypto device index refers to a valid attached crypto
+ * device.
+ *
+ * @param dev_id Crypto device index.
+ *
+ * @return
+ * - If the device index is valid (1) or not (0).
+ */
+unsigned int
+rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id);
+
+/**
+ * The pool of rte_cryptodev structures.
+ */
+extern struct rte_cryptodev *rte_cryptodevs;
+
+
+/**
+ * Definitions of all functions exported by a driver through
+ * the generic structure of type *crypto_dev_ops* supplied in the
+ * *rte_cryptodev* structure associated with a device.
+ */
+
+/**
+ * Function used to configure device.
+ *
+ * @param dev Crypto device pointer
+ * @param config Crypto device configurations
+ *
+ * @return Returns 0 on success
+ */
+typedef int (*cryptodev_configure_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+
+/**
+ * Function used to start a configured device.
+ *
+ * @param dev Crypto device pointer
+ *
+ * @return Returns 0 on success
+ */
+typedef int (*cryptodev_start_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to stop a configured device.
+ *
+ * @param dev Crypto device pointer
+ */
+typedef void (*cryptodev_stop_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to close a configured device.
+ *
+ * @param dev Crypto device pointer
+ * @return
+ * - 0 on success.
+ * - EAGAIN if device is busy and cannot be closed
+ */
+typedef int (*cryptodev_close_t)(struct rte_cryptodev *dev);
+
+
+/**
+ * Function used to get statistics of a device.
+ *
+ * @param dev Crypto device pointer
+ * @param stats Pointer to crypto device stats structure to populate
+ */
+typedef void (*cryptodev_stats_get_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+
+
+/**
+ * Function used to reset statistics of a device.
+ *
+ * @param dev Crypto device pointer
+ */
+typedef void (*cryptodev_stats_reset_t)(struct rte_cryptodev *dev);
+
+
+/**
+ * Function used to get specific information of a device.
+ *
+ * @param dev Crypto device pointer
+ */
+typedef void (*cryptodev_info_get_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info);
+
+/**
+ * Setup a queue pair for a device.
+ *
+ * @param dev Crypto device pointer
+ * @param qp_id Queue Pair Index
+ * @param qp_conf Queue configuration structure
+ * @param socket_id Socket Index
+ *
+ * @return Returns 0 on success.
+ */
+typedef int (*cryptodev_queue_pair_setup_t)(struct rte_cryptodev *dev,
+ uint16_t qp_id, const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id);
+
+/**
+ * Release memory resources allocated by given queue pair.
+ *
+ * @param dev Crypto device pointer
+ * @param qp_id Queue Pair Index
+ *
+ * @return
+ * - 0 on success.
+ * - EAGAIN if device is busy and cannot be closed
+ */
+typedef int (*cryptodev_queue_pair_release_t)(struct rte_cryptodev *dev,
+ uint16_t qp_id);
+
+/**
+ * Create a session mempool to allocate sessions from
+ *
+ * @param dev Crypto device pointer
+ * @param nb_objs number of session objects in mempool
+ * @param obj_cache_size l-core object cache size, see *rte_ring_create*
+ * @param socket_id Socket Id to allocate mempool on.
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns a negative value
+ */
+typedef int (*cryptodev_sym_create_session_pool_t)(
+ struct rte_cryptodev *dev, unsigned nb_objs,
+ unsigned obj_cache_size, int socket_id);
+
+
+/**
+ * Get the size of a cryptodev session
+ *
+ * @param dev Crypto device pointer
+ *
+ * @return
+ * - On success returns the size of the session structure for device
+ * - On failure returns 0
+ */
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
+ struct rte_cryptodev *dev);
+/**
+ * Get the size of an asymmetric cryptodev session
+ *
+ * @param dev Crypto device pointer
+ *
+ * @return
+ * - On success returns the size of the session structure for device
+ * - On failure returns 0
+ */
+typedef unsigned int (*cryptodev_asym_get_session_private_size_t)(
+ struct rte_cryptodev *dev);
+
+/**
+ * Configure a Crypto session on a device.
+ *
+ * @param dev Crypto device pointer
+ * @param xform Single or chain of crypto xforms
+ * @param priv_sess Pointer to cryptodev's private session structure
+ * @param mp Mempool where the private session is allocated
+ *
+ * @return
+ * - Returns 0 if private session structure has been created successfully.
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if crypto device does not support the crypto transform.
+ * - Returns -ENOMEM if the private session could not be allocated.
+ */
+typedef int (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_mempool *mp);
+/**
+ * Configure a Crypto asymmetric session on a device.
+ *
+ * @param dev Crypto device pointer
+ * @param xform Single or chain of crypto xforms
+ * @param priv_sess Pointer to cryptodev's private session structure
+ * @param mp Mempool where the private session is allocated
+ *
+ * @return
+ * - Returns 0 if private session structure has been created successfully.
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if crypto device does not support the crypto transform.
+ * - Returns -ENOMEM if the private session could not be allocated.
+ */
+typedef int (*cryptodev_asym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *session,
+ struct rte_mempool *mp);
+/**
+ * Free driver private session data.
+ *
+ * @param dev Crypto device pointer
+ * @param sess Cryptodev session structure
+ */
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess);
+/**
+ * Free asymmetric session private data.
+ *
+ * @param dev Crypto device pointer
+ * @param sess Cryptodev session structure
+ */
+typedef void (*cryptodev_asym_free_session_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess);
+/**
+ * Perform actual crypto processing (encrypt/digest or auth/decrypt)
+ * on user provided data.
+ *
+ * @param dev Crypto device pointer
+ * @param sess Cryptodev session structure
+ * @param ofs Start and stop offsets for auth and cipher operations
+ * @param vec Vectorized operation descriptor
+ *
+ * @return
+ * - Returns number of successfully processed packets.
+ *
+ */
+typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t)
+ (struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec);
+
+
+/** Crypto device operations function pointer table */
+struct rte_cryptodev_ops {
+ cryptodev_configure_t dev_configure; /**< Configure device. */
+ cryptodev_start_t dev_start; /**< Start device. */
+ cryptodev_stop_t dev_stop; /**< Stop device. */
+ cryptodev_close_t dev_close; /**< Close device. */
+
+ cryptodev_info_get_t dev_infos_get; /**< Get device info. */
+
+ cryptodev_stats_get_t stats_get;
+ /**< Get device statistics. */
+ cryptodev_stats_reset_t stats_reset;
+ /**< Reset device statistics. */
+
+ cryptodev_queue_pair_setup_t queue_pair_setup;
+ /**< Set up a device queue pair. */
+ cryptodev_queue_pair_release_t queue_pair_release;
+ /**< Release a queue pair. */
+
+ cryptodev_sym_get_session_private_size_t sym_session_get_size;
+ /**< Return sym session private size. */
+ cryptodev_asym_get_session_private_size_t asym_session_get_size;
+ /**< Return asym session private size. */
+ cryptodev_sym_configure_session_t sym_session_configure;
+ /**< Configure a Crypto session. */
+ cryptodev_asym_configure_session_t asym_session_configure;
+ /**< Configure asymmetric Crypto session. */
+ cryptodev_sym_free_session_t sym_session_clear;
+ /**< Clear a Crypto session's private data. */
+ cryptodev_asym_free_session_t asym_session_clear;
+ /**< Clear a Crypto sessions private data. */
+ cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
+ /**< Process input data synchronously (CPU crypto). */
+};
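A PMD exposes its callbacks through a statically initialised ops table; a sketch with hypothetical dummy_pmd_* handlers (unimplemented callbacks may be left NULL, as the library checks each pointer before dispatching):

    static struct rte_cryptodev_ops dummy_pmd_ops = {
    	.dev_configure		= dummy_pmd_config,
    	.dev_start		= dummy_pmd_start,
    	.dev_stop		= dummy_pmd_stop,
    	.dev_close		= dummy_pmd_close,
    	.dev_infos_get		= dummy_pmd_info_get,
    	.stats_get		= NULL,	/* optional; -ENOTSUP if unset */
    	.stats_reset		= NULL,
    	.queue_pair_setup	= dummy_pmd_qp_setup,
    	.queue_pair_release	= dummy_pmd_qp_release,
    	.sym_session_get_size	= dummy_pmd_sym_session_get_size,
    	.sym_session_configure	= dummy_pmd_sym_session_configure,
    	.sym_session_clear	= dummy_pmd_sym_session_clear,
    };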
+
+
+/**
+ * Function primarily for internal use by dummy (e.g. ring-based) drivers.
+ * Allocates a new cryptodev slot for a crypto device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name Unique identifier name for each device
+ * @param socket_id Socket to allocate resources on.
+ * @return
+ * - Slot in the rte_crypto_devices array for the new device.
+ */
+struct rte_cryptodev *
+rte_cryptodev_pmd_allocate(const char *name, int socket_id);
+
+/**
+ * Function primarily for internal use by dummy (e.g. ring-based) drivers.
+ * Releases the specified cryptodev device.
+ *
+ * @param cryptodev
+ * The *cryptodev* pointer is the address of the *rte_cryptodev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+extern int
+rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev);
+
+
+/**
+ * @internal
+ *
+ * PMD assist function to parse initialisation arguments for crypto driver
+ * when creating a new crypto PMD device instance.
+ *
+ * The PMD should set default values for its parameters before calling this
+ * function; those defaults are overwritten with any values successfully
+ * parsed from the args string.
+ *
+ * @param params parsed PMD initialisation parameters
+ * @param args input argument string to parse
+ *
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int
+rte_cryptodev_pmd_parse_input_args(
+ struct rte_cryptodev_pmd_init_params *params,
+ const char *args);
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a crypto driver to
+ * create and allocate resources for a new crypto PMD device instance.
+ *
+ * @param name crypto device name.
+ * @param device base device instance
+ * @param params PMD initialisation parameters
+ *
+ * @return
+ * - crypto device instance on success
+ * - NULL on creation failure
+ */
+struct rte_cryptodev *
+rte_cryptodev_pmd_create(const char *name,
+ struct rte_device *device,
+ struct rte_cryptodev_pmd_init_params *params);
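A vdev probe routine typically chains the two assist functions above; a sketch in which dummy_priv, dummy_pmd_ops and the probe function itself are hypothetical PMD-local names:

    #include <rte_bus_vdev.h>
    #include <rte_lcore.h>

    static int
    dummy_crypto_probe(struct rte_vdev_device *vdev)
    {
    	struct rte_cryptodev_pmd_init_params init_params = {
    		.name = "",
    		.private_data_size = sizeof(struct dummy_priv),
    		.socket_id = (int)rte_socket_id(),
    		.max_nb_queue_pairs = 2
    	};
    	struct rte_cryptodev *dev;

    	/* Defaults above are overwritten by any "key=value" device args. */
    	if (rte_cryptodev_pmd_parse_input_args(&init_params,
    			rte_vdev_device_args(vdev)) < 0)
    		return -EINVAL;

    	dev = rte_cryptodev_pmd_create(rte_vdev_device_name(vdev),
    			&vdev->device, &init_params);
    	if (dev == NULL)
    		return -ENODEV;

    	dev->dev_ops = &dummy_pmd_ops;	/* ops table sketched earlier */
    	return 0;
    }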
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a crypto driver to
+ * destroy and free resources associated with a crypto PMD device instance.
+ *
+ * @param cryptodev crypto device handle.
+ *
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int
+rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev);
+
+/**
+ * Executes all the callbacks registered by the user application for the
+ * specified device.
+ *
+ * @param dev Pointer to cryptodev struct
+ * @param event Crypto device interrupt event type.
+ *
+ * @return
+ * void
+ */
+void rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
+ enum rte_cryptodev_event_type event);
+
+/**
+ * @internal
+ * Create a unique device name.
+ */
+int
+rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix);
+
+/**
+ * @internal
+ * Allocate a cryptodev driver identifier and register the driver.
+ *
+ * @param crypto_drv
+ * Pointer to cryptodev_driver.
+ * @param drv
+ * Pointer to rte_driver.
+ *
+ * @return
+ * The driver type identifier
+ */
+uint8_t rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
+ const struct rte_driver *drv);
+
+
+#define RTE_PMD_REGISTER_CRYPTO_DRIVER(crypto_drv, drv, driver_id)\
+RTE_INIT(init_ ##driver_id)\
+{\
+ driver_id = rte_cryptodev_allocate_driver(&crypto_drv, &(drv));\
+}
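Typical use of this macro in a PMD source file; all dummy_* names below are hypothetical, and dummy_crypto_probe refers to the probe sketch above:

    static struct rte_vdev_driver dummy_vdev_drv = {
    	.probe = dummy_crypto_probe,
    };
    static struct cryptodev_driver dummy_crypto_drv;
    uint8_t dummy_driver_id;

    /* Runs at constructor time and stores the allocated driver id. */
    RTE_PMD_REGISTER_CRYPTO_DRIVER(dummy_crypto_drv,
    		dummy_vdev_drv.driver, dummy_driver_id);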
+
+static inline void *
+get_sym_session_private_data(const struct rte_cryptodev_sym_session *sess,
+ uint8_t driver_id)
+{
+ if (unlikely(sess->nb_drivers <= driver_id))
+ return NULL;
+
+ return sess->sess_data[driver_id].data;
+}
+
+static inline void
+set_sym_session_private_data(struct rte_cryptodev_sym_session *sess,
+ uint8_t driver_id, void *private_data)
+{
+ if (unlikely(sess->nb_drivers <= driver_id)) {
+ CDEV_LOG_ERR("Set private data for driver %u not allowed\n",
+ driver_id);
+ return;
+ }
+
+ sess->sess_data[driver_id].data = private_data;
+}
+
+static inline void *
+get_asym_session_private_data(const struct rte_cryptodev_asym_session *sess,
+ uint8_t driver_id)
+{
+ return sess->sess_private_data[driver_id];
+}
+
+static inline void
+set_asym_session_private_data(struct rte_cryptodev_asym_session *sess,
+ uint8_t driver_id, void *private_data)
+{
+ sess->sess_private_data[driver_id] = private_data;
+}
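These helpers are what a PMD's session-configure callback pairs with; a minimal sketch (dummy_sym_session_configure is hypothetical) that allocates private data from the given mempool and attaches it under the device's driver id:

    static int
    dummy_sym_session_configure(struct rte_cryptodev *dev,
    		struct rte_crypto_sym_xform *xform,
    		struct rte_cryptodev_sym_session *sess,
    		struct rte_mempool *mp)
    {
    	void *priv;

    	if (xform == NULL)
    		return -EINVAL;
    	if (rte_mempool_get(mp, &priv) < 0)
    		return -ENOMEM;

    	/* ... validate xform and populate priv here ... */

    	set_sym_session_private_data(sess, dev->driver_id, priv);
    	return 0;
    }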
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTODEV_PMD_H_ */
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_trace.h b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_trace.h
new file mode 100644
index 000000000..d1f4f069a
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_trace.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _RTE_CRYPTODEV_TRACE_H_
+#define _RTE_CRYPTODEV_TRACE_H_
+
+/**
+ * @file
+ *
+ * API for cryptodev trace support
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_trace_point.h>
+
+#include "rte_cryptodev.h"
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_configure,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id,
+ struct rte_cryptodev_config *conf),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u16(conf->nb_queue_pairs);
+ rte_trace_point_emit_i64(conf->ff_disable);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_start,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, int rc),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_int(rc);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_stop,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id),
+ rte_trace_point_emit_u8(dev_id);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_close,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, int rc),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_int(rc);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_queue_pair_setup,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *conf),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u16(queue_pair_id);
+ rte_trace_point_emit_u32(conf->nb_descriptors);
+ rte_trace_point_emit_ptr(conf->mp_session);
+ rte_trace_point_emit_ptr(conf->mp_session_private);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_sym_session_pool_create,
+ RTE_TRACE_POINT_ARGS(const char *name, uint32_t nb_elts,
+ uint32_t elt_size, uint32_t cache_size,
+ uint16_t user_data_size, void *mempool),
+ rte_trace_point_emit_string(name);
+ rte_trace_point_emit_u32(nb_elts);
+ rte_trace_point_emit_u32(elt_size);
+ rte_trace_point_emit_u32(cache_size);
+ rte_trace_point_emit_u16(user_data_size);
+ rte_trace_point_emit_ptr(mempool);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_sym_session_create,
+ RTE_TRACE_POINT_ARGS(void *mempool,
+ struct rte_cryptodev_sym_session *sess),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(sess);
+ rte_trace_point_emit_u64(sess->opaque_data);
+ rte_trace_point_emit_u16(sess->nb_drivers);
+ rte_trace_point_emit_u16(sess->user_data_sz);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_asym_session_create,
+ RTE_TRACE_POINT_ARGS(void *mempool,
+ struct rte_cryptodev_asym_session *sess),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(sess);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_sym_session_free,
+ RTE_TRACE_POINT_ARGS(struct rte_cryptodev_sym_session *sess),
+ rte_trace_point_emit_ptr(sess);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_asym_session_free,
+ RTE_TRACE_POINT_ARGS(struct rte_cryptodev_asym_session *sess),
+ rte_trace_point_emit_ptr(sess);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_sym_session_init,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess, void *xforms,
+ void *mempool),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_ptr(sess);
+ rte_trace_point_emit_u64(sess->opaque_data);
+ rte_trace_point_emit_u16(sess->nb_drivers);
+ rte_trace_point_emit_u16(sess->user_data_sz);
+ rte_trace_point_emit_ptr(xforms);
+ rte_trace_point_emit_ptr(mempool);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_asym_session_init,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess, void *xforms,
+ void *mempool),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_ptr(sess);
+ rte_trace_point_emit_ptr(xforms);
+ rte_trace_point_emit_ptr(mempool);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_sym_session_clear,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, void *sess),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_ptr(sess);
+)
+
+RTE_TRACE_POINT(
+ rte_cryptodev_trace_asym_session_clear,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, void *sess),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_ptr(sess);
+)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTODEV_TRACE_H_ */
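Assuming these points are registered under the lib.cryptodev namespace (see cryptodev_trace_points.c), an application can enable them with the EAL command-line option --trace=lib.cryptodev* or programmatically; a sketch:

    #include <rte_trace.h>

    /* Same effect as passing --trace=lib.cryptodev* to EAL. */
    static int
    enable_cryptodev_tracing(void)
    {
    	return rte_trace_pattern("lib.cryptodev*", true);
    }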
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_trace_fp.h b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_trace_fp.h
new file mode 100644
index 000000000..9218997c1
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_trace_fp.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _RTE_CRYPTODEV_TRACE_FP_H_
+#define _RTE_CRYPTODEV_TRACE_FP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_trace_point.h>
+
+RTE_TRACE_POINT_FP(
+ rte_cryptodev_trace_enqueue_burst,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint16_t qp_id, void **ops,
+ uint16_t nb_ops),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u16(qp_id);
+ rte_trace_point_emit_ptr(ops);
+ rte_trace_point_emit_u16(nb_ops);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_cryptodev_trace_dequeue_burst,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint16_t qp_id, void **ops,
+ uint16_t nb_ops),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u16(qp_id);
+ rte_trace_point_emit_ptr(ops);
+ rte_trace_point_emit_u16(nb_ops);
+)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTODEV_TRACE_FP_H_ */
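Unlike the trace points above, RTE_TRACE_POINT_FP points compile to no-ops unless the build opts in; assumed knobs for this DPDK version:

    # make-based build (config/common_base)
    CONFIG_RTE_ENABLE_TRACE_FP=y

    # meson-based build
    meson configure -Denable_trace_fp=true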
diff --git a/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_version.map b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_version.map
new file mode 100644
index 000000000..07a2d2f02
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -0,0 +1,106 @@
+DPDK_20.0 {
+ global:
+
+ rte_crypto_aead_algorithm_strings;
+ rte_crypto_aead_operation_strings;
+ rte_crypto_auth_algorithm_strings;
+ rte_crypto_auth_operation_strings;
+ rte_crypto_cipher_algorithm_strings;
+ rte_crypto_cipher_operation_strings;
+ rte_crypto_op_pool_create;
+ rte_cryptodev_allocate_driver;
+ rte_cryptodev_callback_register;
+ rte_cryptodev_callback_unregister;
+ rte_cryptodev_close;
+ rte_cryptodev_configure;
+ rte_cryptodev_count;
+ rte_cryptodev_device_count_by_driver;
+ rte_cryptodev_devices_get;
+ rte_cryptodev_driver_id_get;
+ rte_cryptodev_driver_name_get;
+ rte_cryptodev_get_aead_algo_enum;
+ rte_cryptodev_get_auth_algo_enum;
+ rte_cryptodev_get_cipher_algo_enum;
+ rte_cryptodev_get_dev_id;
+ rte_cryptodev_get_feature_name;
+ rte_cryptodev_get_sec_ctx;
+ rte_cryptodev_info_get;
+ rte_cryptodev_name_get;
+ rte_cryptodev_pmd_allocate;
+ rte_cryptodev_pmd_callback_process;
+ rte_cryptodev_pmd_create;
+ rte_cryptodev_pmd_create_dev_name;
+ rte_cryptodev_pmd_destroy;
+ rte_cryptodev_pmd_get_dev;
+ rte_cryptodev_pmd_get_named_dev;
+ rte_cryptodev_pmd_is_valid_dev;
+ rte_cryptodev_pmd_parse_input_args;
+ rte_cryptodev_pmd_release_device;
+ rte_cryptodev_queue_pair_count;
+ rte_cryptodev_queue_pair_setup;
+ rte_cryptodev_socket_id;
+ rte_cryptodev_start;
+ rte_cryptodev_stats_get;
+ rte_cryptodev_stats_reset;
+ rte_cryptodev_stop;
+ rte_cryptodev_sym_capability_check_aead;
+ rte_cryptodev_sym_capability_check_auth;
+ rte_cryptodev_sym_capability_check_cipher;
+ rte_cryptodev_sym_capability_get;
+ rte_cryptodev_sym_get_header_session_size;
+ rte_cryptodev_sym_get_private_session_size;
+ rte_cryptodev_sym_session_clear;
+ rte_cryptodev_sym_session_create;
+ rte_cryptodev_sym_session_free;
+ rte_cryptodev_sym_session_init;
+ rte_cryptodevs;
+
+ local: *;
+};
+
+DPDK_21 {
+ global:
+
+ rte_cryptodev_info_get;
+ rte_cryptodev_sym_capability_get;
+} DPDK_20.0;
+
+
+EXPERIMENTAL {
+ global:
+
+ rte_cryptodev_asym_capability_get;
+ rte_cryptodev_asym_get_header_session_size;
+ rte_cryptodev_asym_get_private_session_size;
+ rte_cryptodev_asym_get_xform_enum;
+ rte_cryptodev_asym_session_clear;
+ rte_cryptodev_asym_session_create;
+ rte_cryptodev_asym_session_free;
+ rte_cryptodev_asym_session_init;
+ rte_cryptodev_asym_xform_capability_check_modlen;
+ rte_cryptodev_asym_xform_capability_check_optype;
+ rte_cryptodev_sym_cpu_crypto_process;
+ rte_cryptodev_sym_get_existing_header_session_size;
+ rte_cryptodev_sym_session_get_user_data;
+ rte_cryptodev_sym_session_pool_create;
+ rte_cryptodev_sym_session_set_user_data;
+ rte_crypto_asym_op_strings;
+ rte_crypto_asym_xform_strings;
+
+ # added in 20.05
+ __rte_cryptodev_trace_configure;
+ __rte_cryptodev_trace_start;
+ __rte_cryptodev_trace_stop;
+ __rte_cryptodev_trace_close;
+ __rte_cryptodev_trace_queue_pair_setup;
+ __rte_cryptodev_trace_sym_session_pool_create;
+ __rte_cryptodev_trace_sym_session_create;
+ __rte_cryptodev_trace_asym_session_create;
+ __rte_cryptodev_trace_sym_session_free;
+ __rte_cryptodev_trace_asym_session_free;
+ __rte_cryptodev_trace_sym_session_init;
+ __rte_cryptodev_trace_asym_session_init;
+ __rte_cryptodev_trace_sym_session_clear;
+ __rte_cryptodev_trace_asym_session_clear;
+ __rte_cryptodev_trace_dequeue_burst;
+ __rte_cryptodev_trace_enqueue_burst;
+};
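Each symbol exported in the EXPERIMENTAL node must also be tagged at its declaration; a sketch using one of the symbols listed above, with the prototype as it appears in rte_cryptodev.h:

    __rte_experimental
    unsigned int
    rte_cryptodev_sym_get_existing_header_session_size(
    		struct rte_cryptodev_sym_session *sess);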