author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000
commit | e6918187568dbd01842d8d1d2c808ce16a894239
tree | 64f88b554b444a49f656b6c656111a145cbbaa28 /src/seastar/dpdk/drivers/crypto/aesni_gcm
parent | Initial commit.
download | ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz, ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/seastar/dpdk/drivers/crypto/aesni_gcm')
7 files changed, 1268 insertions, 0 deletions
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/Makefile b/src/seastar/dpdk/drivers/crypto/aesni_gcm/Makefile new file mode 100644 index 000000000..39eff3db0 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/Makefile @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2016-2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_pmd_aesni_gcm.a + +# build flags +CFLAGS += -O3 +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += $(WERROR_FLAGS) + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_pmd_aesni_gcm_version.map + +# external library dependencies +LDLIBS += -lIPSec_MB +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_bus_vdev + +IMB_HDR = $(shell echo '\#include <intel-ipsec-mb.h>' | \ + $(CC) -E $(EXTRA_CFLAGS) - | grep 'intel-ipsec-mb.h' | \ + head -n1 | cut -d'"' -f2) + +# Detect library version +IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut -d'"' -f2) +IMB_VERSION_NUM = $(shell grep -e "IMB_VERSION_NUM" $(IMB_HDR) | cut -d' ' -f3) + +ifeq ($(IMB_VERSION),) +$(error "IPSec_MB version >= 0.52 is required") +endif + +ifeq ($(shell expr $(IMB_VERSION_NUM) \< 0x3400), 1) +$(error "IPSec_MB version >= 0.52 is required") +endif + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h new file mode 100644 index 000000000..e272f1067 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#ifndef _AESNI_GCM_OPS_H_ +#define _AESNI_GCM_OPS_H_ + +#ifndef LINUX +#define LINUX +#endif + +#include <intel-ipsec-mb.h> + +/** Supported vector modes */ +enum aesni_gcm_vector_mode { + RTE_AESNI_GCM_NOT_SUPPORTED = 0, + RTE_AESNI_GCM_SSE, + RTE_AESNI_GCM_AVX, + RTE_AESNI_GCM_AVX2, + RTE_AESNI_GCM_AVX512, + RTE_AESNI_GCM_VECTOR_NUM +}; + +enum aesni_gcm_key { + GCM_KEY_128 = 0, + GCM_KEY_192, + GCM_KEY_256, + GCM_KEY_NUM +}; + +typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data, + struct gcm_context_data *gcm_ctx_data, uint8_t *out, + const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv, + const uint8_t *aad, uint64_t aad_len, + uint8_t *auth_tag, uint64_t auth_tag_len); + +typedef void (*aesni_gcm_pre_t)(const void *key, struct gcm_key_data *gcm_data); + +typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data, + struct gcm_context_data *gcm_ctx_data, + const uint8_t *iv, + uint8_t const *aad, + uint64_t aad_len); + +typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data, + struct gcm_context_data *gcm_ctx_data, + uint8_t *out, + const uint8_t *in, + uint64_t plaintext_len); + +typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data, + struct gcm_context_data *gcm_ctx_data, + uint8_t *auth_tag, + uint64_t auth_tag_len); + +/** GCM library function pointer table */ +struct aesni_gcm_ops { + aesni_gcm_t enc; /**< GCM encode function pointer */ + aesni_gcm_t dec; /**< GCM decode function pointer */ + aesni_gcm_pre_t pre; /**< GCM pre-compute */ + aesni_gcm_init_t init; + aesni_gcm_update_t update_enc; + aesni_gcm_update_t update_dec; + aesni_gcm_finalize_t 
finalize_enc; + aesni_gcm_finalize_t finalize_dec; +}; + +#endif /* _AESNI_GCM_OPS_H_ */ diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c new file mode 100644 index 000000000..49ee31708 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c @@ -0,0 +1,670 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#include <rte_common.h> +#include <rte_hexdump.h> +#include <rte_cryptodev.h> +#include <rte_cryptodev_pmd.h> +#include <rte_bus_vdev.h> +#include <rte_malloc.h> +#include <rte_cpuflags.h> +#include <rte_byteorder.h> + +#include "aesni_gcm_pmd_private.h" + +static uint8_t cryptodev_driver_id; + +/** Parse crypto xform chain and set private session parameters */ +int +aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops, + struct aesni_gcm_session *sess, + const struct rte_crypto_sym_xform *xform) +{ + const struct rte_crypto_sym_xform *auth_xform; + const struct rte_crypto_sym_xform *aead_xform; + uint8_t key_length; + uint8_t *key; + + /* AES-GMAC */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + auth_xform = xform; + if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) { + AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an " + "authentication only algorithm"); + return -ENOTSUP; + } + /* Set IV parameters */ + sess->iv.offset = auth_xform->auth.iv.offset; + sess->iv.length = auth_xform->auth.iv.length; + + /* Select Crypto operation */ + if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) + sess->op = AESNI_GMAC_OP_GENERATE; + else + sess->op = AESNI_GMAC_OP_VERIFY; + + key_length = auth_xform->auth.key.length; + key = auth_xform->auth.key.data; + sess->req_digest_length = auth_xform->auth.digest_length; + + /* AES-GCM */ + } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + aead_xform = xform; + + if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) { + AESNI_GCM_LOG(ERR, "The only combined operation " + "supported is AES GCM"); + return -ENOTSUP; + } + + /* Set IV parameters */ + sess->iv.offset = aead_xform->aead.iv.offset; + sess->iv.length = aead_xform->aead.iv.length; + + /* Select Crypto operation */ + if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) + sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION; + else + sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION; + + key_length = aead_xform->aead.key.length; + key = aead_xform->aead.key.data; + + sess->aad_length = aead_xform->aead.aad_length; + sess->req_digest_length = aead_xform->aead.digest_length; + } else { + AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication"); + return -ENOTSUP; + } + + + /* IV check */ + if (sess->iv.length != 16 && sess->iv.length != 12 && + sess->iv.length != 0) { + AESNI_GCM_LOG(ERR, "Wrong IV length"); + return -EINVAL; + } + + /* Check key length and calculate GCM pre-compute. */ + switch (key_length) { + case 16: + sess->key = GCM_KEY_128; + break; + case 24: + sess->key = GCM_KEY_192; + break; + case 32: + sess->key = GCM_KEY_256; + break; + default: + AESNI_GCM_LOG(ERR, "Invalid key length"); + return -EINVAL; + } + + gcm_ops[sess->key].pre(key, &sess->gdata_key); + + /* Digest check */ + if (sess->req_digest_length > 16) { + AESNI_GCM_LOG(ERR, "Invalid digest length"); + return -EINVAL; + } + /* + * Multi-buffer lib supports digest sizes from 4 to 16 bytes + * in version 0.50 and sizes of 8, 12 and 16 bytes, + * in version 0.49. 
+ * If size requested is different, generate the full digest + * (16 bytes) in a temporary location and then memcpy + * the requested number of bytes. + */ +#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0) + if (sess->req_digest_length < 4) +#else + if (sess->req_digest_length != 16 && + sess->req_digest_length != 12 && + sess->req_digest_length != 8) +#endif + sess->gen_digest_length = 16; + else + sess->gen_digest_length = sess->req_digest_length; + + return 0; +} + +/** Get gcm session */ +static struct aesni_gcm_session * +aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op) +{ + struct aesni_gcm_session *sess = NULL; + struct rte_crypto_sym_op *sym_op = op->sym; + + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { + if (likely(sym_op->session != NULL)) + sess = (struct aesni_gcm_session *) + get_sym_session_private_data( + sym_op->session, + cryptodev_driver_id); + } else { + void *_sess; + void *_sess_private_data = NULL; + + if (rte_mempool_get(qp->sess_mp, (void **)&_sess)) + return NULL; + + if (rte_mempool_get(qp->sess_mp_priv, + (void **)&_sess_private_data)) + return NULL; + + sess = (struct aesni_gcm_session *)_sess_private_data; + + if (unlikely(aesni_gcm_set_session_parameters(qp->ops, + sess, sym_op->xform) != 0)) { + rte_mempool_put(qp->sess_mp, _sess); + rte_mempool_put(qp->sess_mp_priv, _sess_private_data); + sess = NULL; + } + sym_op->session = (struct rte_cryptodev_sym_session *)_sess; + set_sym_session_private_data(sym_op->session, + cryptodev_driver_id, _sess_private_data); + } + + if (unlikely(sess == NULL)) + op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + + return sess; +} + +/** + * Process a crypto operation, calling + * the GCM API from the multi buffer library. + * + * @param qp queue pair + * @param op symmetric crypto operation + * @param session GCM session + * + * @return + * + */ +static int +process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op, + struct aesni_gcm_session *session) +{ + uint8_t *src, *dst; + uint8_t *iv_ptr; + struct rte_crypto_sym_op *sym_op = op->sym; + struct rte_mbuf *m_src = sym_op->m_src; + uint32_t offset, data_offset, data_length; + uint32_t part_len, total_len, data_len; + uint8_t *tag; + + if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION || + session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) { + offset = sym_op->aead.data.offset; + data_offset = offset; + data_length = sym_op->aead.data.length; + } else { + offset = sym_op->auth.data.offset; + data_offset = offset; + data_length = sym_op->auth.data.length; + } + + RTE_ASSERT(m_src != NULL); + + while (offset >= m_src->data_len && data_length != 0) { + offset -= m_src->data_len; + m_src = m_src->next; + + RTE_ASSERT(m_src != NULL); + } + + data_len = m_src->data_len - offset; + part_len = (data_len < data_length) ? data_len : + data_length; + + /* Destination buffer is required when segmented source buffer */ + RTE_ASSERT((part_len == data_length) || + ((part_len != data_length) && + (sym_op->m_dst != NULL))); + /* Segmented destination buffer is not supported */ + RTE_ASSERT((sym_op->m_dst == NULL) || + ((sym_op->m_dst != NULL) && + rte_pktmbuf_is_contiguous(sym_op->m_dst))); + + + dst = sym_op->m_dst ? 
+ rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *, + data_offset) : + rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *, + data_offset); + + src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset); + + iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, + session->iv.offset); + + if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) { + qp->ops[session->key].init(&session->gdata_key, + &qp->gdata_ctx, + iv_ptr, + sym_op->aead.aad.data, + (uint64_t)session->aad_length); + + qp->ops[session->key].update_enc(&session->gdata_key, + &qp->gdata_ctx, dst, src, + (uint64_t)part_len); + total_len = data_length - part_len; + + while (total_len) { + dst += part_len; + m_src = m_src->next; + + RTE_ASSERT(m_src != NULL); + + src = rte_pktmbuf_mtod(m_src, uint8_t *); + part_len = (m_src->data_len < total_len) ? + m_src->data_len : total_len; + + qp->ops[session->key].update_enc(&session->gdata_key, + &qp->gdata_ctx, dst, src, + (uint64_t)part_len); + total_len -= part_len; + } + + if (session->req_digest_length != session->gen_digest_length) + tag = qp->temp_digest; + else + tag = sym_op->aead.digest.data; + + qp->ops[session->key].finalize_enc(&session->gdata_key, + &qp->gdata_ctx, + tag, + session->gen_digest_length); + } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) { + qp->ops[session->key].init(&session->gdata_key, + &qp->gdata_ctx, + iv_ptr, + sym_op->aead.aad.data, + (uint64_t)session->aad_length); + + qp->ops[session->key].update_dec(&session->gdata_key, + &qp->gdata_ctx, dst, src, + (uint64_t)part_len); + total_len = data_length - part_len; + + while (total_len) { + dst += part_len; + m_src = m_src->next; + + RTE_ASSERT(m_src != NULL); + + src = rte_pktmbuf_mtod(m_src, uint8_t *); + part_len = (m_src->data_len < total_len) ? + m_src->data_len : total_len; + + qp->ops[session->key].update_dec(&session->gdata_key, + &qp->gdata_ctx, + dst, src, + (uint64_t)part_len); + total_len -= part_len; + } + + tag = qp->temp_digest; + qp->ops[session->key].finalize_dec(&session->gdata_key, + &qp->gdata_ctx, + tag, + session->gen_digest_length); + } else if (session->op == AESNI_GMAC_OP_GENERATE) { + qp->ops[session->key].init(&session->gdata_key, + &qp->gdata_ctx, + iv_ptr, + src, + (uint64_t)data_length); + if (session->req_digest_length != session->gen_digest_length) + tag = qp->temp_digest; + else + tag = sym_op->auth.digest.data; + qp->ops[session->key].finalize_enc(&session->gdata_key, + &qp->gdata_ctx, + tag, + session->gen_digest_length); + } else { /* AESNI_GMAC_OP_VERIFY */ + qp->ops[session->key].init(&session->gdata_key, + &qp->gdata_ctx, + iv_ptr, + src, + (uint64_t)data_length); + + /* + * Generate always 16 bytes and later compare only + * the bytes passed. 
+ */ + tag = qp->temp_digest; + qp->ops[session->key].finalize_enc(&session->gdata_key, + &qp->gdata_ctx, + tag, + session->gen_digest_length); + } + + return 0; +} + +/** + * Process a completed job and return rte_mbuf which job processed + * + * @param job JOB_AES_HMAC job to process + * + * @return + * - Returns processed mbuf which is trimmed of output digest used in + * verification of supplied digest in the case of a HASH_CIPHER operation + * - Returns NULL on invalid job + */ +static void +post_process_gcm_crypto_op(struct aesni_gcm_qp *qp, + struct rte_crypto_op *op, + struct aesni_gcm_session *session) +{ + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + + /* Verify digest if required */ + if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION || + session->op == AESNI_GMAC_OP_VERIFY) { + uint8_t *digest; + + uint8_t *tag = qp->temp_digest; + + if (session->op == AESNI_GMAC_OP_VERIFY) + digest = op->sym->auth.digest.data; + else + digest = op->sym->aead.digest.data; + +#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG + rte_hexdump(stdout, "auth tag (orig):", + digest, session->req_digest_length); + rte_hexdump(stdout, "auth tag (calc):", + tag, session->req_digest_length); +#endif + + if (memcmp(tag, digest, session->req_digest_length) != 0) + op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + } else { + if (session->req_digest_length != session->gen_digest_length) { + if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) + memcpy(op->sym->aead.digest.data, qp->temp_digest, + session->req_digest_length); + else + memcpy(op->sym->auth.digest.data, qp->temp_digest, + session->req_digest_length); + } + } +} + +/** + * Process a completed GCM request + * + * @param qp Queue Pair to process + * @param op Crypto operation + * @param job JOB_AES_HMAC job + * + * @return + * - Number of processed jobs + */ +static void +handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp, + struct rte_crypto_op *op, + struct aesni_gcm_session *sess) +{ + post_process_gcm_crypto_op(qp, op, sess); + + /* Free session if a session-less crypto op */ + if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { + memset(sess, 0, sizeof(struct aesni_gcm_session)); + memset(op->sym->session, 0, + rte_cryptodev_sym_get_existing_header_session_size( + op->sym->session)); + rte_mempool_put(qp->sess_mp_priv, sess); + rte_mempool_put(qp->sess_mp, op->sym->session); + op->sym->session = NULL; + } +} + +static uint16_t +aesni_gcm_pmd_dequeue_burst(void *queue_pair, + struct rte_crypto_op **ops, uint16_t nb_ops) +{ + struct aesni_gcm_session *sess; + struct aesni_gcm_qp *qp = queue_pair; + + int retval = 0; + unsigned int i, nb_dequeued; + + nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, + (void **)ops, nb_ops, NULL); + + for (i = 0; i < nb_dequeued; i++) { + + sess = aesni_gcm_get_session(qp, ops[i]); + if (unlikely(sess == NULL)) { + ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + qp->qp_stats.dequeue_err_count++; + break; + } + + retval = process_gcm_crypto_op(qp, ops[i], sess); + if (retval < 0) { + ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + qp->qp_stats.dequeue_err_count++; + break; + } + + handle_completed_gcm_crypto_op(qp, ops[i], sess); + } + + qp->qp_stats.dequeued_count += i; + + return i; +} + +static uint16_t +aesni_gcm_pmd_enqueue_burst(void *queue_pair, + struct rte_crypto_op **ops, uint16_t nb_ops) +{ + struct aesni_gcm_qp *qp = queue_pair; + + unsigned int nb_enqueued; + + nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts, + (void **)ops, nb_ops, NULL); + qp->qp_stats.enqueued_count += 
nb_enqueued; + + return nb_enqueued; +} + +static int aesni_gcm_remove(struct rte_vdev_device *vdev); + +static int +aesni_gcm_create(const char *name, + struct rte_vdev_device *vdev, + struct rte_cryptodev_pmd_init_params *init_params) +{ + struct rte_cryptodev *dev; + struct aesni_gcm_private *internals; + enum aesni_gcm_vector_mode vector_mode; + MB_MGR *mb_mgr; + + /* Check CPU for support for AES instruction set */ + if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) { + AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU"); + return -EFAULT; + } + dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); + if (dev == NULL) { + AESNI_GCM_LOG(ERR, "driver %s: create failed", + init_params->name); + return -ENODEV; + } + + /* Check CPU for supported vector instruction set */ + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) + vector_mode = RTE_AESNI_GCM_AVX512; + else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) + vector_mode = RTE_AESNI_GCM_AVX2; + else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) + vector_mode = RTE_AESNI_GCM_AVX; + else + vector_mode = RTE_AESNI_GCM_SSE; + + dev->driver_id = cryptodev_driver_id; + dev->dev_ops = rte_aesni_gcm_pmd_ops; + + /* register rx/tx burst functions for data path */ + dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst; + dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst; + + dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | + RTE_CRYPTODEV_FF_CPU_AESNI | + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; + + mb_mgr = alloc_mb_mgr(0); + if (mb_mgr == NULL) + return -ENOMEM; + + switch (vector_mode) { + case RTE_AESNI_GCM_SSE: + dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE; + init_mb_mgr_sse(mb_mgr); + break; + case RTE_AESNI_GCM_AVX: + dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX; + init_mb_mgr_avx(mb_mgr); + break; + case RTE_AESNI_GCM_AVX2: + dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2; + init_mb_mgr_avx2(mb_mgr); + break; + case RTE_AESNI_GCM_AVX512: + dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2; + init_mb_mgr_avx512(mb_mgr); + break; + default: + AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode); + goto error_exit; + } + + internals = dev->data->dev_private; + + internals->vector_mode = vector_mode; + internals->mb_mgr = mb_mgr; + + /* Set arch independent function pointers, based on key size */ + internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc; + internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec; + internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre; + internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init; + internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update; + internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update; + internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize; + internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize; + + internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc; + internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec; + internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre; + internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init; + internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update; + internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update; + internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize; + internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize; + + internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc; + internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec; + 
internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre; + internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init; + internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update; + internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update; + internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize; + internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize; + + internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; + +#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0) + AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n", + imb_get_version_str()); +#else + AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n"); +#endif + + return 0; + +error_exit: + if (mb_mgr) + free_mb_mgr(mb_mgr); + + rte_cryptodev_pmd_destroy(dev); + + return -1; +} + +static int +aesni_gcm_probe(struct rte_vdev_device *vdev) +{ + struct rte_cryptodev_pmd_init_params init_params = { + "", + sizeof(struct aesni_gcm_private), + rte_socket_id(), + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS + }; + const char *name; + const char *input_args; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + input_args = rte_vdev_device_args(vdev); + rte_cryptodev_pmd_parse_input_args(&init_params, input_args); + + return aesni_gcm_create(name, vdev, &init_params); +} + +static int +aesni_gcm_remove(struct rte_vdev_device *vdev) +{ + struct rte_cryptodev *cryptodev; + struct aesni_gcm_private *internals; + const char *name; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + + cryptodev = rte_cryptodev_pmd_get_named_dev(name); + if (cryptodev == NULL) + return -ENODEV; + + internals = cryptodev->data->dev_private; + + free_mb_mgr(internals->mb_mgr); + + return rte_cryptodev_pmd_destroy(cryptodev); +} + +static struct rte_vdev_driver aesni_gcm_pmd_drv = { + .probe = aesni_gcm_probe, + .remove = aesni_gcm_remove +}; + +static struct cryptodev_driver aesni_gcm_crypto_drv; + +RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv); +RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd); +RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD, + "max_nb_queue_pairs=<int> " + "socket_id=<int>"); +RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver, + cryptodev_driver_id); + + +RTE_INIT(aesni_gcm_init_log) +{ + aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm"); +} diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c new file mode 100644 index 000000000..2f66c7c58 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +#include <string.h> + +#include <rte_common.h> +#include <rte_malloc.h> +#include <rte_cryptodev_pmd.h> + +#include "aesni_gcm_pmd_private.h" + +static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = { + { /* AES GMAC (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_GMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 1, + .max = 16, + .increment = 1 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { /* AES GCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = 
RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 1, + .max = 16, + .increment = 1 + }, + .aad_size = { + .min = 0, + .max = 65535, + .increment = 1 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +/** Configure device */ +static int +aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev, + __rte_unused struct rte_cryptodev_config *config) +{ + return 0; +} + +/** Start device */ +static int +aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev) +{ + return 0; +} + +/** Stop device */ +static void +aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev) +{ +} + +/** Close device */ +static int +aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev) +{ + return 0; +} + + +/** Get device statistics */ +static void +aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id]; + + stats->enqueued_count += qp->qp_stats.enqueued_count; + stats->dequeued_count += qp->qp_stats.dequeued_count; + + stats->enqueue_err_count += qp->qp_stats.enqueue_err_count; + stats->dequeue_err_count += qp->qp_stats.dequeue_err_count; + } +} + +/** Reset device statistics */ +static void +aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id]; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + } +} + + +/** Get device info */ +static void +aesni_gcm_pmd_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *dev_info) +{ + struct aesni_gcm_private *internals = dev->data->dev_private; + + if (dev_info != NULL) { + dev_info->driver_id = dev->driver_id; + dev_info->feature_flags = dev->feature_flags; + dev_info->capabilities = aesni_gcm_pmd_capabilities; + + dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; + } +} + +/** Release queue pair */ +static int +aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) +{ + if (dev->data->queue_pairs[qp_id] != NULL) { + struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id]; + + if (qp->processed_pkts) + rte_ring_free(qp->processed_pkts); + + rte_free(dev->data->queue_pairs[qp_id]); + dev->data->queue_pairs[qp_id] = NULL; + } + return 0; +} + +/** set a unique name for the queue pair based on it's name, dev_id and qp_id */ +static int +aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev, + struct aesni_gcm_qp *qp) +{ + unsigned n = snprintf(qp->name, sizeof(qp->name), + "aesni_gcm_pmd_%u_qp_%u", + dev->data->dev_id, qp->id); + + if (n >= sizeof(qp->name)) + return -1; + + return 0; +} + +/** Create a ring to place process packets on */ +static struct rte_ring * +aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp, + unsigned ring_size, int socket_id) +{ + struct rte_ring *r; + + r = rte_ring_lookup(qp->name); + if (r) { + if (rte_ring_get_size(r) >= ring_size) { + AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed" + " packets", qp->name); + return r; + } + AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed" + " packets", qp->name); + return NULL; + } + + return 
rte_ring_create(qp->name, ring_size, socket_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); +} + +/** Setup a queue pair */ +static int +aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id) +{ + struct aesni_gcm_qp *qp = NULL; + struct aesni_gcm_private *internals = dev->data->dev_private; + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->queue_pairs[qp_id] != NULL) + aesni_gcm_pmd_qp_release(dev, qp_id); + + /* Allocate the queue pair data structure. */ + qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp), + RTE_CACHE_LINE_SIZE, socket_id); + if (qp == NULL) + return (-ENOMEM); + + qp->id = qp_id; + dev->data->queue_pairs[qp_id] = qp; + + if (aesni_gcm_pmd_qp_set_unique_name(dev, qp)) + goto qp_setup_cleanup; + + qp->ops = (const struct aesni_gcm_ops *)internals->ops; + + qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp, + qp_conf->nb_descriptors, socket_id); + if (qp->processed_pkts == NULL) + goto qp_setup_cleanup; + + qp->sess_mp = qp_conf->mp_session; + qp->sess_mp_priv = qp_conf->mp_session_private; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + + return 0; + +qp_setup_cleanup: + if (qp) + rte_free(qp); + + return -1; +} + +/** Return the number of allocated queue pairs */ +static uint32_t +aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev) +{ + return dev->data->nb_queue_pairs; +} + +/** Returns the size of the aesni gcm session structure */ +static unsigned +aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) +{ + return sizeof(struct aesni_gcm_session); +} + +/** Configure a aesni gcm session from a crypto xform chain */ +static int +aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool) +{ + void *sess_private_data; + int ret; + struct aesni_gcm_private *internals = dev->data->dev_private; + + if (unlikely(sess == NULL)) { + AESNI_GCM_LOG(ERR, "invalid session struct"); + return -EINVAL; + } + + if (rte_mempool_get(mempool, &sess_private_data)) { + AESNI_GCM_LOG(ERR, + "Couldn't get object from session mempool"); + return -ENOMEM; + } + ret = aesni_gcm_set_session_parameters(internals->ops, + sess_private_data, xform); + if (ret != 0) { + AESNI_GCM_LOG(ERR, "failed configure session parameters"); + + /* Return session to mempool */ + rte_mempool_put(mempool, sess_private_data); + return ret; + } + + set_sym_session_private_data(sess, dev->driver_id, + sess_private_data); + + return 0; +} + +/** Clear the memory of session so it doesn't leave key material behind */ +static void +aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess) +{ + uint8_t index = dev->driver_id; + void *sess_priv = get_sym_session_private_data(sess, index); + + /* Zero out the whole structure */ + if (sess_priv) { + memset(sess_priv, 0, sizeof(struct aesni_gcm_session)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); + set_sym_session_private_data(sess, index, NULL); + rte_mempool_put(sess_mp, sess_priv); + } +} + +struct rte_cryptodev_ops aesni_gcm_pmd_ops = { + .dev_configure = aesni_gcm_pmd_config, + .dev_start = aesni_gcm_pmd_start, + .dev_stop = aesni_gcm_pmd_stop, + .dev_close = aesni_gcm_pmd_close, + + .stats_get = aesni_gcm_pmd_stats_get, + .stats_reset = aesni_gcm_pmd_stats_reset, + + .dev_infos_get = aesni_gcm_pmd_info_get, + + .queue_pair_setup = 
aesni_gcm_pmd_qp_setup, + .queue_pair_release = aesni_gcm_pmd_qp_release, + .queue_pair_count = aesni_gcm_pmd_qp_count, + + .sym_session_get_size = aesni_gcm_pmd_sym_session_get_size, + .sym_session_configure = aesni_gcm_pmd_sym_session_configure, + .sym_session_clear = aesni_gcm_pmd_sym_session_clear +}; + +struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops; diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h new file mode 100644 index 000000000..56b29e013 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_ +#define _RTE_AESNI_GCM_PMD_PRIVATE_H_ + +#include "aesni_gcm_ops.h" + +/* + * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50, + * so if macro is not defined, it means that the version is 0.49. + */ +#if !defined(IMB_VERSION_NUM) +#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0) +#endif + +#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm +/**< AES-NI GCM PMD device name */ + +/** AES-NI GCM PMD LOGTYPE DRIVER */ +int aesni_gcm_logtype_driver; +#define AESNI_GCM_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver, \ + "%s() line %u: "fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) + +/* Maximum length for digest */ +#define DIGEST_LENGTH_MAX 16 + +/** private data structure for each virtual AESNI GCM device */ +struct aesni_gcm_private { + enum aesni_gcm_vector_mode vector_mode; + /**< Vector mode */ + unsigned max_nb_queue_pairs; + /**< Max number of queue pairs supported by device */ + MB_MGR *mb_mgr; + /**< Multi-buffer instance */ + struct aesni_gcm_ops ops[GCM_KEY_NUM]; + /**< Function pointer table of the gcm APIs */ +}; + +struct aesni_gcm_qp { + const struct aesni_gcm_ops *ops; + /**< Function pointer table of the gcm APIs */ + struct rte_ring *processed_pkts; + /**< Ring for placing process packets */ + struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */ + /**< GCM parameters */ + struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */ + /**< Queue pair statistics */ + struct rte_mempool *sess_mp; + /**< Session Mempool */ + struct rte_mempool *sess_mp_priv; + /**< Session Private Data Mempool */ + uint16_t id; + /**< Queue Pair Identifier */ + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + /**< Unique Queue Pair Name */ + uint8_t temp_digest[DIGEST_LENGTH_MAX]; + /**< Buffer used to store the digest generated + * by the driver when verifying a digest provided + * by the user (using authentication verify operation) + */ +} __rte_cache_aligned; + + +enum aesni_gcm_operation { + AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION, + AESNI_GCM_OP_AUTHENTICATED_DECRYPTION, + AESNI_GMAC_OP_GENERATE, + AESNI_GMAC_OP_VERIFY +}; + +/** AESNI GCM private session structure */ +struct aesni_gcm_session { + struct { + uint16_t length; + uint16_t offset; + } iv; + /**< IV parameters */ + uint16_t aad_length; + /**< AAD length */ + uint16_t req_digest_length; + /**< Requested digest length */ + uint16_t gen_digest_length; + /**< Generated digest length */ + enum aesni_gcm_operation op; + /**< GCM operation type */ + enum aesni_gcm_key key; + /**< GCM key type */ + struct gcm_key_data gdata_key; + /**< GCM parameters */ +}; + + +/** + * Setup GCM session parameters + * @param sess aesni gcm session 
structure + * @param xform crypto transform chain + * + * @return + * - On success returns 0 + * - On failure returns error code < 0 + */ +extern int +aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops, + struct aesni_gcm_session *sess, + const struct rte_crypto_sym_xform *xform); + + +/** + * Device specific operations function pointer structure */ +extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops; + + +#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */ diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/meson.build b/src/seastar/dpdk/drivers/crypto/aesni_gcm/meson.build new file mode 100644 index 000000000..7183cfcba --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/meson.build @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +IMB_required_ver = '0.52.0' +lib = cc.find_library('IPSec_MB', required: false) +if not lib.found() + build = false +else + ext_deps += lib + + # version comes with quotes, so we split based on " and take the middle + imb_ver = cc.get_define('IMB_VERSION_STR', + prefix : '#include<intel-ipsec-mb.h>').split('"')[1] + + if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver)) + message('IPSec_MB version >= @0@ is required, found version @1@'.format( + IMB_required_ver, imb_ver)) + build = false + endif +endif + +allow_experimental_apis = true +sources = files('aesni_gcm_pmd.c', 'aesni_gcm_pmd_ops.c') +deps += ['bus_vdev'] diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/src/seastar/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map new file mode 100644 index 000000000..dc4d417b7 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map @@ -0,0 +1,3 @@ +DPDK_16.04 { + local: *; +}; |
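
Both build files added above enforce the same minimum intel-ipsec-mb version: meson.build requires '0.52.0', and the Makefile aborts when IMB_VERSION_NUM is below 0x3400. Under the IMB_VERSION macro defined in aesni_gcm_pmd_private.h, 0x3400 is exactly version 0.52.0. A trivial standalone check of that arithmetic (not driver code):

```c
#include <assert.h>

/* Same encoding aesni_gcm_pmd_private.h defines for pre-0.50 libraries:
 * major in bits 16+, minor in bits 8-15, patch in bits 0-7. */
#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	/* The Makefile's 0x3400 gate corresponds to intel-ipsec-mb 0.52.0. */
	assert(IMB_VERSION(0, 52, 0) == 0x3400);
	return 0;
}
```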
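At runtime the driver registers the virtual device crypto_aesni_gcm (CRYPTODEV_NAME_AESNI_GCM_PMD) together with the parameters listed in RTE_PMD_REGISTER_PARAM_STRING. A hedged sketch of creating the device programmatically rather than through the EAL --vdev option; the queue-pair count and socket id below are placeholder values:

```c
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>

/* Returns the cryptodev id on success, a negative value on failure. */
static int
create_aesni_gcm_vdev(void)
{
	/* Equivalent to: --vdev 'crypto_aesni_gcm,max_nb_queue_pairs=2,socket_id=0' */
	int ret = rte_vdev_init("crypto_aesni_gcm",
			"max_nb_queue_pairs=2,socket_id=0");
	if (ret < 0)
		return ret;

	return rte_cryptodev_get_dev_id("crypto_aesni_gcm");
}
```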
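aesni_gcm_set_session_parameters() accepts either an AES-GMAC auth transform or an AES-GCM AEAD transform with a 16-, 24- or 32-byte key, a 12- or 16-byte IV and a digest of at most 16 bytes. A minimal sketch of an AEAD transform that satisfies those checks; the key buffer and IV offset are illustrative assumptions, not values taken from the driver:

```c
#include <string.h>
#include <rte_crypto.h>

static uint8_t gcm_key[16];	/* placeholder 128-bit key */

static void
fill_aes_gcm_encrypt_xform(struct rte_crypto_sym_xform *xform)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;

	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.key.data = gcm_key;
	xform->aead.key.length = sizeof(gcm_key);	/* 16, 24 or 32 accepted */
	/* The IV is read from the crypto op at this offset; placing it right
	 * after the sym op is a common convention, not a driver requirement. */
	xform->aead.iv.offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);
	xform->aead.iv.length = 12;	/* 12 or 16 pass the IV length check */
	xform->aead.digest_length = 16;	/* must not exceed 16 */
	xform->aead.aad_length = 0;
}
```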
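Internally, process_gcm_crypto_op() drives the IPSec_MB GCM primitives through the aesni_gcm_ops table in three phases: one init with the IV and AAD, one update per contiguous segment, and a finalize that emits the tag. The sketch below mirrors that calling pattern for a two-segment encryption, assuming an ops table and precomputed key data prepared as in aesni_gcm_create(); it is illustrative, not driver code:

```c
#include <stdint.h>
#include "aesni_gcm_ops.h"

static void
gcm128_encrypt_two_segments(const struct aesni_gcm_ops *ops,
		const struct gcm_key_data *key, struct gcm_context_data *ctx,
		const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
		const uint8_t *in0, uint8_t *out0, uint64_t len0,
		const uint8_t *in1, uint8_t *out1, uint64_t len1,
		uint8_t *tag, uint64_t tag_len)
{
	/* One init, one update per contiguous segment, one finalize. */
	ops[GCM_KEY_128].init(key, ctx, iv, aad, aad_len);
	ops[GCM_KEY_128].update_enc(key, ctx, out0, in0, len0);
	ops[GCM_KEY_128].update_enc(key, ctx, out1, in1, len1);
	ops[GCM_KEY_128].finalize_enc(key, ctx, tag, tag_len);
}
```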