Diffstat (limited to 'src/seastar/dpdk/drivers/crypto/octeontx')
13 files changed, 2797 insertions, 0 deletions
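The patch below vendors DPDK's OCTEON TX crypto PMD: the PCI glue that binds CPT 81xx VFs (otx_cryptodev.c), the symmetric capability table, the VF register and PF-mailbox access layer, and the cryptodev operations. For orientation, here is a minimal sketch (not part of the patch) of how an application reaches this driver through the generic cryptodev API; the device id, single queue pair, and unset session pools are illustrative assumptions:

#include <string.h>
#include <rte_cryptodev.h>
#include <rte_eal.h>
#include <rte_lcore.h>

int
main(int argc, char **argv)
{
	struct rte_cryptodev_config conf;
	struct rte_cryptodev_qp_conf qp_conf;
	uint8_t dev_id = 0;	/* assumes the CPT VF probed as cryptodev 0 */

	if (rte_eal_init(argc, argv) < 0 || rte_cryptodev_count() == 0)
		return -1;

	memset(&conf, 0, sizeof(conf));
	conf.socket_id = (int)rte_socket_id();
	conf.nb_queue_pairs = 1;	/* PMD reports CPT_NUM_QS_PER_VF as the max */

	memset(&qp_conf, 0, sizeof(qp_conf));
	qp_conf.nb_descriptors = 2046;	/* DEFAULT_CMD_QLEN of this PMD */
	/* A real application must also set qp_conf.mp_session and
	 * qp_conf.mp_session_private to session mempools, e.g. created with
	 * rte_cryptodev_sym_session_pool_create(). */

	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					   (int)rte_socket_id()) < 0)
		return -1;

	return rte_cryptodev_start(dev_id);	/* lands in otx_cpt_dev_start() */
}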
diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/Makefile b/src/seastar/dpdk/drivers/crypto/octeontx/Makefile new file mode 100644 index 000000000..2752cbc36 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_pmd_octeontx_crypto.a + +# library version +LIBABIVER := 1 + +# build flags +CFLAGS += $(WERROR_FLAGS) + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_pci -lrte_bus_pci +LDLIBS += -lrte_common_cpt + +VPATH += $(RTE_SDK)/drivers/crypto/octeontx + +CFLAGS += -O3 +CFLAGS += -I$(RTE_SDK)/drivers/common/cpt +CFLAGS += -DALLOW_EXPERIMENTAL_API + +# PMD code +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_capabilities.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_hw_access.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_mbox.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_ops.c + +# export include files +SYMLINK-y-include += + +# versioning export map +EXPORT_MAP := rte_pmd_octeontx_crypto_version.map + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/meson.build b/src/seastar/dpdk/drivers/crypto/octeontx/meson.build new file mode 100644 index 000000000..a9f2d3157 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +if not is_linux + build = false +endif + +deps += ['bus_pci'] +deps += ['common_cpt'] +name = 'octeontx_crypto' + +allow_experimental_apis = true +sources = files('otx_cryptodev.c', + 'otx_cryptodev_capabilities.c', + 'otx_cryptodev_hw_access.c', + 'otx_cryptodev_mbox.c', + 'otx_cryptodev_ops.c') + +includes += include_directories('../../common/cpt') diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev.c b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev.c new file mode 100644 index 000000000..fc64a5f30 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev.c @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#include <rte_bus_pci.h> +#include <rte_common.h> +#include <rte_cryptodev.h> +#include <rte_cryptodev_pmd.h> +#include <rte_log.h> +#include <rte_pci.h> + +/* CPT common headers */ +#include "cpt_pmd_logs.h" + +#include "otx_cryptodev.h" +#include "otx_cryptodev_ops.h" + +static int otx_cryptodev_logtype; + +static struct rte_pci_id pci_id_cpt_table[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), + }, + /* sentinel */ + { + .device_id = 0 + }, +}; + +static void +otx_cpt_logtype_init(void) +{ + cpt_logtype = otx_cryptodev_logtype; +} + +static int +otx_cpt_pci_probe(struct rte_pci_driver *pci_drv, + struct rte_pci_device *pci_dev) +{ + struct rte_cryptodev *cryptodev; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + int retval; + + if (pci_drv == NULL) + return -ENODEV; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + + cryptodev = rte_cryptodev_pmd_allocate(name, rte_socket_id()); + if (cryptodev == NULL) + return -ENOMEM; + + cryptodev->device = &pci_dev->device; + cryptodev->device->driver = &pci_drv->driver; + cryptodev->driver_id = otx_cryptodev_driver_id; + + /* init user callbacks */ + 
TAILQ_INIT(&(cryptodev->link_intr_cbs)); + + /* init logtype used in common */ + otx_cpt_logtype_init(); + + /* Invoke PMD device initialization function */ + retval = otx_cpt_dev_create(cryptodev); + if (retval == 0) + return 0; + + CPT_LOG_ERR("[DRV %s]: Failed to create device " + "(vendor_id: 0x%x device_id: 0x%x)", + pci_drv->driver.name, + (unsigned int) pci_dev->id.vendor_id, + (unsigned int) pci_dev->id.device_id); + + cryptodev->attached = RTE_CRYPTODEV_DETACHED; + + return -ENXIO; +} + +static int +otx_cpt_pci_remove(struct rte_pci_device *pci_dev) +{ + struct rte_cryptodev *cryptodev; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + + if (pci_dev == NULL) + return -EINVAL; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + + cryptodev = rte_cryptodev_pmd_get_named_dev(name); + if (cryptodev == NULL) + return -ENODEV; + + if (pci_dev->driver == NULL) + return -ENODEV; + + /* free crypto device */ + rte_cryptodev_pmd_release_device(cryptodev); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_free(cryptodev->data->dev_private); + + cryptodev->device->driver = NULL; + cryptodev->device = NULL; + cryptodev->data = NULL; + + return 0; +} + +static struct rte_pci_driver otx_cryptodev_pmd = { + .id_table = pci_id_cpt_table, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = otx_cpt_pci_probe, + .remove = otx_cpt_pci_remove, +}; + +static struct cryptodev_driver otx_cryptodev_drv; + +RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_OCTEONTX_PMD, otx_cryptodev_pmd); +RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_OCTEONTX_PMD, pci_id_cpt_table); +RTE_PMD_REGISTER_CRYPTO_DRIVER(otx_cryptodev_drv, otx_cryptodev_pmd.driver, + otx_cryptodev_driver_id); + +RTE_INIT(otx_cpt_init_log) +{ + /* Bus level logs */ + otx_cryptodev_logtype = rte_log_register("pmd.crypto.octeontx"); + if (otx_cryptodev_logtype >= 0) + rte_log_set_level(otx_cryptodev_logtype, RTE_LOG_NOTICE); +} diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev.h b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev.h new file mode 100644 index 000000000..6c2871d71 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _OTX_CRYPTODEV_H_ +#define _OTX_CRYPTODEV_H_ + +/* Cavium OCTEON TX crypto PMD device name */ +#define CRYPTODEV_NAME_OCTEONTX_PMD crypto_octeontx + +/* Device ID */ +#define PCI_VENDOR_ID_CAVIUM 0x177d +#define CPT_81XX_PCI_VF_DEVICE_ID 0xa041 + +/* + * Crypto device driver ID + */ +uint8_t otx_cryptodev_driver_id; + +#endif /* _OTX_CRYPTODEV_H_ */ diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_capabilities.c b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_capabilities.c new file mode 100644 index 000000000..946571cf9 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_capabilities.c @@ -0,0 +1,604 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#include <rte_cryptodev.h> + +#include "otx_cryptodev_capabilities.h" + +static const struct rte_cryptodev_capabilities otx_capabilities[] = { + /* Symmetric capabilities */ + { /* NULL (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_NULL, + .block_size = 1, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + }, }, + }, }, + }, + { /* AES GMAC (AUTH) */ + .op = 
RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_GMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 8, + .max = 16, + .increment = 4 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { /* KASUMI (F9) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_KASUMI_F9, + .block_size = 8, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 4, + .max = 4, + .increment = 0 + }, + }, } + }, } + }, + { /* MD5 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_MD5, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 16, + .increment = 1 + }, + }, } + }, } + }, + { /* MD5 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, + .block_size = 64, + .key_size = { + .min = 8, + .max = 64, + .increment = 8 + }, + .digest_size = { + .min = 1, + .max = 16, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA1 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 20, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA1 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .block_size = 64, + .key_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 20, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA224 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA224, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 28, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA224 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, + .block_size = 64, + .key_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 28, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA256 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA256, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 32, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA256 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, + .block_size = 64, + .key_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 32, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA384 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA384, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, 
+ .digest_size = { + .min = 1, + .max = 48, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA384 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, + .block_size = 64, + .key_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 48, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA512 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA512, + .block_size = 128, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + }, } + }, } + }, + { /* SHA512 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, + .block_size = 128, + .key_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + .digest_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + }, } + }, } + }, + { /* SNOW 3G (UIA2) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 4, + .max = 4, + .increment = 0 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* ZUC (EIA3) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 4, + .max = 4, + .increment = 0 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* NULL (CIPHER) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_NULL, + .block_size = 1, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .iv_size = { + .min = 0, + .max = 0, + .increment = 0 + } + }, }, + }, } + }, + { /* 3DES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, + .block_size = 8, + .key_size = { + .min = 24, + .max = 24, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 16, + .increment = 8 + } + }, } + }, } + }, + { /* 3DES ECB */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_ECB, + .block_size = 8, + .key_size = { + .min = 24, + .max = 24, + .increment = 0 + }, + .iv_size = { + .min = 0, + .max = 0, + .increment = 0 + } + }, } + }, } + }, + { /* AES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* AES CTR */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 12, + .max = 16, + .increment = 4 + } + }, } + }, } + }, + { /* AES XTS */ + .op = 
RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_XTS, + .block_size = 16, + .key_size = { + .min = 32, + .max = 64, + .increment = 0 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* DES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_DES_CBC, + .block_size = 8, + .key_size = { + .min = 8, + .max = 8, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + { /* KASUMI (F8) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, + .block_size = 8, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + { /* SNOW 3G (UEA2) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* ZUC (EEA3) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* AES GCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 8, + .max = 16, + .increment = 4 + }, + .aad_size = { + .min = 0, + .max = 1024, + .increment = 1 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + /* End of symmetric capabilities */ + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +const struct rte_cryptodev_capabilities * +otx_get_capabilities(void) +{ + return otx_capabilities; +} diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_capabilities.h b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_capabilities.h new file mode 100644 index 000000000..fc62821bb --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_capabilities.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _OTX_CRYPTODEV_CAPABILITIES_H_ +#define _OTX_CRYPTODEV_CAPABILITIES_H_ + +#include <rte_cryptodev.h> + +/* + * Get capabilities list for the device + * + */ +const struct rte_cryptodev_capabilities * +otx_get_capabilities(void); + +#endif /* _OTX_CRYPTODEV_CAPABILITIES_H_ */ diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c new file mode 100644 index 000000000..eba629322 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c @@ -0,0 +1,696 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ +#include <assert.h> +#include <string.h> +#include <unistd.h> + +#include <rte_branch_prediction.h> +#include <rte_common.h> +#include <rte_cryptodev.h> +#include <rte_errno.h> +#include <rte_mempool.h> 
+#include <rte_memzone.h> +#include <rte_string_fns.h> + +#include "otx_cryptodev_hw_access.h" +#include "otx_cryptodev_mbox.h" + +#include "cpt_pmd_logs.h" +#include "cpt_pmd_ops_helper.h" +#include "cpt_hw_types.h" + +#define METABUF_POOL_CACHE_SIZE 512 + +/* + * VF HAL functions + * Access its own BAR0/4 registers by passing VF number as 0. + * OS/PCI maps them accordingly. + */ + +static int +otx_cpt_vf_init(struct cpt_vf *cptvf) +{ + int ret = 0; + + /* Check ready with PF */ + /* Gets chip ID / device Id from PF if ready */ + ret = otx_cpt_check_pf_ready(cptvf); + if (ret) { + CPT_LOG_ERR("%s: PF not responding to READY msg", + cptvf->dev_name); + ret = -EBUSY; + goto exit; + } + + CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__); + +exit: + return ret; +} + +/* + * Read Interrupt status of the VF + * + * @param cptvf cptvf structure + */ +static uint64_t +otx_cpt_read_vf_misc_intr_status(struct cpt_vf *cptvf) +{ + return CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_MISC_INT(0, 0)); +} + +/* + * Clear mailbox interrupt of the VF + * + * @param cptvf cptvf structure + */ +static void +otx_cpt_clear_mbox_intr(struct cpt_vf *cptvf) +{ + cptx_vqx_misc_int_t vqx_misc_int; + + vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0)); + /* W1C for the VF */ + vqx_misc_int.s.mbox = 1; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); +} + +/* + * Clear instruction NCB read error interrupt of the VF + * + * @param cptvf cptvf structure + */ +static void +otx_cpt_clear_irde_intr(struct cpt_vf *cptvf) +{ + cptx_vqx_misc_int_t vqx_misc_int; + + vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0)); + /* W1C for the VF */ + vqx_misc_int.s.irde = 1; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); +} + +/* + * Clear NCB result write response error interrupt of the VF + * + * @param cptvf cptvf structure + */ +static void +otx_cpt_clear_nwrp_intr(struct cpt_vf *cptvf) +{ + cptx_vqx_misc_int_t vqx_misc_int; + + vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0)); + /* W1C for the VF */ + vqx_misc_int.s.nwrp = 1; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); +} + +/* + * Clear swerr interrupt of the VF + * + * @param cptvf cptvf structure + */ +static void +otx_cpt_clear_swerr_intr(struct cpt_vf *cptvf) +{ + cptx_vqx_misc_int_t vqx_misc_int; + + vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0)); + /* W1C for the VF */ + vqx_misc_int.s.swerr = 1; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); +} + +/* + * Clear hwerr interrupt of the VF + * + * @param cptvf cptvf structure + */ +static void +otx_cpt_clear_hwerr_intr(struct cpt_vf *cptvf) +{ + cptx_vqx_misc_int_t vqx_misc_int; + + vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0)); + /* W1C for the VF */ + vqx_misc_int.s.hwerr = 1; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); +} + +/* + * Clear translation fault interrupt of the VF + * + * @param cptvf cptvf structure + */ +static void +otx_cpt_clear_fault_intr(struct cpt_vf *cptvf) +{ + cptx_vqx_misc_int_t vqx_misc_int; + + vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0)); + /* W1C for the VF */ + vqx_misc_int.s.fault = 1; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); +} + +/* + * Clear 
doorbell overflow interrupt of the VF + * + * @param cptvf cptvf structure + */ +static void +otx_cpt_clear_dovf_intr(struct cpt_vf *cptvf) +{ + cptx_vqx_misc_int_t vqx_misc_int; + + vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0)); + /* W1C for the VF */ + vqx_misc_int.s.dovf = 1; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); +} + +/* Write to VQX_CTL register + */ +static void +otx_cpt_write_vq_ctl(struct cpt_vf *cptvf, bool val) +{ + cptx_vqx_ctl_t vqx_ctl; + + vqx_ctl.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_CTL(0, 0)); + vqx_ctl.s.ena = val; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_CTL(0, 0), vqx_ctl.u); +} + +/* Write to VQX_INPROG register + */ +static void +otx_cpt_write_vq_inprog(struct cpt_vf *cptvf, uint8_t val) +{ + cptx_vqx_inprog_t vqx_inprg; + + vqx_inprg.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_INPROG(0, 0)); + vqx_inprg.s.inflight = val; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_INPROG(0, 0), vqx_inprg.u); +} + +/* Write to VQX_DONE_WAIT NUM_WAIT register + */ +static void +otx_cpt_write_vq_done_numwait(struct cpt_vf *cptvf, uint32_t val) +{ + cptx_vqx_done_wait_t vqx_dwait; + + vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_DONE_WAIT(0, 0)); + vqx_dwait.s.num_wait = val; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u); +} + +/* Write to VQX_DONE_WAIT TIME_WAIT register + */ +static void +otx_cpt_write_vq_done_timewait(struct cpt_vf *cptvf, uint16_t val) +{ + cptx_vqx_done_wait_t vqx_dwait; + + vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_DONE_WAIT(0, 0)); + vqx_dwait.s.time_wait = val; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u); +} + +/* Write to VQX_SADDR register + */ +static void +otx_cpt_write_vq_saddr(struct cpt_vf *cptvf, uint64_t val) +{ + cptx_vqx_saddr_t vqx_saddr; + + vqx_saddr.u = val; + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_SADDR(0, 0), vqx_saddr.u); +} + +static void +otx_cpt_vfvq_init(struct cpt_vf *cptvf) +{ + uint64_t base_addr = 0; + + /* Disable the VQ */ + otx_cpt_write_vq_ctl(cptvf, 0); + + /* Reset the doorbell */ + otx_cpt_write_vq_doorbell(cptvf, 0); + /* Clear inflight */ + otx_cpt_write_vq_inprog(cptvf, 0); + + /* Write VQ SADDR */ + base_addr = (uint64_t)(cptvf->cqueue.chead[0].dma_addr); + otx_cpt_write_vq_saddr(cptvf, base_addr); + + /* Configure timerhold / coalescence */ + otx_cpt_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD); + otx_cpt_write_vq_done_numwait(cptvf, CPT_COUNT_THOLD); + + /* Enable the VQ */ + otx_cpt_write_vq_ctl(cptvf, 1); +} + +static int +cpt_vq_init(struct cpt_vf *cptvf, uint8_t group) +{ + int err; + + /* Convey VQ LEN to PF */ + err = otx_cpt_send_vq_size_msg(cptvf); + if (err) { + CPT_LOG_ERR("%s: PF not responding to QLEN msg", + cptvf->dev_name); + err = -EBUSY; + goto cleanup; + } + + /* CPT VF device initialization */ + otx_cpt_vfvq_init(cptvf); + + /* Send msg to PF to assign current Q to required group */ + cptvf->vfgrp = group; + err = otx_cpt_send_vf_grp_msg(cptvf, group); + if (err) { + CPT_LOG_ERR("%s: PF not responding to VF_GRP msg", + cptvf->dev_name); + err = -EBUSY; + goto cleanup; + } + + CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__); + return 0; + +cleanup: + return err; +} + +void +otx_cpt_poll_misc(struct cpt_vf *cptvf) +{ + uint64_t intr; + + intr = otx_cpt_read_vf_misc_intr_status(cptvf); + + if (!intr) + return; + + /* Check for MISC interrupt types */ + 
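/* Each cause bit in CPTX_VQX_MISC_INT is W1C, so the clear handlers above acknowledge only the cause they service; the if/else chain below handles a single cause per call and leaves any other pending causes for the next otx_cpt_poll_misc() invocation. */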
if (likely(intr & CPT_VF_INTR_MBOX_MASK)) { + CPT_LOG_DP_DEBUG("%s: Mailbox interrupt 0x%lx on CPT VF %d", + cptvf->dev_name, (unsigned int long)intr, cptvf->vfid); + otx_cpt_handle_mbox_intr(cptvf); + otx_cpt_clear_mbox_intr(cptvf); + } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) { + otx_cpt_clear_irde_intr(cptvf); + CPT_LOG_DP_DEBUG("%s: Instruction NCB read error interrupt " + "0x%lx on CPT VF %d", cptvf->dev_name, + (unsigned int long)intr, cptvf->vfid); + } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) { + otx_cpt_clear_nwrp_intr(cptvf); + CPT_LOG_DP_DEBUG("%s: NCB response write error interrupt 0x%lx" + " on CPT VF %d", cptvf->dev_name, + (unsigned int long)intr, cptvf->vfid); + } else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) { + otx_cpt_clear_swerr_intr(cptvf); + CPT_LOG_DP_DEBUG("%s: Software error interrupt 0x%lx on CPT VF " + "%d", cptvf->dev_name, (unsigned int long)intr, + cptvf->vfid); + } else if (unlikely(intr & CPT_VF_INTR_HWERR_MASK)) { + otx_cpt_clear_hwerr_intr(cptvf); + CPT_LOG_DP_DEBUG("%s: Hardware error interrupt 0x%lx on CPT VF " + "%d", cptvf->dev_name, (unsigned int long)intr, + cptvf->vfid); + } else if (unlikely(intr & CPT_VF_INTR_FAULT_MASK)) { + otx_cpt_clear_fault_intr(cptvf); + CPT_LOG_DP_DEBUG("%s: Translation fault interrupt 0x%lx on CPT VF " + "%d", cptvf->dev_name, (unsigned int long)intr, + cptvf->vfid); + } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) { + otx_cpt_clear_dovf_intr(cptvf); + CPT_LOG_DP_DEBUG("%s: Doorbell overflow interrupt 0x%lx on CPT VF " + "%d", cptvf->dev_name, (unsigned int long)intr, + cptvf->vfid); + } else + CPT_LOG_DP_ERR("%s: Unhandled interrupt 0x%lx in CPT VF %d", + cptvf->dev_name, (unsigned int long)intr, + cptvf->vfid); +} + +int +otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name) +{ + memset(cptvf, 0, sizeof(struct cpt_vf)); + + /* Bar0 base address */ + cptvf->reg_base = reg_base; + + /* Save device name */ + strlcpy(cptvf->dev_name, name, (sizeof(cptvf->dev_name))); + + cptvf->pdev = pdev; + + /* To clear if there are any pending mbox msgs */ + otx_cpt_poll_misc(cptvf); + + if (otx_cpt_vf_init(cptvf)) { + CPT_LOG_ERR("Failed to initialize CPT VF device"); + return -1; + } + + return 0; +} + +int +otx_cpt_deinit_device(void *dev) +{ + struct cpt_vf *cptvf = (struct cpt_vf *)dev; + + /* Do misc work one last time */ + otx_cpt_poll_misc(cptvf); + + return 0; +} + +static int +otx_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev, + struct cpt_instance *instance, uint8_t qp_id, + int nb_elements) +{ + char mempool_name[RTE_MEMPOOL_NAMESIZE]; + int sg_mlen, lb_mlen, max_mlen, ret; + struct cpt_qp_meta_info *meta_info; + struct rte_mempool *pool; + + /* Get meta len for scatter gather mode */ + sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode(); + + /* Extra 32B saved for future considerations */ + sg_mlen += 4 * sizeof(uint64_t); + + /* Get meta len for linear buffer (direct) mode */ + lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode(); + + /* Extra 32B saved for future considerations */ + lb_mlen += 4 * sizeof(uint64_t); + + /* Check max requirement for meta buffer */ + max_mlen = RTE_MAX(lb_mlen, sg_mlen); + + /* Allocate mempool */ + + snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx_cpt_mb_%u:%u", + dev->data->dev_id, qp_id); + + pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen, + METABUF_POOL_CACHE_SIZE, 0, + rte_socket_id(), 0); + + if (pool == NULL) { + CPT_LOG_ERR("Could not create mempool for metabuf"); + return rte_errno; + } + + ret = 
rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS, + NULL); + if (ret) { + CPT_LOG_ERR("Could not set mempool ops"); + goto mempool_free; + } + + ret = rte_mempool_populate_default(pool); + if (ret <= 0) { + CPT_LOG_ERR("Could not populate metabuf pool"); + goto mempool_free; + } + + meta_info = &instance->meta_info; + + meta_info->pool = pool; + meta_info->lb_mlen = lb_mlen; + meta_info->sg_mlen = sg_mlen; + + return 0; + +mempool_free: + rte_mempool_free(pool); + return ret; +} + +static void +otx_cpt_metabuf_mempool_destroy(struct cpt_instance *instance) +{ + struct cpt_qp_meta_info *meta_info = &instance->meta_info; + + rte_mempool_free(meta_info->pool); + + meta_info->pool = NULL; + meta_info->lb_mlen = 0; + meta_info->sg_mlen = 0; +} + +int +otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group, + struct cpt_instance **instance, uint16_t qp_id) +{ + int ret = -ENOENT, len, qlen, i; + int chunk_len, chunks, chunk_size; + struct cpt_vf *cptvf = dev->data->dev_private; + struct cpt_instance *cpt_instance; + struct command_chunk *chunk_head = NULL, *chunk_prev = NULL; + struct command_chunk *chunk = NULL; + uint8_t *mem; + const struct rte_memzone *rz; + uint64_t dma_addr = 0, alloc_len, used_len; + uint64_t *next_ptr; + uint64_t pg_sz = sysconf(_SC_PAGESIZE); + + CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name); + + cpt_instance = &cptvf->instance; + + memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue)); + memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue)); + + /* Chunks are of fixed size buffers */ + chunks = DEFAULT_CMD_QCHUNKS; + chunk_len = DEFAULT_CMD_QCHUNK_SIZE; + + qlen = chunks * chunk_len; + /* Chunk size includes 8 bytes of next chunk ptr */ + chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE; + + /* For command chunk structures */ + len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8); + + /* For pending queue */ + len += qlen * RTE_ALIGN(sizeof(struct rid), 8); + + /* So that instruction queues start as pg size aligned */ + len = RTE_ALIGN(len, pg_sz); + + /* For Instruction queues */ + len += chunks * RTE_ALIGN(chunk_size, 128); + + /* Wastage after instruction queues */ + len = RTE_ALIGN(len, pg_sz); + + rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node, + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_256MB, + RTE_CACHE_LINE_SIZE); + if (!rz) { + ret = rte_errno; + goto exit; + } + + mem = rz->addr; + dma_addr = rz->phys_addr; + alloc_len = len; + + memset(mem, 0, len); + + cpt_instance->rsvd = (uintptr_t)rz; + + ret = otx_cpt_metabuf_mempool_create(dev, cpt_instance, qp_id, qlen); + if (ret) { + CPT_LOG_ERR("Could not create mempool for metabuf"); + goto memzone_free; + } + + /* Pending queue setup */ + cptvf->pqueue.rid_queue = (struct rid *)mem; + cptvf->pqueue.enq_tail = 0; + cptvf->pqueue.deq_head = 0; + cptvf->pqueue.pending_count = 0; + + mem += qlen * RTE_ALIGN(sizeof(struct rid), 8); + len -= qlen * RTE_ALIGN(sizeof(struct rid), 8); + dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8); + + /* Alignment wastage */ + used_len = alloc_len - len; + mem += RTE_ALIGN(used_len, pg_sz) - used_len; + len -= RTE_ALIGN(used_len, pg_sz) - used_len; + dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len; + + /* Init instruction queues */ + chunk_head = &cptvf->cqueue.chead[0]; + i = qlen; + + chunk_prev = NULL; + for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) { + int csize; + + chunk = &cptvf->cqueue.chead[i]; + chunk->head = mem; + chunk->dma_addr = dma_addr; + + csize = RTE_ALIGN(chunk_size, 128); + mem += 
csize; + dma_addr += csize; + len -= csize; + + if (chunk_prev) { + next_ptr = (uint64_t *)(chunk_prev->head + + chunk_size - 8); + *next_ptr = (uint64_t)chunk->dma_addr; + } + chunk_prev = chunk; + } + /* Circular loop */ + next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8); + *next_ptr = (uint64_t)chunk_head->dma_addr; + + assert(!len); + + /* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */ + cptvf->qsize = chunk_size / 8; + cptvf->cqueue.qhead = chunk_head->head; + cptvf->cqueue.idx = 0; + cptvf->cqueue.cchunk = 0; + + if (cpt_vq_init(cptvf, group)) { + CPT_LOG_ERR("Failed to initialize CPT VQ of device %s", + cptvf->dev_name); + ret = -EBUSY; + goto mempool_destroy; + } + + *instance = cpt_instance; + + CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name); + + return 0; + +mempool_destroy: + otx_cpt_metabuf_mempool_destroy(cpt_instance); +memzone_free: + rte_memzone_free(rz); +exit: + *instance = NULL; + return ret; +} + +int +otx_cpt_put_resource(struct cpt_instance *instance) +{ + struct cpt_vf *cptvf = (struct cpt_vf *)instance; + struct rte_memzone *rz; + + if (!cptvf) { + CPT_LOG_ERR("Invalid CPTVF handle"); + return -EINVAL; + } + + CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name); + + otx_cpt_metabuf_mempool_destroy(instance); + + rz = (struct rte_memzone *)instance->rsvd; + rte_memzone_free(rz); + return 0; +} + +int +otx_cpt_start_device(void *dev) +{ + int rc; + struct cpt_vf *cptvf = (struct cpt_vf *)dev; + + rc = otx_cpt_send_vf_up(cptvf); + if (rc) { + CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d", + cptvf->dev_name, rc); + return -EFAULT; + } + + if ((cptvf->vftype != SE_TYPE) && (cptvf->vftype != AE_TYPE)) { + CPT_LOG_ERR("Fatal error, unexpected vf type %u, for CPT VF " + "device %s", cptvf->vftype, cptvf->dev_name); + return -ENOENT; + } + + return 0; +} + +void +otx_cpt_stop_device(void *dev) +{ + int rc; + uint32_t pending, retries = 5; + struct cpt_vf *cptvf = (struct cpt_vf *)dev; + + /* Wait for pending entries to complete */ + pending = otx_cpt_read_vq_doorbell(cptvf); + while (pending) { + CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete", + cptvf->dev_name, pending); + sleep(1); + pending = otx_cpt_read_vq_doorbell(cptvf); + retries--; + if (!retries) + break; + } + + if (!retries && pending) { + CPT_LOG_ERR("%s: Timeout waiting for commands(%u)", + cptvf->dev_name, pending); + return; + } + + rc = otx_cpt_send_vf_down(cptvf); + if (rc) { + CPT_LOG_ERR("Failed to bring down vf %s, rc %d", + cptvf->dev_name, rc); + return; + } +} diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.h b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.h new file mode 100644 index 000000000..63c199ea2 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ +#ifndef _OTX_CRYPTODEV_HW_ACCESS_H_ +#define _OTX_CRYPTODEV_HW_ACCESS_H_ + +#include <stdbool.h> + +#include <rte_branch_prediction.h> +#include <rte_cryptodev.h> +#include <rte_cycles.h> +#include <rte_io.h> +#include <rte_memory.h> +#include <rte_prefetch.h> + +#include "cpt_common.h" +#include "cpt_hw_types.h" +#include "cpt_mcode_defines.h" +#include "cpt_pmd_logs.h" + +#define CPT_INTR_POLL_INTERVAL_MS (50) + +/* Default command queue length */ +#define DEFAULT_CMD_QCHUNKS 2 +#define DEFAULT_CMD_QCHUNK_SIZE 1023 +#define DEFAULT_CMD_QLEN \ + (DEFAULT_CMD_QCHUNK_SIZE * DEFAULT_CMD_QCHUNKS) 
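/* The VF instruction queue is DEFAULT_CMD_QCHUNKS (2) chunks of DEFAULT_CMD_QCHUNK_SIZE (1023) instructions, linked into a ring through an 8-byte next-chunk pointer stored after the last instruction of each chunk (see otx_cpt_get_resource()). Assuming the 64-byte (eight-word) CPT instruction encoding of the common CPT code, a chunk occupies 1023 * CPT_INST_SIZE + 8 bytes before 128-byte alignment, and cptvf->qsize stores that length in 8-byte words for CPT(0)_PF_Q(0..15)_CTL.size. */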
+ +#define CPT_CSR_REG_BASE(cpt) ((cpt)->reg_base) + +/* Read hw register */ +#define CPT_READ_CSR(__hw_addr, __offset) \ + rte_read64_relaxed((uint8_t *)__hw_addr + __offset) + +/* Write hw register */ +#define CPT_WRITE_CSR(__hw_addr, __offset, __val) \ + rte_write64_relaxed((__val), ((uint8_t *)__hw_addr + __offset)) + +/* cpt instance */ +struct cpt_instance { + uint32_t queue_id; + uintptr_t rsvd; + struct rte_mempool *sess_mp; + struct rte_mempool *sess_mp_priv; + struct cpt_qp_meta_info meta_info; +}; + +struct command_chunk { + /** 128-byte aligned real_vaddr */ + uint8_t *head; + /** 128-byte aligned real_dma_addr */ + phys_addr_t dma_addr; +}; + +/** + * Command queue structure + */ +struct command_queue { + /** Command queue host write idx */ + uint32_t idx; + /** Command queue chunk */ + uint32_t cchunk; + /** Command queue head; instructions are inserted here */ + uint8_t *qhead; + /** Command chunk list head */ + struct command_chunk chead[DEFAULT_CMD_QCHUNKS]; +}; + +/** + * CPT VF device structure + */ +struct cpt_vf { + /** CPT instance */ + struct cpt_instance instance; + /** Register start address */ + uint8_t *reg_base; + /** Command queue information */ + struct command_queue cqueue; + /** Pending queue information */ + struct pending_queue pqueue; + + /** Below fields are accessed only in control path */ + + /** Env specific pdev representing the pci dev */ + void *pdev; + /** Calculated queue size */ + uint32_t qsize; + /** Device index (0...CPT_MAX_VQ_NUM)*/ + uint8_t vfid; + /** VF type of cpt_vf_type_t (SE_TYPE(2) or AE_TYPE(1) */ + uint8_t vftype; + /** VF group (0 - 8) */ + uint8_t vfgrp; + /** Operating node: Bits (46:44) in BAR0 address */ + uint8_t node; + + /** VF-PF mailbox communication */ + + /** Flag if acked */ + bool pf_acked; + /** Flag if not acked */ + bool pf_nacked; + + /** Device name */ + char dev_name[32]; +} __rte_cache_aligned; + +/* + * CPT Registers map for 81xx + */ + +/* VF registers */ +#define CPTX_VQX_CTL(a, b) (0x0000100ll + 0x1000000000ll * \ + ((a) & 0x0) + 0x100000ll * (b)) +#define CPTX_VQX_SADDR(a, b) (0x0000200ll + 0x1000000000ll * \ + ((a) & 0x0) + 0x100000ll * (b)) +#define CPTX_VQX_DONE_WAIT(a, b) (0x0000400ll + 0x1000000000ll * \ + ((a) & 0x0) + 0x100000ll * (b)) +#define CPTX_VQX_INPROG(a, b) (0x0000410ll + 0x1000000000ll * \ + ((a) & 0x0) + 0x100000ll * (b)) +#define CPTX_VQX_DONE(a, b) (0x0000420ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_DONE_ACK(a, b) (0x0000440ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_DONE_INT_W1S(a, b) (0x0000460ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_DONE_INT_W1C(a, b) (0x0000468ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_DONE_ENA_W1S(a, b) (0x0000470ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_DONE_ENA_W1C(a, b) (0x0000478ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_MISC_INT(a, b) (0x0000500ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_MISC_INT_W1S(a, b) (0x0000508ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_MISC_ENA_W1S(a, b) (0x0000510ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_MISC_ENA_W1C(a, b) (0x0000518ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VQX_DOORBELL(a, b) (0x0000600ll + 0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b)) +#define CPTX_VFX_PF_MBOXX(a, b, c) (0x0001000ll + 
0x1000000000ll * \ + ((a) & 0x1) + 0x100000ll * (b) + \ + 8ll * ((c) & 0x1)) + +/* VF HAL functions */ + +void +otx_cpt_poll_misc(struct cpt_vf *cptvf); + +int +otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name); + +int +otx_cpt_deinit_device(void *dev); + +int +otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group, + struct cpt_instance **instance, uint16_t qp_id); + +int +otx_cpt_put_resource(struct cpt_instance *instance); + +int +otx_cpt_start_device(void *cptvf); + +void +otx_cpt_stop_device(void *cptvf); + +/* Write to VQX_DOORBELL register + */ +static __rte_always_inline void +otx_cpt_write_vq_doorbell(struct cpt_vf *cptvf, uint32_t val) +{ + cptx_vqx_doorbell_t vqx_dbell; + + vqx_dbell.u = 0; + vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */ + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_DOORBELL(0, 0), vqx_dbell.u); +} + +static __rte_always_inline uint32_t +otx_cpt_read_vq_doorbell(struct cpt_vf *cptvf) +{ + cptx_vqx_doorbell_t vqx_dbell; + + vqx_dbell.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VQX_DOORBELL(0, 0)); + return vqx_dbell.s.dbell_cnt; +} + +static __rte_always_inline void +otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count) +{ + struct cpt_vf *cptvf = (struct cpt_vf *)instance; + /* Memory barrier to flush pending writes */ + rte_smp_wmb(); + otx_cpt_write_vq_doorbell(cptvf, count); +} + +static __rte_always_inline void * +get_cpt_inst(struct command_queue *cqueue) +{ + CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx); + return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE]; +} + +static __rte_always_inline void +fill_cpt_inst(struct cpt_instance *instance, void *req) +{ + struct command_queue *cqueue; + cpt_inst_s_t *cpt_ist_p; + struct cpt_vf *cptvf = (struct cpt_vf *)instance; + struct cpt_request_info *user_req = (struct cpt_request_info *)req; + cqueue = &cptvf->cqueue; + cpt_ist_p = get_cpt_inst(cqueue); + rte_prefetch_non_temporal(cpt_ist_p); + + /* EI0, EI1, EI2, EI3 are already prepared */ + /* HW W0 */ + cpt_ist_p->u[0] = 0; + /* HW W1 */ + cpt_ist_p->s8x.res_addr = user_req->comp_baddr; + /* HW W2 */ + cpt_ist_p->u[2] = 0; + /* HW W3 */ + cpt_ist_p->s8x.wq_ptr = 0; + + /* MC EI0 */ + cpt_ist_p->s8x.ei0 = user_req->ist.ei0; + /* MC EI1 */ + cpt_ist_p->s8x.ei1 = user_req->ist.ei1; + /* MC EI2 */ + cpt_ist_p->s8x.ei2 = user_req->ist.ei2; + /* MC EI3 */ + cpt_ist_p->s8x.ei3 = user_req->ist.ei3; +} + +static __rte_always_inline void +mark_cpt_inst(struct cpt_instance *instance) +{ + struct cpt_vf *cptvf = (struct cpt_vf *)instance; + struct command_queue *queue = &cptvf->cqueue; + if (unlikely(++queue->idx >= DEFAULT_CMD_QCHUNK_SIZE)) { + uint32_t cchunk = queue->cchunk; + MOD_INC(cchunk, DEFAULT_CMD_QCHUNKS); + queue->qhead = queue->chead[cchunk].head; + queue->idx = 0; + queue->cchunk = cchunk; + } +} + +static __rte_always_inline uint8_t +check_nb_command_id(struct cpt_request_info *user_req, + struct cpt_instance *instance) +{ + uint8_t ret = ERR_REQ_PENDING; + struct cpt_vf *cptvf = (struct cpt_vf *)instance; + volatile cpt_res_s_t *cptres; + + cptres = (volatile cpt_res_s_t *)user_req->completion_addr; + + if (unlikely(cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE)) { + /* + * Wait for some time for this command to get completed + * before timing out + */ + if (rte_get_timer_cycles() < user_req->time_out) + return ret; + /* + * TODO: See if alternate caddr can be used to not loop + * longer than needed. 
+ */ + if ((cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE) && + (user_req->extra_time < TIME_IN_RESET_COUNT)) { + user_req->extra_time++; + return ret; + } + + if (cptres->s8x.compcode != CPT_8X_COMP_E_NOTDONE) + goto complete; + + ret = ERR_REQ_TIMEOUT; + CPT_LOG_DP_ERR("Request %p timedout", user_req); + otx_cpt_poll_misc(cptvf); + goto exit; + } + +complete: + if (likely(cptres->s8x.compcode == CPT_8X_COMP_E_GOOD)) { + ret = 0; /* success */ + if (unlikely((uint8_t)*user_req->alternate_caddr)) { + ret = (uint8_t)*user_req->alternate_caddr; + CPT_LOG_DP_ERR("Request %p : failed with microcode" + " error, MC completion code : 0x%x", user_req, + ret); + } + CPT_LOG_DP_DEBUG("MC status %.8x\n", + *((volatile uint32_t *)user_req->alternate_caddr)); + CPT_LOG_DP_DEBUG("HW status %.8x\n", + *((volatile uint32_t *)user_req->completion_addr)); + } else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) || + (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) { + ret = (uint8_t)*user_req->alternate_caddr; + if (!ret) + ret = ERR_BAD_ALT_CCODE; + CPT_LOG_DP_DEBUG("Request %p : failed with %s : err code :%x", + user_req, + (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT) ? + "DMA Fault" : "Software error", ret); + } else { + CPT_LOG_DP_ERR("Request %p : unexpected completion code %d", + user_req, cptres->s8x.compcode); + ret = (uint8_t)*user_req->alternate_caddr; + } + +exit: + return ret; +} + +#endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */ diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.c b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.c new file mode 100644 index 000000000..a8e51a8e7 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.c @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#include <unistd.h> + +#include "otx_cryptodev_hw_access.h" +#include "otx_cryptodev_mbox.h" + +void +otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf) +{ + struct cpt_mbox mbx = {0, 0}; + + /* + * MBOX[0] contains msg + * MBOX[1] contains data + */ + mbx.msg = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VFX_PF_MBOXX(0, 0, 0)); + mbx.data = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VFX_PF_MBOXX(0, 0, 1)); + + CPT_LOG_DP_DEBUG("%s: Mailbox msg 0x%lx from PF", + cptvf->dev_name, (unsigned int long)mbx.msg); + switch (mbx.msg) { + case OTX_CPT_MSG_READY: + { + otx_cpt_chipid_vfid_t cid; + + cid.u64 = mbx.data; + cptvf->pf_acked = true; + cptvf->vfid = cid.s.vfid; + CPT_LOG_DP_DEBUG("%s: Received VFID %d chip_id %d", + cptvf->dev_name, + cptvf->vfid, cid.s.chip_id); + } + break; + case OTX_CPT_MSG_QBIND_GRP: + cptvf->pf_acked = true; + cptvf->vftype = mbx.data; + CPT_LOG_DP_DEBUG("%s: VF %d type %s group %d", + cptvf->dev_name, cptvf->vfid, + ((mbx.data == SE_TYPE) ? 
"SE" : "AE"), + cptvf->vfgrp); + break; + case OTX_CPT_MBOX_MSG_TYPE_ACK: + cptvf->pf_acked = true; + break; + case OTX_CPT_MBOX_MSG_TYPE_NACK: + cptvf->pf_nacked = true; + break; + default: + CPT_LOG_DP_DEBUG("%s: Invalid msg from PF, msg 0x%lx", + cptvf->dev_name, (unsigned int long)mbx.msg); + break; + } +} + +/* Send a mailbox message to PF + * @vf: vf from which this message to be sent + * @mbx: Message to be sent + */ +static void +otx_cpt_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx) +{ + /* Writing mbox(1) causes interrupt */ + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VFX_PF_MBOXX(0, 0, 0), mbx->msg); + CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), + CPTX_VFX_PF_MBOXX(0, 0, 1), mbx->data); +} + +static int32_t +otx_cpt_send_msg_to_pf_timeout(struct cpt_vf *cptvf, struct cpt_mbox *mbx) +{ + int timeout = OTX_CPT_MBOX_MSG_TIMEOUT; + int sleep_ms = 10; + + cptvf->pf_acked = false; + cptvf->pf_nacked = false; + + otx_cpt_send_msg_to_pf(cptvf, mbx); + + /* Wait for previous message to be acked, timeout 2sec */ + while (!cptvf->pf_acked) { + if (cptvf->pf_nacked) + return -EINVAL; + usleep(sleep_ms * 1000); + otx_cpt_poll_misc(cptvf); + if (cptvf->pf_acked) + break; + timeout -= sleep_ms; + if (!timeout) { + CPT_LOG_ERR("%s: PF didn't ack mbox msg %lx(vfid %u)", + cptvf->dev_name, + (unsigned int long)(mbx->msg & 0xFF), + cptvf->vfid); + return -EBUSY; + } + } + return 0; +} + +int +otx_cpt_check_pf_ready(struct cpt_vf *cptvf) +{ + struct cpt_mbox mbx = {0, 0}; + + mbx.msg = OTX_CPT_MSG_READY; + if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) { + CPT_LOG_ERR("%s: PF didn't respond to READY msg", + cptvf->dev_name); + return 1; + } + return 0; +} + +int +otx_cpt_send_vq_size_msg(struct cpt_vf *cptvf) +{ + struct cpt_mbox mbx = {0, 0}; + + mbx.msg = OTX_CPT_MSG_QLEN; + + mbx.data = cptvf->qsize; + if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) { + CPT_LOG_ERR("%s: PF didn't respond to vq_size msg", + cptvf->dev_name); + return 1; + } + return 0; +} + +int +otx_cpt_send_vf_grp_msg(struct cpt_vf *cptvf, uint32_t group) +{ + struct cpt_mbox mbx = {0, 0}; + + mbx.msg = OTX_CPT_MSG_QBIND_GRP; + + /* Convey group of the VF */ + mbx.data = group; + if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) { + CPT_LOG_ERR("%s: PF didn't respond to vf_type msg", + cptvf->dev_name); + return 1; + } + return 0; +} + +int +otx_cpt_send_vf_up(struct cpt_vf *cptvf) +{ + struct cpt_mbox mbx = {0, 0}; + + mbx.msg = OTX_CPT_MSG_VF_UP; + if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) { + CPT_LOG_ERR("%s: PF didn't respond to UP msg", + cptvf->dev_name); + return 1; + } + return 0; +} + +int +otx_cpt_send_vf_down(struct cpt_vf *cptvf) +{ + struct cpt_mbox mbx = {0, 0}; + + mbx.msg = OTX_CPT_MSG_VF_DOWN; + if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) { + CPT_LOG_ERR("%s: PF didn't respond to DOWN msg", + cptvf->dev_name); + return 1; + } + return 0; +} diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.h b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.h new file mode 100644 index 000000000..b05d1c50d --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_mbox.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _OTX_CRYPTODEV_MBOX_H_ +#define _OTX_CRYPTODEV_MBOX_H_ + +#include <rte_byteorder.h> +#include <rte_common.h> + +#include "cpt_common.h" +#include "cpt_pmd_logs.h" + +#include "otx_cryptodev_hw_access.h" + +#define OTX_CPT_MBOX_MSG_TIMEOUT 2000 /* In Milli Seconds */ + +#define 
OTX_CPT_MBOX_MSG_TYPE_REQ 0 +#define OTX_CPT_MBOX_MSG_TYPE_ACK 1 +#define OTX_CPT_MBOX_MSG_TYPE_NACK 2 +#define OTX_CPT_MBOX_MSG_TYPE_NOP 3 + +/* CPT mailbox structure */ +struct cpt_mbox { + /** Message type MBOX[0] */ + uint64_t msg; + /** Data MBOX[1] */ + uint64_t data; +}; + +typedef enum { + OTX_CPT_MSG_VF_UP = 1, + OTX_CPT_MSG_VF_DOWN, + OTX_CPT_MSG_READY, + OTX_CPT_MSG_QLEN, + OTX_CPT_MSG_QBIND_GRP, + OTX_CPT_MSG_VQ_PRIORITY, + OTX_CPT_MSG_PF_TYPE, +} otx_cpt_mbox_opcode_t; + +typedef union { + uint64_t u64; + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint32_t chip_id; + uint8_t vfid; + uint8_t reserved[3]; +#else + uint8_t reserved[3]; + uint8_t vfid; + uint32_t chip_id; +#endif + } s; +} otx_cpt_chipid_vfid_t; + +/* Poll handler to handle mailbox messages from PF */ +void +otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf); + +/* + * Checks if VF is able to communicate with PF + * and also gets the CPT number this VF is associated with. + */ +int +otx_cpt_check_pf_ready(struct cpt_vf *cptvf); + +/* + * Communicate VQ size to PF to program CPT(0)_PF_Q(0-15)_CTL of the VF. + * Must be ACKed. + */ +int +otx_cpt_send_vq_size_msg(struct cpt_vf *cptvf); + +/* + * Communicate VF group required to PF and get the VQ bound to that group + */ +int +otx_cpt_send_vf_grp_msg(struct cpt_vf *cptvf, uint32_t group); + +/* + * Communicate to PF that VF is UP and running + */ +int +otx_cpt_send_vf_up(struct cpt_vf *cptvf); + +/* + * Communicate to PF that VF is DOWN + */ +int +otx_cpt_send_vf_down(struct cpt_vf *cptvf); + +#endif /* _OTX_CRYPTODEV_MBOX_H_ */ diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.c b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.c new file mode 100644 index 000000000..9628ffa03 --- /dev/null +++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.c @@ -0,0 +1,660 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#include <rte_alarm.h> +#include <rte_bus_pci.h> +#include <rte_cryptodev.h> +#include <rte_cryptodev_pmd.h> +#include <rte_errno.h> +#include <rte_malloc.h> +#include <rte_mempool.h> + +#include "cpt_pmd_logs.h" +#include "cpt_ucode.h" + +#include "otx_cryptodev.h" +#include "otx_cryptodev_capabilities.h" +#include "otx_cryptodev_hw_access.h" +#include "otx_cryptodev_ops.h" + +/* Forward declarations */ + +static int +otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id); + +/* Alarm routines */ + +static void +otx_cpt_alarm_cb(void *arg) +{ + struct cpt_vf *cptvf = arg; + otx_cpt_poll_misc(cptvf); + rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000, + otx_cpt_alarm_cb, cptvf); +} + +static int +otx_cpt_periodic_alarm_start(void *arg) +{ + return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000, + otx_cpt_alarm_cb, arg); +} + +static int +otx_cpt_periodic_alarm_stop(void *arg) +{ + return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg); +} + +/* PMD ops */ + +static int +otx_cpt_dev_config(struct rte_cryptodev *dev __rte_unused, + struct rte_cryptodev_config *config __rte_unused) +{ + CPT_PMD_INIT_FUNC_TRACE(); + return 0; +} + +static int +otx_cpt_dev_start(struct rte_cryptodev *c_dev) +{ + void *cptvf = c_dev->data->dev_private; + + CPT_PMD_INIT_FUNC_TRACE(); + + return otx_cpt_start_device(cptvf); +} + +static void +otx_cpt_dev_stop(struct rte_cryptodev *c_dev) +{ + void *cptvf = c_dev->data->dev_private; + + CPT_PMD_INIT_FUNC_TRACE(); + + otx_cpt_stop_device(cptvf); +} + +static int +otx_cpt_dev_close(struct rte_cryptodev *c_dev) +{ + void 
*cptvf = c_dev->data->dev_private; + int i, ret; + + CPT_PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < c_dev->data->nb_queue_pairs; i++) { + ret = otx_cpt_que_pair_release(c_dev, i); + if (ret) + return ret; + } + + otx_cpt_periodic_alarm_stop(cptvf); + otx_cpt_deinit_device(cptvf); + + return 0; +} + +static void +otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info) +{ + CPT_PMD_INIT_FUNC_TRACE(); + if (info != NULL) { + info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF; + info->feature_flags = dev->feature_flags; + info->capabilities = otx_get_capabilities(); + info->sym.max_nb_sessions = 0; + info->driver_id = otx_cryptodev_driver_id; + info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ; + info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ; + } +} + +static void +otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused, + struct rte_cryptodev_stats *stats __rte_unused) +{ + CPT_PMD_INIT_FUNC_TRACE(); +} + +static void +otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused) +{ + CPT_PMD_INIT_FUNC_TRACE(); +} + +static int +otx_cpt_que_pair_setup(struct rte_cryptodev *dev, + uint16_t que_pair_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id __rte_unused) +{ + struct cpt_instance *instance = NULL; + struct rte_pci_device *pci_dev; + int ret = -1; + + CPT_PMD_INIT_FUNC_TRACE(); + + if (dev->data->queue_pairs[que_pair_id] != NULL) { + ret = otx_cpt_que_pair_release(dev, que_pair_id); + if (ret) + return ret; + } + + if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) { + CPT_LOG_INFO("Number of descriptors too big %d, using default " + "queue length of %d", qp_conf->nb_descriptors, + DEFAULT_CMD_QLEN); + } + + pci_dev = RTE_DEV_TO_PCI(dev->device); + + if (pci_dev->mem_resource[0].addr == NULL) { + CPT_LOG_ERR("PCI mem address null"); + return -EIO; + } + + ret = otx_cpt_get_resource(dev, 0, &instance, que_pair_id); + if (ret != 0 || instance == NULL) { + CPT_LOG_ERR("Error getting instance handle from device %s : " + "ret = %d", dev->data->name, ret); + return ret; + } + + instance->queue_id = que_pair_id; + instance->sess_mp = qp_conf->mp_session; + instance->sess_mp_priv = qp_conf->mp_session_private; + dev->data->queue_pairs[que_pair_id] = instance; + + return 0; +} + +static int +otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id) +{ + struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id]; + int ret; + + CPT_PMD_INIT_FUNC_TRACE(); + + ret = otx_cpt_put_resource(instance); + if (ret != 0) { + CPT_LOG_ERR("Error putting instance handle of device %s : " + "ret = %d", dev->data->name, ret); + return ret; + } + + dev->data->queue_pairs[que_pair_id] = NULL; + + return 0; +} + +static unsigned int +otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused) +{ + return cpt_get_session_size(); +} + +static void +otx_cpt_session_init(void *sym_sess, uint8_t driver_id) +{ + struct rte_cryptodev_sym_session *sess = sym_sess; + struct cpt_sess_misc *cpt_sess = + (struct cpt_sess_misc *) get_sym_session_private_data(sess, driver_id); + + CPT_PMD_INIT_FUNC_TRACE(); + cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) + + sizeof(struct cpt_sess_misc); +} + +static int +otx_cpt_session_cfg(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool) +{ + struct rte_crypto_sym_xform *chain; + void *sess_private_data = NULL; + + CPT_PMD_INIT_FUNC_TRACE(); + + if (cpt_is_algo_supported(xform)) + goto err; + + if (unlikely(sess == NULL)) 
{ + CPT_LOG_ERR("invalid session struct"); + return -EINVAL; + } + + if (rte_mempool_get(mempool, &sess_private_data)) { + CPT_LOG_ERR("Could not allocate sess_private_data"); + return -ENOMEM; + } + + chain = xform; + while (chain) { + switch (chain->type) { + case RTE_CRYPTO_SYM_XFORM_AEAD: + if (fill_sess_aead(chain, sess_private_data)) + goto err; + break; + case RTE_CRYPTO_SYM_XFORM_CIPHER: + if (fill_sess_cipher(chain, sess_private_data)) + goto err; + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { + if (fill_sess_gmac(chain, sess_private_data)) + goto err; + } else { + if (fill_sess_auth(chain, sess_private_data)) + goto err; + } + break; + default: + CPT_LOG_ERR("Invalid crypto xform type"); + break; + } + chain = chain->next; + } + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); + otx_cpt_session_init(sess, dev->driver_id); + return 0; + +err: + if (sess_private_data) + rte_mempool_put(mempool, sess_private_data); + return -EPERM; +} + +static void +otx_cpt_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess) +{ + void *sess_priv = get_sym_session_private_data(sess, dev->driver_id); + + CPT_PMD_INIT_FUNC_TRACE(); + if (sess_priv) { + memset(sess_priv, 0, otx_cpt_get_session_size(dev)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); + set_sym_session_private_data(sess, dev->driver_id, NULL); + rte_mempool_put(sess_mp, sess_priv); + } +} + +static __rte_always_inline int32_t __hot +otx_cpt_request_enqueue(struct cpt_instance *instance, + struct pending_queue *pqueue, + void *req) +{ + struct cpt_request_info *user_req = (struct cpt_request_info *)req; + + if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN)) + return -EAGAIN; + + fill_cpt_inst(instance, req); + + CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op); + + /* Fill time_out cycles */ + user_req->time_out = rte_get_timer_cycles() + + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz(); + user_req->extra_time = 0; + + /* Default mode of software queue */ + mark_cpt_inst(instance); + + pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req; + + /* We will use soft queue length here to limit requests */ + MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN); + pqueue->pending_count += 1; + + CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p " + "op: %p", user_req, user_req->op); + return 0; +} + +static __rte_always_inline int __hot +otx_cpt_enq_single_sym(struct cpt_instance *instance, + struct rte_crypto_op *op, + struct pending_queue *pqueue) +{ + struct cpt_sess_misc *sess; + struct rte_crypto_sym_op *sym_op = op->sym; + void *prep_req, *mdata = NULL; + int ret = 0; + uint64_t cpt_op; + + sess = (struct cpt_sess_misc *) + get_sym_session_private_data(sym_op->session, + otx_cryptodev_driver_id); + + cpt_op = sess->cpt_op; + + if (likely(cpt_op & CPT_OP_CIPHER_MASK)) + ret = fill_fc_params(op, sess, &instance->meta_info, &mdata, + &prep_req); + else + ret = fill_digest_params(op, sess, &instance->meta_info, + &mdata, &prep_req); + + if (unlikely(ret)) { + CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x " + "ret 0x%x", op, (unsigned int)cpt_op, ret); + return ret; + } + + /* Enqueue prepared instruction to h/w */ + ret = otx_cpt_request_enqueue(instance, pqueue, prep_req); + + if (unlikely(ret)) { + /* Buffer allocated for request preparation needs to be freed */ + free_op_meta(mdata, instance->meta_info.pool); + return ret; + } + + return 0; +} + +static __rte_always_inline int __hot 
+
+static __rte_always_inline int __hot
+otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
+				struct rte_crypto_op *op,
+				struct pending_queue *pqueue)
+{
+	struct cpt_sess_misc *sess;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	int ret;
+	void *sess_t = NULL;
+	void *sess_private_data_t = NULL;
+
+	/* Create tmp session */
+
+	if (rte_mempool_get(instance->sess_mp, (void **)&sess_t)) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (rte_mempool_get(instance->sess_mp_priv,
+			    (void **)&sess_private_data_t)) {
+		ret = -ENOMEM;
+		goto free_sess;
+	}
+
+	sess = (struct cpt_sess_misc *)sess_private_data_t;
+
+	sess->ctx_dma_addr = rte_mempool_virt2iova(sess) +
+			sizeof(struct cpt_sess_misc);
+
+	ret = instance_session_cfg(sym_op->xform, (void *)sess);
+	if (unlikely(ret)) {
+		ret = -EINVAL;
+		goto free_sess_priv;
+	}
+
+	/* Save tmp session in op */
+
+	sym_op->session = (struct rte_cryptodev_sym_session *)sess_t;
+	set_sym_session_private_data(sym_op->session, otx_cryptodev_driver_id,
+				     sess_private_data_t);
+
+	/* Enqueue op with the tmp session set */
+	ret = otx_cpt_enq_single_sym(instance, op, pqueue);
+
+	if (unlikely(ret))
+		goto free_sess_priv;
+
+	return 0;
+
+free_sess_priv:
+	rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
+free_sess:
+	rte_mempool_put(instance->sess_mp, sess_t);
+exit:
+	return ret;
+}
+
+static __rte_always_inline int __hot
+otx_cpt_enq_single(struct cpt_instance *inst,
+		   struct rte_crypto_op *op,
+		   struct pending_queue *pqueue)
+{
+	/* Check for the type */
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		return otx_cpt_enq_single_sym(inst, op, pqueue);
+	else if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
+		return otx_cpt_enq_single_sym_sessless(inst, op, pqueue);
+
+	/* Should not reach here */
+	return -EINVAL;
+}
+
+static uint16_t
+otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct cpt_instance *instance = (struct cpt_instance *)qptr;
+	uint16_t count;
+	int ret;
+	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+	struct pending_queue *pqueue = &cptvf->pqueue;
+
+	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
+	if (nb_ops > count)
+		nb_ops = count;
+
+	count = 0;
+	while (likely(count < nb_ops)) {
+
+		/* Enqueue single op */
+		ret = otx_cpt_enq_single(instance, ops[count], pqueue);
+
+		if (unlikely(ret))
+			break;
+		count++;
+	}
+	otx_cpt_ring_dbell(instance, count);
+	return count;
+}
+
+static __rte_always_inline void
+otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
+{
+	/* H/w has returned success */
+	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Perform further post processing */
+
+	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+		/* Check if auth verify needs to be completed */
+		if (unlikely(rsp[2]))
+			compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
+		return;
+	}
+}
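otx_cpt_dequeue_post_process() above and the dequeue loop below treat each completed response as a small word array; the layout, as inferred from their usage (not from hardware documentation):

	/*
	 *   rsp[0] - metabuf allocated during request preparation
	 *            (freed after post-processing)
	 *   rsp[1] - the originating rte_crypto_op
	 *   rsp[2] - pointer to the computed digest when auth verification
	 *            is still pending, 0 otherwise
	 *   rsp[3] - length of the digest pointed to by rsp[2]
	 */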
+
+static uint16_t
+otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct cpt_instance *instance = (struct cpt_instance *)qptr;
+	struct cpt_request_info *user_req;
+	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+	struct rid *rid_e;
+	uint8_t cc[nb_ops];
+	int i, count, pcount;
+	uint8_t ret;
+	int nb_completed;
+	struct pending_queue *pqueue = &cptvf->pqueue;
+	struct rte_crypto_op *cop;
+	void *metabuf;
+	uintptr_t *rsp;
+
+	pcount = pqueue->pending_count;
+	count = (nb_ops > pcount) ? pcount : nb_ops;
+
+	for (i = 0; i < count; i++) {
+		rid_e = &pqueue->rid_queue[pqueue->deq_head];
+		user_req = (struct cpt_request_info *)(rid_e->rid);
+
+		if (likely((i + 1) < count))
+			rte_prefetch_non_temporal((void *)rid_e[1].rid);
+
+		ret = check_nb_command_id(user_req, instance);
+
+		if (unlikely(ret == ERR_REQ_PENDING)) {
+			/* Stop checking for completions */
+			break;
+		}
+
+		/* Return completion code and op handle */
+		cc[i] = ret;
+		ops[i] = user_req->op;
+
+		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
+				 user_req, user_req->op, ret);
+
+		MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
+		pqueue->pending_count -= 1;
+	}
+
+	nb_completed = i;
+
+	for (i = 0; i < nb_completed; i++) {
+
+		rsp = (void *)ops[i];
+
+		if (likely((i + 1) < nb_completed))
+			rte_prefetch0(ops[i + 1]);
+
+		metabuf = (void *)rsp[0];
+		cop = (void *)rsp[1];
+
+		ops[i] = cop;
+
+		/* Check completion code */
+
+		if (likely(cc[i] == 0)) {
+			/* H/w success pkt. Post process */
+			otx_cpt_dequeue_post_process(cop, rsp);
+		} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
+			/* auth data mismatch */
+			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else {
+			/* Error */
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+
+		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+			void *sess_private_data_t =
+				get_sym_session_private_data(cop->sym->session,
+						otx_cryptodev_driver_id);
+			memset(sess_private_data_t, 0,
+			       cpt_get_session_size());
+			memset(cop->sym->session, 0,
+			       rte_cryptodev_sym_get_existing_header_session_size(
+				       cop->sym->session));
+			rte_mempool_put(instance->sess_mp_priv,
+					sess_private_data_t);
+			rte_mempool_put(instance->sess_mp, cop->sym->session);
+			cop->sym->session = NULL;
+		}
+		free_op_meta(metabuf, instance->meta_info.pool);
+	}
+
+	return nb_completed;
+}
+
+static struct rte_cryptodev_ops cptvf_ops = {
+	/* Device related operations */
+	.dev_configure = otx_cpt_dev_config,
+	.dev_start = otx_cpt_dev_start,
+	.dev_stop = otx_cpt_dev_stop,
+	.dev_close = otx_cpt_dev_close,
+	.dev_infos_get = otx_cpt_dev_info_get,
+
+	.stats_get = otx_cpt_stats_get,
+	.stats_reset = otx_cpt_stats_reset,
+	.queue_pair_setup = otx_cpt_que_pair_setup,
+	.queue_pair_release = otx_cpt_que_pair_release,
+	.queue_pair_count = NULL,
+
+	/* Crypto related operations */
+	.sym_session_get_size = otx_cpt_get_session_size,
+	.sym_session_configure = otx_cpt_session_cfg,
+	.sym_session_clear = otx_cpt_session_clear
+};
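The ops table above is what the generic rte_cryptodev API dispatches into; roughly, an application would drive this PMD as in the following sketch (illustrative only; dev_id, sess_mp and sess_priv_mp are hypothetical, error handling omitted):

	struct rte_cryptodev_config conf = {
		.socket_id = rte_socket_id(),
		.nb_queue_pairs = 1,		/* CPT_NUM_QS_PER_VF */
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = DEFAULT_CMD_QLEN,
		.mp_session = sess_mp,
		.mp_session_private = sess_priv_mp,
	};

	rte_cryptodev_configure(dev_id, &conf);
	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
	rte_cryptodev_start(dev_id);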
+
+int
+otx_cpt_dev_create(struct rte_cryptodev *c_dev)
+{
+	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
+	struct cpt_vf *cptvf = NULL;
+	void *reg_base;
+	char dev_name[32];
+	int ret;
+
+	if (pdev->mem_resource[0].phys_addr == 0ULL)
+		return -EIO;
+
+	/* For secondary processes, we don't initialise any further as the
+	 * primary has already done this work.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
+			sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
+			rte_socket_id());
+
+	if (cptvf == NULL) {
+		CPT_LOG_ERR("Cannot allocate memory for device private data");
+		return -ENOMEM;
+	}
+
+	snprintf(dev_name, 32, "%02x:%02x.%x",
+		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+
+	reg_base = pdev->mem_resource[0].addr;
+	if (!reg_base) {
+		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
+	if (ret) {
+		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
+		ret = -EIO;
+		goto fail;
+	}
+
+	/* Start off timer for mailbox interrupts */
+	otx_cpt_periodic_alarm_start(cptvf);
+
+	c_dev->dev_ops = &cptvf_ops;
+
+	c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
+	c_dev->dequeue_burst = otx_cpt_pkt_dequeue;
+
+	c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
+
+	/* Save dev private data */
+	c_dev->data->dev_private = cptvf;
+
+	return 0;
+
+fail:
+	if (cptvf) {
+		/* Free private data allocated */
+		rte_free(cptvf);
+	}
+
+	return ret;
+}
diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.h b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.h
new file mode 100644
index 000000000..768ec4f2c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_OPS_H_
+#define _OTX_CRYPTODEV_OPS_H_
+
+#define OTX_CPT_MIN_HEADROOM_REQ	(24)
+#define OTX_CPT_MIN_TAILROOM_REQ	(8)
+#define CPT_NUM_QS_PER_VF		(1)
+
+int
+otx_cpt_dev_create(struct rte_cryptodev *c_dev);
+
+#endif /* _OTX_CRYPTODEV_OPS_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map b/src/seastar/dpdk/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map
new file mode 100644
index 000000000..521e51f41
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map
@@ -0,0 +1,4 @@
+DPDK_18.11 {
+
+	local: *;
+};
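With a queue pair configured, the burst handlers registered in otx_cpt_dev_create() are reached through the generic rte_cryptodev burst API; a minimal polling loop might look like this (sketch only; ops[] preparation, session attachment and per-op status checks omitted):

	uint16_t nb_enq, nb_deq = 0;

	nb_enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
	while (nb_deq < nb_enq)
		nb_deq += rte_cryptodev_dequeue_burst(dev_id, 0,
					ops + nb_deq, nb_enq - nb_deq);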