Diffstat (all files new, mode 100644):
 src/seastar/dpdk/drivers/crypto/qat/Makefile                              |   59
 src/seastar/dpdk/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h |  175
 src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw.h                  |  316
 src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h               |  404
 src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_hw.h                  |  309
 src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs.h                    |  174
 src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c         | 1014
 src/seastar/dpdk/drivers/crypto/qat/qat_crypto.c                          | 1293
 src/seastar/dpdk/drivers/crypto/qat/qat_crypto.h                          |  141
 src/seastar/dpdk/drivers/crypto/qat/qat_crypto_capabilities.h             |  574
 src/seastar/dpdk/drivers/crypto/qat/qat_logs.h                            |   78
 src/seastar/dpdk/drivers/crypto/qat/qat_qp.c                              |  490
 src/seastar/dpdk/drivers/crypto/qat/rte_pmd_qat_version.map               |    3
 src/seastar/dpdk/drivers/crypto/qat/rte_qat_cryptodev.c                   |  163
 14 files changed, 5193 insertions(+), 0 deletions(-)
diff --git a/src/seastar/dpdk/drivers/crypto/qat/Makefile b/src/seastar/dpdk/drivers/crypto/qat/Makefile
new file mode 100644
index 00000000..7322ffe4
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright(c) 2015 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_qat.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_adf/qat_algs_build_desc.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c
+
+# export include files
+SYMLINK-y-include +=
+
+# versioning export map
+EXPORT_MAP := rte_pmd_qat_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
new file mode 100644
index 00000000..d218f85d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
@@ -0,0 +1,175 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include <rte_io.h>
+
+/* CSR write macro */
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+ rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+ rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+#define ADF_NUM_BUNDLES_PER_DEV 1
+#define ADF_NUM_SYM_QPS_PER_BUNDLE 2
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+ ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
+#define BUILD_RING_CONFIG(size) \
+ ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+ ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
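
As a usage sketch, a queue-pair bring-up path could drive the ring CSR macros above roughly as follows; the helper and its parameter names (example_ring_setup, csr_addr, ring_iova) are hypothetical and assume this header is included:

    #include <stdint.h>
    #include "adf_transport_access_macros.h"

    /* Hypothetical helper: program one ring of a bank, starting empty. */
    static void example_ring_setup(void *csr_addr, uint32_t bank,
                    uint32_t ring, uint64_t ring_iova, uint32_t size_cfg)
    {
            /* BUILD_RING_BASE_ADDR aligns the 64-bit IOVA to the ring size;
             * the write macro then splits the result across the
             * LBASE/UBASE CSR pair for this bank/ring. */
            WRITE_CSR_RING_BASE(csr_addr, bank, ring,
                            BUILD_RING_BASE_ADDR(ring_iova, size_cfg));

            /* Ring config word with both near-full and near-empty
             * watermarks disabled. */
            WRITE_CSR_RING_CONFIG(csr_addr, bank, ring,
                            BUILD_RING_CONFIG(size_cfg));

            /* head == tail means the ring is empty. */
            WRITE_CSR_RING_HEAD(csr_addr, bank, ring, 0);
            WRITE_CSR_RING_TAIL(csr_addr, bank, ring, 0);
    }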
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw.h
new file mode 100644
index 00000000..5de34d55
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw.h
@@ -0,0 +1,316 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <sys/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+ (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+ (((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+ ICP_QAT_FW_COMN_RESP_SERV_NULL,
+ ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+ ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+ ICP_QAT_FW_COMN_REQ_NULL = 0,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+ ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t serv_specif_fields[4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+ uint64_t opaque_data;
+ uint64_t src_data_addr;
+ uint64_t dest_data_addr;
+ uint32_t src_length;
+ uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+ uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+ uint8_t resrvd1;
+ uint8_t service_cmd_id;
+ uint8_t service_type;
+ uint8_t hdr_flags;
+ uint16_t serv_specif_flags;
+ uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+ uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+ uint8_t xlat_err_code;
+ uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+ uint8_t resrvd1;
+ uint8_t service_id;
+ uint8_t response_type;
+ uint8_t hdr_flags;
+ struct icp_qat_fw_comn_error comn_error;
+ uint8_t comn_status;
+ uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_hdr;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+ (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+ ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+ | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+ QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+ ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+ QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+ (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+ QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+ (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+ QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+ (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+ QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+ QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+ QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+ ICP_QAT_FW_SLICE_NULL = 0,
+ ICP_QAT_FW_SLICE_CIPHER = 1,
+ ICP_QAT_FW_SLICE_AUTH = 2,
+ ICP_QAT_FW_SLICE_DRAM_RD = 3,
+ ICP_QAT_FW_SLICE_DRAM_WR = 4,
+ ICP_QAT_FW_SLICE_COMP = 5,
+ ICP_QAT_FW_SLICE_XLAT = 6,
+ ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
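
A minimal sketch of the field helpers above in action, marking a common request header valid and reading the bit back (example_mark_valid is a hypothetical name):

    #include <assert.h>
    #include "icp_qat_fw.h"

    static void example_mark_valid(struct icp_qat_fw_comn_req_hdr *hdr)
    {
            /* Compose hdr_flags with only the valid bit (bit 7) set. */
            hdr->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(
                            ICP_QAT_FW_COMN_REQ_FLAG_SET);

            /* The GET helper recovers the same bit: (0x80 >> 7) & 0x1. */
            assert(ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr->hdr_flags) == 1);
    }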
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
new file mode 100644
index 00000000..fbf2b839
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
@@ -0,0 +1,404 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+ ICP_QAT_FW_LA_CMD_CIPHER = 0,
+ ICP_QAT_FW_LA_CMD_AUTH = 1,
+ ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+ ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+ ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+ ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+ ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+ ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+ ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+ ICP_QAT_FW_LA_CMD_MGF1 = 9,
+ ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+ ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+ ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO 2
+#define ICP_QAT_FW_LA_CCM_PROTO 1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK 0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+ cmp_auth, ret_auth, update_state, \
+ ciph_iv, ciphcfg, partial) \
+ (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+ ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+ QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+ ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+ QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+ ((proto & QAT_LA_PROTO_MASK) << \
+ QAT_LA_PROTO_BITPOS) | \
+ ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+ QAT_LA_CMP_AUTH_RES_BITPOS) | \
+ ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+ QAT_LA_RET_AUTH_RES_BITPOS) | \
+ ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+ QAT_LA_UPDATE_STATE_BITPOS) | \
+ ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+ QAT_LA_CIPH_IV_FLD_BITPOS) | \
+ ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+ ((partial & QAT_LA_PARTIAL_MASK) << \
+ QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+ QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t cipher_padding_sz;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+ uint32_t resrvd1;
+ uint8_t resrvd2;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t resrvd3;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd4;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id_cipher;
+ uint8_t cipher_padding_sz;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id_auth;
+ uint8_t resrvd1;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd2;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+	(sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ union {
+ uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ uint64_t cipher_IV_ptr;
+ uint64_t resrvd1;
+ } s;
+ } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+ uint32_t auth_off;
+ uint32_t auth_len;
+ union {
+ uint64_t auth_partial_st_prefix;
+ uint64_t aad_adr;
+ } u1;
+ uint64_t auth_res_addr;
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint8_t hash_state_sz;
+ uint8_t auth_res_sz;
+} __rte_packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
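
To make the bit layout concrete, a sketch of composing serv_specif_flags for an AES-GCM request with a 12-byte IV; the helper name and flag choices are illustrative, not the driver's actual session policy:

    #include <stdint.h>
    #include "icp_qat_fw_la.h"

    static uint16_t example_gcm_la_flags(void)
    {
            return ICP_QAT_FW_LA_FLAGS_BUILD(
                    0,                                  /* zuc_proto: not ZUC */
                    ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS, /* gcm_iv_len */
                    ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER,  /* digest kept separate */
                    ICP_QAT_FW_LA_GCM_PROTO,            /* proto */
                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES,      /* don't compare tag */
                    ICP_QAT_FW_LA_RET_AUTH_RES,         /* do return tag */
                    ICP_QAT_FW_LA_NO_UPDATE_STATE,      /* stateless */
                    ICP_QAT_FW_CIPH_IV_16BYTE_DATA,     /* IV in the request */
                    ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP,
                    ICP_QAT_FW_LA_PARTIAL_NONE);        /* full packet */
    }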
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_hw.h
new file mode 100644
index 00000000..ebe245f6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -0,0 +1,309 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+ ICP_QAT_HW_AE_0 = 0,
+ ICP_QAT_HW_AE_1 = 1,
+ ICP_QAT_HW_AE_2 = 2,
+ ICP_QAT_HW_AE_3 = 3,
+ ICP_QAT_HW_AE_4 = 4,
+ ICP_QAT_HW_AE_5 = 5,
+ ICP_QAT_HW_AE_6 = 6,
+ ICP_QAT_HW_AE_7 = 7,
+ ICP_QAT_HW_AE_8 = 8,
+ ICP_QAT_HW_AE_9 = 9,
+ ICP_QAT_HW_AE_10 = 10,
+ ICP_QAT_HW_AE_11 = 11,
+ ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+ ICP_QAT_HW_QAT_0 = 0,
+ ICP_QAT_HW_QAT_1 = 1,
+ ICP_QAT_HW_QAT_2 = 2,
+ ICP_QAT_HW_QAT_3 = 3,
+ ICP_QAT_HW_QAT_4 = 4,
+ ICP_QAT_HW_QAT_5 = 5,
+ ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+ ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+ ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+ ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+ ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+ ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+ ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+ ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+ ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+ ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+ ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+ ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+ ICP_QAT_HW_AUTH_MODE0 = 0,
+ ICP_QAT_HW_AUTH_MODE1 = 1,
+ ICP_QAT_HW_AUTH_MODE2 = 2,
+ ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+ uint32_t config;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+ QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+ (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+ & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+ ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+ uint32_t counter;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+ (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+ struct icp_qat_hw_auth_config auth_config;
+ struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+ ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+ uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+ struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+ ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+ ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+ ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+ ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+ ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+ ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+ ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+ ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+ ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+ ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+ ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+ ICP_QAT_HW_CIPHER_F8_MODE = 3,
+ ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+ ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+ ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+ ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_auth_op {
+ ICP_QAT_HW_AUTH_VERIFY = 0,
+ ICP_QAT_HW_AUTH_GENERATE = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+ ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+ (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+ ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+ ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+ ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+
+#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
+
+struct icp_qat_hw_cipher_algo_blk {
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
+} __rte_cache_aligned;
+
+#endif
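
As a concrete instance, an AES-128-CBC encrypt configuration word could be built like this (hypothetical helper name; the resulting value is 0x13):

    #include <stdint.h>
    #include "icp_qat_hw.h"

    static uint32_t example_aes128_cbc_cfg(void)
    {
            /* algo (0x3) lands in bits 0..3, mode (0x1) in bits 4..7;
             * the direction bit (8) and key-convert bit (9) stay clear. */
            return ICP_QAT_HW_CIPHER_CONFIG_BUILD(
                    ICP_QAT_HW_CIPHER_CBC_MODE,
                    ICP_QAT_HW_CIPHER_ALGO_AES128,
                    ICP_QAT_HW_CIPHER_NO_CONVERT,
                    ICP_QAT_HW_CIPHER_ENCRYPT);         /* == 0x13 */
    }

The CBC decrypt path flips the direction bit and requests a key conversion instead, as the QAT_AES_HW_CONFIG_CBC_DEC macro in qat_algs.h below shows.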
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs.h
new file mode 100644
index 00000000..5c63406b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -0,0 +1,174 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_ALGS_H_
+#define _ICP_QAT_ALGS_H_
+#include <rte_memory.h>
+#include <rte_crypto.h>
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+/*
+ * Key Modifier (KM) value used by the KASUMI algorithm in F9 mode to
+ * XOR the Integrity Key (IK)
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+struct qat_alg_buf {
+ uint32_t len;
+ uint32_t resrvd;
+ uint64_t addr;
+} __rte_packed;
+
+enum qat_crypto_proto_flag {
+ QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+ QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+ QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+ QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+ QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SGL_MAX_NUMBER 16
+
+struct qat_alg_buf_list {
+ uint64_t resrvd;
+ uint32_t num_bufs;
+ uint32_t num_mapped_bufs;
+ struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_crypto_op_cookie {
+ struct qat_alg_buf_list qat_sgl_list_src;
+ struct qat_alg_buf_list qat_sgl_list_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+/* Common content descriptor */
+struct qat_alg_cd {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+} __rte_packed __rte_cache_aligned;
+
+struct qat_session {
+ enum icp_qat_fw_la_cmd_id qat_cmd;
+ enum icp_qat_hw_cipher_algo qat_cipher_alg;
+ enum icp_qat_hw_cipher_dir qat_dir;
+ enum icp_qat_hw_cipher_mode qat_mode;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
+ void *bpi_ctx;
+ struct qat_alg_cd cd;
+ uint8_t *cd_cur_ptr;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
+ struct qat_crypto_instance *inst;
+ rte_spinlock_t lock; /* protects this struct */
+};
+
+struct qat_alg_ablkcipher_cd {
+ struct icp_qat_hw_cipher_algo_blk *cd;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ struct qat_crypto_instance *inst;
+ rte_spinlock_t lock; /* protects this struct */
+};
+
+int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
+
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
+ uint8_t *enckey,
+ uint32_t enckeylen);
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t add_auth_data_length,
+ uint32_t digestsize,
+ unsigned int operation);
+
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_crypto_proto_flag proto_flags);
+
+void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
+ int alg, const uint8_t *key,
+ unsigned int keylen);
+
+void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
+ int alg, const uint8_t *key,
+ unsigned int keylen);
+
+int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+#endif
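
A sketch, under the assumption that qat_alg_validate_aes_key() maps supported key lengths to the matching algo enum, of how a caller could fill in the cipher part of a session for AES-CBC encryption (the helper name is hypothetical):

    #include <string.h>
    #include <errno.h>
    #include "qat_algs.h"

    static int example_fill_aes_cbc_enc(struct qat_session *sess,
                    const uint8_t *key, int key_len)
    {
            enum icp_qat_hw_cipher_algo alg;

            /* Rejects anything other than the supported AES key sizes. */
            if (qat_alg_validate_aes_key(key_len, &alg) != 0)
                    return -EINVAL;

            sess->qat_cipher_alg = alg;
            sess->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
            sess->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
            sess->cd.cipher.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
            memcpy(sess->cd.cipher.key, key, key_len);
            return 0;
    }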
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
new file mode 100644
index 00000000..154e1ddd
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -0,0 +1,1014 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_crypto_sym.h>
+
+#include "../qat_logs.h"
+
+#include <openssl/sha.h> /* Needed to calculate pre-compute values */
+#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+
+#include "qat_algs.h"
+
+/* returns block size in bytes per cipher algo */
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
+{
+ switch (qat_cipher_alg) {
+ case ICP_QAT_HW_CIPHER_ALGO_DES:
+ return ICP_QAT_HW_DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_3DES:
+ return ICP_QAT_HW_3DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_AES128:
+ case ICP_QAT_HW_CIPHER_ALGO_AES192:
+ case ICP_QAT_HW_CIPHER_ALGO_AES256:
+ return ICP_QAT_HW_AES_BLK_SZ;
+ default:
+ PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+ return -EFAULT;
+	}
+}
+
+/*
+ * Returns size in bytes per hash algo for state1 size field in cd_ctrl
+ * This is the digest size rounded up to the nearest quadword
+ */
+static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum state1 size in this case */
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+	}
+}
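+
+/*
+ * Worked example (illustrative, assuming QAT_HW_DEFAULT_ALIGNMENT is the
+ * 8-byte quadword used throughout this file): a SHA1 digest is 20 bytes,
+ * so QAT_HW_ROUND_UP(20, 8) = 24 and state1 occupies 24 bytes in the
+ * content descriptor.
+ */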
+
+/* returns digest size in bytes per hash algo */
+static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return ICP_QAT_HW_SHA224_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return ICP_QAT_HW_SHA384_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return ICP_QAT_HW_MD5_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum digest size in this case */
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+	}
+}
+
+/* returns block size in bytes per hash algo */
+static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return SHA_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ return 16;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return MD5_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum block size in this case */
+ return SHA512_CBLOCK;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+	}
+}
+
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
+{
+ MD5_CTX ctx;
+
+ if (!MD5_Init(&ctx))
+ return -EFAULT;
+ MD5_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+
+ return 0;
+}
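+
+/*
+ * Note on the helpers above: each runs exactly one compression-function
+ * pass (*_Transform) over a single input block and copies out the raw
+ * chaining state of the context rather than a finalised digest.
+ * partial_hash_compute() below byte-swaps the SHA state words into the
+ * big-endian layout the hardware consumes; the MD5 state is copied
+ * through unchanged.
+ */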
+
+static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+ uint8_t *data_in,
+ uint8_t *data_out)
+{
+ int digest_size;
+ uint8_t digest[qat_hash_get_digest_size(
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint32_t *hash_state_out_be32;
+ uint64_t *hash_state_out_be64;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ digest_size = qat_hash_get_digest_size(hash_alg);
+ if (digest_size <= 0)
+ return -EFAULT;
+
+ hash_state_out_be32 = (uint32_t *)data_out;
+ hash_state_out_be64 = (uint64_t *)data_out;
+
+ switch (hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (partial_hash_sha1(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (partial_hash_sha224(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (partial_hash_sha256(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (partial_hash_sha384(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (partial_hash_sha512(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (partial_hash_md5(data_in, data_out))
+ return -EFAULT;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+#define HASH_XCBC_PRECOMP_KEY_NUM 3
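+
+/*
+ * HMAC background (RFC 2104): HMAC(K, m) =
+ * H((K ^ opad) || H((K ^ ipad) || m)). The precompute below hashes the
+ * padded key blocks (K ^ ipad) and (K ^ opad) once at session setup and
+ * stores both intermediate states, so the hardware can resume the inner
+ * and outer hashes per request without reprocessing the key.
+ */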
+
+static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+ const uint8_t *auth_key,
+ uint16_t auth_keylen,
+ uint8_t *p_state_buf,
+ uint16_t *p_state_len)
+{
+ int block_size;
+ uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
+ static uint8_t qat_aes_xcbc_key_seed[
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ };
+
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ int x;
+ AES_KEY enc_key;
+
+		in = rte_zmalloc("working mem for key",
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+		if (in == NULL)
+			return -ENOMEM;
+		rte_memcpy(in, qat_aes_xcbc_key_seed,
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ rte_free(in -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+ memset(out -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+ 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ }
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+ return 0;
+ } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ AES_KEY enc_key;
+
+ memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ);
+		in = rte_zmalloc("working mem for key",
+				ICP_QAT_HW_GALOIS_H_SZ, 16);
+		if (in == NULL)
+			return -ENOMEM;
+		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+			&enc_key) != 0) {
+			rte_free(in);
+			return -EFAULT;
+		}
+ AES_encrypt(in, out, &enc_key);
+ *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ;
+ rte_free(in);
+ return 0;
+ }
+
+ block_size = qat_hash_get_block_size(hash_alg);
+ if (block_size <= 0)
+ return -EFAULT;
+ /* init ipad and opad from key and xor with fixed values */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+
+ if (auth_keylen > (unsigned int)block_size) {
+ PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
+ return -EFAULT;
+ }
+ rte_memcpy(ipad, auth_key, auth_keylen);
+ rte_memcpy(opad, auth_key, auth_keylen);
+
+ for (i = 0; i < block_size; i++) {
+ uint8_t *ipad_ptr = ipad + i;
+ uint8_t *opad_ptr = opad + i;
+ *ipad_ptr ^= HMAC_IPAD_VALUE;
+ *opad_ptr ^= HMAC_OPAD_VALUE;
+ }
+
+ /* do partial hash of ipad and copy to state1 */
+ if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ PMD_DRV_LOG(ERR, "ipad precompute failed");
+ return -EFAULT;
+ }
+
+ /*
+ * State len is a multiple of 8, so may be larger than the digest.
+ * Put the partial hash of opad state_len bytes after state1
+ */
+ *p_state_len = qat_hash_get_state1_size(hash_alg);
+ if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ PMD_DRV_LOG(ERR, "opad precompute failed");
+ return -EFAULT;
+ }
+
+ /* don't leave data lying around */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ return 0;
+}
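+
+/*
+ * Note on the non-HMAC branches above: AES-XCBC-MAC (RFC 3566) derives
+ * its three subkeys by AES-encrypting the constant blocks 0x01..01,
+ * 0x02..02 and 0x03..03 under the auth key, and GCM/GMAC derives the
+ * GHASH key as H = E_K(0^128), the encryption of the all-zero block;
+ * both precomputes are written straight into the state buffer.
+ */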
+
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_crypto_proto_flag proto_flags)
+{
+ PMD_INIT_FUNC_TRACE();
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_FLAT);
+ ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_PARTIAL_NONE);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+
+ switch (proto_flags) {
+ case QAT_CRYPTO_PROTO_FLAG_NONE:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_CCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_GCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_SNOW_3G_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_ZUC:
+ ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_ZUC_3G_PROTO);
+ break;
+ }
+
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+}
+
+/*
+ * SNOW 3G and ZUC must never use this function; their protocol flag
+ * is set directly in both the cipher and auth parts of the content
+ * descriptor by the building functions.
+ */
+static enum qat_crypto_proto_flag
+qat_get_crypto_proto_flag(uint16_t flags)
+{
+ int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ switch (proto) {
+ case ICP_QAT_FW_LA_GCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ break;
+ case ICP_QAT_FW_LA_CCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ break;
+ }
+
+ return qat_proto_flag;
+}
+
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
+ uint8_t *cipherkey,
+ uint32_t cipherkeylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *cipher;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ enum icp_qat_hw_cipher_convert key_convert;
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+ uint32_t total_key_size;
+ uint16_t cipher_offset, cd_size;
+ uint32_t wordIndex = 0;
+ uint32_t *temp_key = NULL;
+ PMD_INIT_FUNC_TRACE();
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
+ return -EFAULT;
+ }
+
+ if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ /*
+		 * CTR streaming ciphers are a special case: decrypt uses
+		 * the same operation as encrypt, so override the defaults
+		 * set previously.
+ */
+ cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
+ || cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ else
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_padding_sz =
+ (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
+ total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
+ total_key_size = ICP_QAT_HW_DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ } else {
+ total_key_size = cipherkeylen;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ }
+ cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
+ cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+
+ header->service_cmd_id = cdesc->qat_cmd;
+ qat_alg_init_common_hdr(header, qat_proto_flag);
+
+ cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
+ cipher->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+ cdesc->qat_cipher_alg, key_convert,
+ cdesc->qat_dir);
+
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
+ sizeof(struct icp_qat_hw_cipher_config)
+ + cipherkeylen);
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ memcpy(temp_key, cipherkey, cipherkeylen);
+
+ /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
+ wordIndex++)
+ temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen + cipherkeylen;
+ } else {
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen;
+ }
+
+ if (total_key_size > cipherkeylen) {
+ uint32_t padding_size = total_key_size-cipherkeylen;
+ if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
+			/* K3 not provided, so reuse K1 as K3 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
+ else
+ memset(cdesc->cd_cur_ptr, 0, padding_size);
+ cdesc->cd_cur_ptr += padding_size;
+ }
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+ return 0;
+}
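+
+/*
+ * Cipher content descriptor layout, as built above (a sketch): a cipher
+ * config word followed by the key material, expanded with the KASUMI F8
+ * key modifier or zero/K1 padding where required. All offsets and sizes
+ * reported to the firmware are in 8-byte quadwords, hence the ">> 3"
+ * shifts into cd_ctrl and cd_pars.
+ */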
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t add_auth_data_length,
+ uint32_t digestsize,
+ unsigned int operation)
+{
+ struct icp_qat_hw_auth_setup *hash;
+ struct icp_qat_hw_cipher_algo_blk *cipherconfig;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (struct icp_qat_fw_la_auth_req_params *)
+ ((char *)&req_tmpl->serv_specif_rqpars +
+ sizeof(struct icp_qat_fw_la_cipher_req_params));
+ uint16_t state1_size = 0, state2_size = 0;
+ uint16_t hash_offset, cd_size;
+ uint32_t *aad_len = NULL;
+ uint32_t wordIndex = 0;
+ uint32_t *pTempKey;
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
+ return -EFAULT;
+ }
+
+ if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+ } else {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+ }
+
+ /*
+ * Setup the inner hash config
+ */
+ hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
+ hash->auth_config.reserved = 0;
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ cdesc->qat_hash_alg, digestsize);
+
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+ hash->auth_counter.counter = 0;
+ else
+ hash->auth_counter.counter = rte_bswap32(
+ qat_hash_get_block_size(cdesc->qat_hash_alg));
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
+
+ /*
+ * cd_cur_ptr now points at the state1 information.
+ */
+ switch (cdesc->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
+ PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
+ return -EFAULT;
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
+ if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
+ PMD_DRV_LOG(ERR, "(GCM)precompute failed");
+ return -EFAULT;
+ }
+ /*
+		 * Write the AAD length into bytes 16-19 of state2 in
+		 * big-endian format; the field is 8 bytes wide, but only
+		 * its first 4 bytes are populated here.
+ */
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16);
+ auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
+
+ aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ);
+ *aad_len = rte_bswap32(add_auth_data_length);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
+ state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+
+ cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
+ (cdesc->cd_cur_ptr + state1_size + state2_size);
+ cipherconfig->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ memcpy(cipherconfig->key, authkey, authkeylen);
+ memset(cipherconfig->key + authkeylen,
+ 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ auth_param->hash_state_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+ cdesc->qat_hash_alg, digestsize);
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
+ state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ cdesc->cd_cur_ptr += state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ auth_param->hash_state_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ authkey, authkeylen, cdesc->cd_cur_ptr,
+ &state1_size)) {
+ PMD_DRV_LOG(ERR, "(MD5)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
+ state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+ pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
+ + authkeylen);
+ /*
+ * The Inner Hash Initial State2 block must contain IK
+ * (Initialisation Key), followed by IK XOR-ed with KM
+ * (Key Modifier): IK||(IK^KM).
+ */
+ /* write the auth key */
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ /* initialise temp key with auth key */
+ memcpy(pTempKey, authkey, authkeylen);
+ /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
+ pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ return -EFAULT;
+ }
+
+ /* Request template setup */
+ qat_alg_init_common_hdr(header, qat_proto_flag);
+ header->service_cmd_id = cdesc->qat_cmd;
+
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+ hash_cd_ctrl->inner_state1_sz = state1_size;
+ auth_param->auth_res_sz = digestsize;
+
+ hash_cd_ctrl->inner_state2_sz = state2_size;
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
+ >> 3);
+
+ cdesc->cd_cur_ptr += state1_size + state2_size;
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+ return 0;
+}
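+
+/*
+ * inner_state2_offset arithmetic (a sketch): state2 begins immediately
+ * after the auth setup block plus state1 rounded up to 8 bytes, so the
+ * offset is (sizeof(struct icp_qat_hw_auth_setup) +
+ * RTE_ALIGN_CEIL(state1_size, 8)) >> 3 quadwords past hash_cfg_offset.
+ */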
+
+int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case ICP_QAT_HW_AES_192_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ break;
+ case ICP_QAT_HW_AES_256_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_KASUMI_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_DES_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case QAT_3DES_KEY_SZ_OPT1:
+ case QAT_3DES_KEY_SZ_OPT2:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.c b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.c
new file mode 100644
index 00000000..386aa453
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.c
@@ -0,0 +1,1293 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+#include <rte_hexdump.h>
+#include <rte_crypto_sym.h>
+#include <openssl/evp.h>
+
+#include "qat_logs.h"
+#include "qat_algs.h"
+#include "qat_crypto.h"
+#include "adf_transport_access_macros.h"
+
+#define BYTE_LENGTH 8
+
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+ struct qat_pmd_private *internals) {
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (capability->sym.cipher.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+ struct qat_pmd_private *internals) {
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (capability->sym.auth.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+/** Encrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[16];
+ int i;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst+i) = *(src+i)^(encrypted_iv[i]);
+
+ return 0;
+
+cipher_encrypt_err:
+ PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+/** Decrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB decryption, same result, more performant
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[16];
+ int i;
+
+ /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst+i) = *(src+i)^(encrypted_iv[i]);
+
+ return 0;
+
+cipher_decrypt_err:
+ PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
+ return -EINVAL;
+}
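+
+/*
+ * Why both helpers call EVP_EncryptUpdate(): in CFB mode the keystream
+ * is E_K(IV) for encryption and decryption alike, since
+ * C = P ^ E_K(IV) and P = C ^ E_K(IV). Only the block cipher's forward
+ * direction is ever needed, so one ECB encryption of the IV followed by
+ * an XOR serves both paths.
+ */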
+
+/** Creates an AES or DES context in ECB mode
+ * Depends on openssl libcrypto
+ */
+static void *
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ enum rte_crypto_cipher_operation direction __rte_unused,
+ uint8_t *key)
+{
+ const EVP_CIPHER *algo = NULL;
+ EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+
+ if (ctx == NULL)
+ goto ctx_init_err;
+
+ if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+ algo = EVP_des_ecb();
+ else
+ algo = EVP_aes_128_ecb();
+
+	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
+ goto ctx_init_err;
+
+ return ctx;
+
+ctx_init_err:
+ if (ctx != NULL)
+ EVP_CIPHER_CTX_free(ctx);
+ return NULL;
+}
+
+/** Frees a context previously created
+ * Depends on openssl libcrypto
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+ if (bpi_ctx != NULL)
+ EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
+
+static inline uint32_t
+adf_modulo(uint32_t data, uint32_t shift);
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+ struct qat_crypto_op_cookie *qat_op_cookie);
+
+void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ void *session)
+{
+ struct qat_session *sess = session;
+ phys_addr_t cd_paddr;
+
+ PMD_INIT_FUNC_TRACE();
+ if (sess) {
+ if (sess->bpi_ctx) {
+ bpi_cipher_ctx_free(sess->bpi_ctx);
+ sess->bpi_ctx = NULL;
+ }
+ cd_paddr = sess->cd_paddr;
+ memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
+ sess->cd_paddr = cd_paddr;
+ } else
+ PMD_DRV_LOG(ERR, "NULL session");
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_AUTH;
+
+ if (xform->next == NULL)
+ return -1;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+ return -1;
+}
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+void *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_session *session = session_private;
+ struct qat_pmd_private *internals = dev->data->dev_private;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_alg_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ if (qat_alg_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_alg_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ session->bpi_ctx = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data);
+ if (session->bpi_ctx == NULL) {
+ PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
+ goto error_out;
+ }
+ if (qat_alg_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ session->bpi_ctx = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data);
+ if (session->bpi_ctx == NULL) {
+ PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
+ goto error_out;
+ }
+ if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ PMD_DRV_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ goto error_out;
+ }
+ if (qat_alg_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_CCM:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto error_out;
+ }
+
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ else
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length))
+ goto error_out;
+
+ return session;
+
+error_out:
+ if (session->bpi_ctx) {
+ bpi_cipher_ctx_free(session->bpi_ctx);
+ session->bpi_ctx = NULL;
+ }
+ return NULL;
+}
+
+
+void *
+qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_session *session = session_private;
+
+ int qat_cmd_id;
+ PMD_INIT_FUNC_TRACE();
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
+ goto error_out;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_AUTH:
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+ case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+ case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_MGF1:
+ case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_DELIMITER:
+ PMD_DRV_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ goto error_out;
+ }
+
+ return session;
+
+error_out:
+ return NULL;
+}
+
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session_private)
+{
+
+ struct qat_session *session = session_private;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct qat_pmd_private *internals = dev->data->dev_private;
+ auth_xform = qat_get_auth_xform(xform);
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ PMD_DRV_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ goto error_out;
+ }
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CCM:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
+ auth_xform->algo);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ goto error_out;
+ }
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (session->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length,
+ auth_xform->add_auth_data_length,
+ auth_xform->digest_length,
+ auth_xform->op))
+ goto error_out;
+ } else {
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ auth_xform->key.data,
+ auth_xform->key.length,
+ auth_xform->add_auth_data_length,
+ auth_xform->digest_length,
+ auth_xform->op))
+ goto error_out;
+ }
+ return session;
+
+error_out:
+ return NULL;
+}
+
+unsigned qat_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
+}
+
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_session *ctx,
+ struct rte_crypto_op *op)
+{
+ uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+
+ if (last_block_len &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = sym_op->cipher.iv.data;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
+ last_block_len);
+#endif
+ bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
+ last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
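+
+/*
+ * DOCSIS BPI in outline: all complete blocks are ciphered in CBC mode by
+ * the hardware, while a trailing partial block uses CFB-style residual
+ * termination in software. On decrypt, the preprocess above CFB-decrypts
+ * the last partial block before submission, taking the preceding
+ * ciphertext block (or the IV for a runt buffer) as the CFB IV; the
+ * postprocess below mirrors this on the encrypt side after dequeue.
+ */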
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_session *ctx,
+ struct rte_crypto_op *op)
+{
+ uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = sym_op->cipher.iv.data;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "BPI: src before post-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "BPI: src after post-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst after post-process:", dst,
+ last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register struct rte_crypto_op **cur_op = ops;
+ register int ret;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+ int overflow;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
+ - queue->max_inflights;
+ if (overflow > 0) {
+ rte_atomic16_sub(&tmp_qp->inflights16, overflow);
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
+ tmp_qp->op_cookies[tail / queue->msg_size]);
+ if (ret != 0) {
+ tmp_qp->stats.enqueue_err_count++;
+ /*
+ * This message cannot be enqueued,
+			 * decrease the in-flight count by the
+			 * number of ops that were not sent
+ */
+ rte_atomic16_sub(&tmp_qp->inflights16,
+ nb_ops_possible - nb_ops_sent);
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo);
+ nb_ops_sent++;
+ cur_op++;
+ }
+kick_tail:
+ WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, tail);
+ queue->tail = tail;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ return nb_ops_sent;
+}
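+
+/*
+ * Admission control in the loop above: inflights16 is bumped by nb_ops
+ * up front with an atomic add and any overflow past max_inflights is
+ * subtracted straight back, so concurrent enqueuers cannot oversubscribe
+ * the ring even though no lock is held. A failed descriptor write
+ * likewise returns its unused reservation before the tail is kicked.
+ */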
+
+uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ uint32_t msg_counter = 0;
+ struct rte_crypto_op *rx_op;
+ struct icp_qat_fw_comn_resp *resp_msg;
+
+ queue = &(tmp_qp->rx_q);
+ resp_msg = (struct icp_qat_fw_comn_resp *)
+ ((uint8_t *)queue->base_addr + queue->head);
+
+ while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+
+#endif
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ struct qat_session *sess = (struct qat_session *)
+ (rx_op->sym->session->_private);
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
+ queue->head = adf_modulo(queue->head +
+ queue->msg_size,
+ ADF_RING_SIZE_MODULO(queue->queue_size));
+ resp_msg = (struct icp_qat_fw_comn_resp *)
+ ((uint8_t *)queue->base_addr +
+ queue->head);
+ *ops = rx_op;
+ ops++;
+ msg_counter++;
+ }
+ if (msg_counter > 0) {
+ WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
+ queue->hw_bundle_number,
+ queue->hw_queue_number, queue->head);
+ rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
+ tmp_qp->stats.dequeued_count += msg_counter;
+ }
+ return msg_counter;
+}
+
+static inline int
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
+ struct qat_alg_buf_list *list, uint32_t data_len)
+{
+ int nr = 1;
+
+ uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+ buff_start + rte_pktmbuf_data_len(buf);
+
+ list->bufers[0].addr = buff_start;
+ list->bufers[0].resrvd = 0;
+ list->bufers[0].len = buf_len;
+
+ if (data_len <= buf_len) {
+ list->num_bufs = nr;
+ list->bufers[0].len = data_len;
+ return 0;
+ }
+
+ buf = buf->next;
+ while (buf) {
+ if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+ PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
+ " entry(%u)",
+ QAT_SGL_MAX_NUMBER);
+ return -EINVAL;
+ }
+
+ list->bufers[nr].len = rte_pktmbuf_data_len(buf);
+ list->bufers[nr].resrvd = 0;
+ list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+
+ buf_len += list->bufers[nr].len;
+ buf = buf->next;
+
+ if (buf_len > data_len) {
+ list->bufers[nr].len -=
+ buf_len - data_len;
+ buf = NULL;
+ }
+ ++nr;
+ }
+ list->num_bufs = nr;
+
+ return 0;
+}
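
As a worked example of the trimming logic above: a three-segment mbuf chain
of 100 bytes per segment, with buff_start 40 bytes into the first segment's
physical address and data_len = 180, produces three entries of 60, 100 and
20 bytes; the last entry is shortened so the list covers exactly data_len.
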
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+ struct qat_crypto_op_cookie *qat_op_cookie)
+{
+ int ret = 0;
+ struct qat_session *ctx;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint8_t do_sgl = 0;
+
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ return -EINVAL;
+ }
+#endif
+ if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
+ " requests, op (%p) is sessionless.", op);
+ return -EINVAL;
+ }
+
+ if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ PMD_DRV_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ ctx = (struct qat_session *)op->sym->session->_private;
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ do_auth = 1;
+ do_cipher = 1;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
+
+ if (do_cipher) {
+
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+
+ if (unlikely(
+ (cipher_param->cipher_length % BYTE_LENGTH != 0)
+ || (cipher_param->cipher_offset
+ % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR,
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else if (ctx->bpi_ctx) {
+			/* DOCSIS - send only complete blocks to the device;
+			 * any partial block is processed using CFB mode.
+			 * Even with 0 complete blocks the request is still
+			 * sent, so it reaches the rx queue for
+			 * post-processing and dequeuing
+			 */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+		/* Copy the IV into the request if it fits.
+		 * If the IV length is zero, copy nothing and use the
+		 * IV embedded in the request descriptor instead.
+		 */
+ if (op->sym->cipher.iv.length) {
+ if (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ op->sym->cipher.iv.phys_addr;
+ }
+ }
+ min_ofs = cipher_ofs;
+ }
+
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
+ || (auth_param->auth_len % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+ if (do_cipher) {
+ auth_len = auth_len + auth_ofs + 1 -
+ ICP_QAT_HW_KASUMI_BLK_SZ;
+ auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
+ } else {
+ auth_len = auth_len + auth_ofs + 1;
+ auth_ofs = 0;
+ }
+ }
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ auth_ofs = op->sym->cipher.data.offset;
+ auth_len = op->sym->cipher.data.length;
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+ }
+ min_ofs = auth_ofs;
+
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
+
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+
+ }
+
+ if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ do_sgl = 1;
+
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+ if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+ min_ofs = 0;
+
+ if (unlikely(op->sym->m_dst != NULL)) {
+ /* Out-of-place operation (OOP)
+ * Don't align DMA start. DMA the minimum data-set
+ * so as not to overwrite data in dest buffer
+ */
+ src_buf_start =
+ rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+ dst_buf_start =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+
+ } else {
+ /* In-place operation
+ * Start DMA at nearest aligned address below min_ofs
+ */
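+		/* e.g. data at physical address 0x1064 starts its DMA at
+		 * 0x1040, unless that falls before the start of the mbuf
+		 * (checked just below)
+		 */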
+ src_buf_start =
+ rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+ & QAT_64_BTYE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+ rte_pktmbuf_headroom(op->sym->m_src))
+ > src_buf_start)) {
+ /* alignment has pushed addr ahead of start of mbuf
+ * so revert and take the performance hit
+ */
+ src_buf_start =
+ rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ min_ofs);
+ }
+ dst_buf_start = src_buf_start;
+ }
+
+ if (do_cipher) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_mtophys_offset(
+ op->sym->m_src, cipher_ofs) - src_buf_start;
+ cipher_param->cipher_length = cipher_len;
+ } else {
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
+ }
+ if (do_auth) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ op->sym->m_src, auth_ofs) - src_buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
+ }
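+	/* The DMA length must cover whichever of the cipher or auth
+	 * regions ends last, e.g. cipher bytes 16..79 combined with auth
+	 * bytes 0..95 gives a length of 96
+	 */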
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
+ if (do_sgl) {
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
+ &qat_op_cookie->qat_sgl_list_src,
+ qat_req->comn_mid.src_length);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ if (likely(op->sym->m_dst == NULL))
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ qat_op_cookie->qat_sgl_src_phys_addr;
+ else {
+ ret = qat_sgl_fill_array(op->sym->m_dst,
+ dst_buf_start,
+ &qat_op_cookie->qat_sgl_list_dst,
+ qat_req->comn_mid.dst_length);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot "
+ "fill sgl array");
+ return ret;
+ }
+
+ qat_req->comn_mid.src_data_addr =
+ qat_op_cookie->qat_sgl_src_phys_addr;
+ qat_req->comn_mid.dest_data_addr =
+ qat_op_cookie->qat_sgl_dst_phys_addr;
+ }
+ } else {
+ qat_req->comn_mid.src_data_addr = src_buf_start;
+ qat_req->comn_mid.dest_data_addr = dst_buf_start;
+ }
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ if (op->sym->cipher.iv.length == 12) {
+			/*
+			 * For GCM a 12 byte IV is allowed,
+			 * but we need to inform the f/w
+			 */
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+ if (op->sym->cipher.data.length == 0) {
+			/*
+			 * GMAC: zero-length cipher data,
+			 * authenticate the AAD buffer only
+			 */
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ op->sym->auth.aad.phys_addr;
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
+ cipher_param->cipher_length = 0;
+ cipher_param->cipher_offset = 0;
+ auth_param->u1.aad_adr = 0;
+ auth_param->auth_len = op->sym->auth.aad.length;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->u2.aad_sz = 0;
+ }
+ }
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
+#endif
+ return 0;
+}
+
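+/* Computes data mod 2^shift without a divide, equivalent to
+ * data & ((1u << shift) - 1); e.g. adf_modulo(300, 8) returns 44.
+ */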
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+ uint32_t div = data >> shift;
+ uint32_t mult = div << shift;
+
+ return data - mult;
+}
+
+void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
+{
+ struct rte_cryptodev_sym_session *sess = sym_sess;
+ struct qat_session *s = (void *)sess->_private;
+
+ PMD_INIT_FUNC_TRACE();
+ s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
+ offsetof(struct qat_session, cd) +
+ offsetof(struct rte_cryptodev_sym_session, _private);
+}
+
+int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+int qat_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_crypto_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_pmd_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ ADF_NUM_SYM_QPS_PER_BUNDLE *
+ ADF_NUM_BUNDLES_PER_DEV;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = internals->qat_dev_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ }
+}
+
+void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int i;
+ struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
+ stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
+ }
+}
+
+void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+ for (i = 0; i < dev->data->nb_queue_pairs; i++)
+ memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
+ PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
+}
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.h b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.h
new file mode 100644
index 00000000..b740d6b0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.h
@@ -0,0 +1,141 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_CRYPTO_H_
+#define _QAT_CRYPTO_H_
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_memzone.h>
+
+#include "qat_crypto_capabilities.h"
+
+/*
+ * This macro rounds up a number to be a multiple of
+ * the alignment when the alignment is a power of 2
+ */
+#define ALIGN_POW2_ROUNDUP(num, align) \
+ (((num) + (align) - 1) & ~((align) - 1))
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
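+/* e.g. ALIGN_POW2_ROUNDUP(100, 64) == 128; ANDing an address with
+ * QAT_64_BTYE_ALIGN_MASK rounds it down to a 64-byte boundary
+ */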
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ void *base_addr; /* Base address */
+ phys_addr_t base_phys_addr; /* Queue physical address */
+ uint32_t head; /* Shadow copy of the head */
+ uint32_t tail; /* Shadow copy of the tail */
+ uint32_t modulo;
+ uint32_t msg_size;
+ uint16_t max_inflights;
+ uint32_t queue_size;
+ uint8_t hw_bundle_number;
+ uint8_t hw_queue_number;
+ /* HW queue aka ring offset on bundle */
+};
+
+struct qat_qp {
+ void *mmap_bar_addr;
+ rte_atomic16_t inflights16;
+ struct qat_queue tx_q;
+ struct qat_queue rx_q;
+ struct rte_cryptodev_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint32_t nb_descriptors;
+} __rte_cache_aligned;
+
+/** private data structure for each QAT device */
+struct qat_pmd_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+};
+
+int qat_dev_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+int qat_dev_start(struct rte_cryptodev *dev);
+void qat_dev_stop(struct rte_cryptodev *dev);
+int qat_dev_close(struct rte_cryptodev *dev);
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info);
+
+void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
+
+int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *rx_conf, int socket_id);
+int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+int
+qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
+
+extern unsigned
+qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
+
+extern void
+qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
+
+extern void *
+qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session_private);
+
+void *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+
+extern void
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
+
+
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _QAT_CRYPTO_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_crypto_capabilities.h b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto_capabilities.h
new file mode 100644
index 00000000..1294f247
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -0,0 +1,574 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_CRYPTO_CAPABILITIES_H_
+#define _QAT_CRYPTO_CAPABILITIES_H_
+
+#define QAT_BASE_CPM16_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 128, \
+ .max = 128, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 128, \
+ .max = 128, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 8, \
+ .max = 64, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES XCBC MAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 240, \
+ .increment = 1 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GMAC (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .aad_size = { \
+ .min = 1, \
+ .max = 65535, \
+ .increment = 1 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UIA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM (CIPHER) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES DOCSIS BPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UEA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* NULL (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, }, \
+ }, }, \
+ }, \
+ { /* NULL (CIPHER) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, }, \
+ }, } \
+ }, \
+ { /* KASUMI (F8) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* KASUMI (F9) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES DOCSISBPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#define QAT_EXTRA_CPM17_SYM_CAPABILITIES \
+ { /* ZUC (EEA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* ZUC (EIA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_logs.h b/src/seastar/dpdk/drivers/crypto/qat/qat_logs.h
new file mode 100644
index 00000000..a909f630
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_logs.h
@@ -0,0 +1,78 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_LOGS_H_
+#define _QAT_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
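As an illustration of how these expand: with
RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER defined, a call such as

	PMD_DRV_LOG(ERR, "qp_id %u invalid", qp_id);

becomes an RTE_LOG(ERR, PMD, ...) call printing the calling function's
name, the message and a trailing newline; with the flag undefined it
compiles away entirely.
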
+#endif /* _QAT_LOGS_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_qp.c b/src/seastar/dpdk/drivers/crypto/qat/qat_qp.c
new file mode 100644
index 00000000..a358ccd7
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_qp.c
@@ -0,0 +1,490 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+
+#include "qat_logs.h"
+#include "qat_crypto.h"
+#include "qat_algs.h"
+#include "adf_transport_access_macros.h"
+
+#define ADF_MAX_SYM_DESC 4096
+#define ADF_MIN_SYM_DESC 128
+#define ADF_SYM_TX_RING_DESC_SIZE 128
+#define ADF_SYM_RX_RING_DESC_SIZE 32
+/* Offset from bundle start to 1st Sym Tx queue */
+#define ADF_SYM_TX_QUEUE_STARTOFF 2
+#define ADF_SYM_RX_QUEUE_STARTOFF 10
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * index), value)
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes);
+static int qat_tx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
+ int socket_id);
+static int qat_rx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
+ int socket_id);
+static void qat_queue_delete(struct qat_queue *queue);
+static int qat_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
+ int socket_id);
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *queue_size_for_csr);
+static void adf_configure_queues(struct qat_qp *queue);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
+
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+ unsigned memzone_flags = 0;
+ const struct rte_memseg *ms;
+
+ PMD_INIT_FUNC_TRACE();
+ mz = rte_memzone_lookup(queue_name);
+	if (mz != NULL) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ PMD_DRV_LOG(DEBUG, "re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+
+ PMD_DRV_LOG(ERR, "Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+ ms = rte_eal_get_physmem_layout();
+ switch (ms[0].hugepage_sz) {
+ case(RTE_PGSIZE_2M):
+ memzone_flags = RTE_MEMZONE_2MB;
+ break;
+ case(RTE_PGSIZE_1G):
+ memzone_flags = RTE_MEMZONE_1GB;
+ break;
+ case(RTE_PGSIZE_16M):
+ memzone_flags = RTE_MEMZONE_16MB;
+ break;
+ case(RTE_PGSIZE_16G):
+ memzone_flags = RTE_MEMZONE_16GB;
+ break;
+ default:
+ memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+	}
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return rte_memzone_reserve_bounded(queue_name, queue_size,
+ socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
+#else
+ return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
+ memzone_flags, queue_size);
+#endif
+}
+
+int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct qat_qp *qp;
+ struct rte_pci_device *pci_dev;
+ int ret;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (dev->data->queue_pairs[queue_pair_id] != NULL) {
+ ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
+ (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
+ PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
+ qp_conf->nb_descriptors);
+ return -EINVAL;
+ }
+
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ PMD_DRV_LOG(ERR, "Could not find VF config space "
+ "(UIO driver attached?).");
+ return -EINVAL;
+ }
+
+ if (queue_pair_id >=
+ (ADF_NUM_SYM_QPS_PER_BUNDLE *
+ ADF_NUM_BUNDLES_PER_DEV)) {
+ PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
+ queue_pair_id);
+ return -EINVAL;
+ }
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc("qat PMD qp metadata",
+ sizeof(*qp), RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+ qp->nb_descriptors = qp_conf->nb_descriptors;
+ qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+ qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+ RTE_CACHE_LINE_SIZE);
+
+ qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
+ rte_atomic16_init(&qp->inflights16);
+
+ if (qat_tx_queue_create(dev, &(qp->tx_q),
+ queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
+ PMD_INIT_LOG(ERR, "Tx queue create failed "
+ "queue_pair_id=%u", queue_pair_id);
+ goto create_err;
+ }
+
+ if (qat_rx_queue_create(dev, &(qp->rx_q),
+ queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
+ PMD_DRV_LOG(ERR, "Rx queue create failed "
+ "queue_pair_id=%hu", queue_pair_id);
+ qat_queue_delete(&(qp->tx_q));
+ goto create_err;
+ }
+
+ adf_configure_queues(qp);
+ adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
+ dev->driver->pci_drv.driver.name, dev->data->dev_id,
+ queue_pair_id);
+
+ qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->op_cookie_pool == NULL)
+ qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+ qp->nb_descriptors,
+ sizeof(struct qat_crypto_op_cookie), 64, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!qp->op_cookie_pool) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
+ " op mempool");
+ goto create_err;
+ }
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+ if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
+ return -EFAULT;
+ }
+
+		struct qat_crypto_op_cookie *sgl_cookie =
+				qp->op_cookies[i];
+
+		sgl_cookie->qat_sgl_src_phys_addr =
+				rte_mempool_virt2phy(qp->op_cookie_pool,
+				sgl_cookie) +
+				offsetof(struct qat_crypto_op_cookie,
+				qat_sgl_list_src);
+
+		sgl_cookie->qat_sgl_dst_phys_addr =
+				rte_mempool_virt2phy(qp->op_cookie_pool,
+				sgl_cookie) +
+				offsetof(struct qat_crypto_op_cookie,
+				qat_sgl_list_dst);
+ }
+ dev->data->queue_pairs[queue_pair_id] = qp;
+ return 0;
+
+create_err:
+ rte_free(qp);
+ return -EFAULT;
+}
+
+int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct qat_qp *qp =
+ (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
+ uint32_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (qp == NULL) {
+ PMD_DRV_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ /* Don't free memory if there are still responses to be processed */
+ if (rte_atomic16_read(&(qp->inflights16)) == 0) {
+ qat_queue_delete(&(qp->tx_q));
+ qat_queue_delete(&(qp->rx_q));
+ } else {
+ return -EAGAIN;
+ }
+
+ adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+
+ for (i = 0; i < qp->nb_descriptors; i++)
+ rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ dev->data->queue_pairs[queue_pair_id] = NULL;
+ return 0;
+}
+
+static int qat_tx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t qp_id,
+ uint32_t nb_desc, int socket_id)
+{
+ PMD_INIT_FUNC_TRACE();
+ queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
+ queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
+ ADF_SYM_TX_QUEUE_STARTOFF;
+ PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
+ nb_desc, qp_id, queue->hw_bundle_number,
+ queue->hw_queue_number);
+
+ return qat_queue_create(dev, queue, nb_desc,
+ ADF_SYM_TX_RING_DESC_SIZE, socket_id);
+}
+
+static int qat_rx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
+ int socket_id)
+{
+ PMD_INIT_FUNC_TRACE();
+ queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
+ queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
+ ADF_SYM_RX_QUEUE_STARTOFF;
+
+ PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
+ nb_desc, qp_id, queue->hw_bundle_number,
+ queue->hw_queue_number);
+ return qat_queue_create(dev, queue, nb_desc,
+ ADF_SYM_RX_RING_DESC_SIZE, socket_id);
+}
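
As a concrete mapping, and assuming the customary layout of two sym queue
pairs per bundle (ADF_NUM_SYM_QPS_PER_BUNDLE == 2; see
adf_transport_access_macros.h for the authoritative constant), qp_id 3
lands on hw bundle 1 and uses hardware ring 3 for Tx (1 + startoff 2) and
ring 11 for Rx (1 + startoff 10).
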
+
+static void qat_queue_delete(struct qat_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status = 0;
+
+ if (queue == NULL) {
+ PMD_DRV_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ /* Write an unused pattern to the queue memory. */
+ memset(queue->base_addr, 0x7F, queue->queue_size);
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
+ status, queue->memz_name);
+ } else {
+ PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
+ queue->memz_name);
+ }
+}
+
+static int
+qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
+ uint32_t nb_desc, uint8_t desc_size, int socket_id)
+{
+ uint64_t queue_base;
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ uint32_t queue_size_bytes = nb_desc*desc_size;
+ struct rte_pci_device *pci_dev;
+
+ PMD_INIT_FUNC_TRACE();
+ if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a memzone for the queue - create a unique name.
+ */
+ snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
+ dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
+ queue->hw_bundle_number, queue->hw_queue_number);
+ qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ socket_id);
+ if (qp_mz == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (char *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->phys_addr;
+ if (qat_qp_check_queue_alignment(queue->base_phys_addr,
+ queue_size_bytes)) {
+		PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
+			"0x%"PRIx64,
+			queue->base_phys_addr);
+ return -EFAULT;
+ }
+
+ if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
+ != 0) {
+ PMD_DRV_LOG(ERR, "Invalid num inflights");
+ return -EINVAL;
+ }
+
+ queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
+ ADF_BYTES_TO_MSG_SIZE(desc_size));
+ queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
+ PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
+ " msg_size %u, max_inflights %u modulo %u",
+ queue->queue_size, queue_size_bytes,
+ nb_desc, desc_size, queue->max_inflights,
+ queue->modulo);
+
+ if (queue->max_inflights < 2) {
+ PMD_DRV_LOG(ERR, "Invalid num inflights");
+ return -EINVAL;
+ }
+ queue->head = 0;
+ queue->tail = 0;
+ queue->msg_size = desc_size;
+
+ /*
+ * Write an unused pattern to the queue memory.
+ */
+ memset(queue->base_addr, 0x7F, queue_size_bytes);
+
+ queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+ queue->queue_size);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ io_addr = pci_dev->mem_resource[0].addr;
+
+ WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_base);
+ return 0;
+}
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (((queue_size_bytes - 1) & phys_addr) != 0)
+ return -EINVAL;
+ return 0;
+}
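
The alignment rule here is natural alignment: the ring's physical base must
be aligned to its own (power-of-two) size. For a 16 KiB ring, for example,
a base of 0x7ff4000 passes while 0x7ff5000 fails, since
(0x4000 - 1) & phys_addr must be zero.
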
+
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *p_queue_size_for_csr)
+{
+ uint8_t i = ADF_MIN_RING_SIZE;
+
+ PMD_INIT_FUNC_TRACE();
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) ==
+ (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+ *p_queue_size_for_csr = i;
+ return 0;
+ }
+ PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
+ return -EINVAL;
+}
+
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ PMD_INIT_FUNC_TRACE();
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value |= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+}
+
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ PMD_INIT_FUNC_TRACE();
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+	/* Clear (rather than toggle) the arbitration enable bit */
+	value &= ~(0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+}
+
+static void adf_configure_queues(struct qat_qp *qp)
+{
+ uint32_t queue_config;
+ struct qat_queue *queue = &qp->tx_q;
+
+ PMD_INIT_FUNC_TRACE();
+ queue_config = BUILD_RING_CONFIG(queue->queue_size);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+
+ queue = &qp->rx_q;
+ queue_config =
+ BUILD_RESP_RING_CONFIG(queue->queue_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+}
diff --git a/src/seastar/dpdk/drivers/crypto/qat/rte_pmd_qat_version.map b/src/seastar/dpdk/drivers/crypto/qat/rte_pmd_qat_version.map
new file mode 100644
index 00000000..bbaf1c85
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/rte_pmd_qat_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
\ No newline at end of file
diff --git a/src/seastar/dpdk/drivers/crypto/qat/rte_qat_cryptodev.c b/src/seastar/dpdk/drivers/crypto/qat/rte_qat_cryptodev.c
new file mode 100644
index 00000000..1bdd30da
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_crypto.h"
+#include "qat_logs.h"
+
+static const struct rte_cryptodev_capabilities qat_cpm16_capabilities[] = {
+ QAT_BASE_CPM16_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_cpm17_capabilities[] = {
+ QAT_BASE_CPM16_SYM_CAPABILITIES,
+ QAT_EXTRA_CPM17_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_dev_config,
+ .dev_start = qat_dev_start,
+ .dev_stop = qat_dev_stop,
+ .dev_close = qat_dev_close,
+ .dev_infos_get = qat_dev_info_get,
+
+ .stats_get = qat_crypto_sym_stats_get,
+ .stats_reset = qat_crypto_sym_stats_reset,
+ .queue_pair_setup = qat_crypto_sym_qp_setup,
+ .queue_pair_release = qat_crypto_sym_qp_release,
+ .queue_pair_start = NULL,
+ .queue_pair_stop = NULL,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .session_get_size = qat_crypto_sym_get_session_private_size,
+ .session_configure = qat_crypto_sym_configure_session,
+ .session_initialize = qat_crypto_sym_session_init,
+ .session_clear = qat_crypto_sym_clear_session
+};
+
+/*
+ * The set of PCI devices this driver supports
+ */
+
+static const struct rte_pci_id pci_id_qat_map[] = {
+ {
+ RTE_PCI_DEVICE(0x8086, 0x0443),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x37c9),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x19e3),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x6f55),
+ },
+ {.device_id = 0},
+};
+
+static int
+crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_drv,
+ struct rte_cryptodev *cryptodev)
+{
+ struct qat_pmd_private *internals;
+
+ PMD_INIT_FUNC_TRACE();
+ PMD_DRV_LOG(DEBUG, "Found crypto device at %02x:%02x.%x",
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.bus,
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
+
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
+ switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
+ case 0x0443:
+ internals->qat_dev_capabilities = qat_cpm16_capabilities;
+ break;
+ case 0x37c9:
+ case 0x19e3:
+ case 0x6f55:
+ internals->qat_dev_capabilities = qat_cpm17_capabilities;
+ break;
+ default:
+ PMD_DRV_LOG(ERR,
+ "Invalid dev_id, can't determine capabilities");
+ break;
+ }
+
+	/*
+	 * For secondary processes, don't initialise any further as the
+	 * primary process has already done this work.
+	 */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ PMD_DRV_LOG(DEBUG, "Device already initialised by primary process");
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct rte_cryptodev_driver rte_qat_pmd = {
+ .pci_drv = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_cryptodev_pci_probe,
+ .remove = rte_cryptodev_pci_remove,
+ },
+ .cryptodev_init = crypto_qat_dev_init,
+ .dev_private_size = sizeof(struct qat_pmd_private),
+};
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
+