Diffstat (limited to 'src/spdk/dpdk/drivers/common')
-rw-r--r--  src/spdk/dpdk/drivers/common/Makefile | 43
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/Makefile | 24
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_common.h | 81
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_fpm_tables.c | 1138
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_hw_types.h | 581
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_mcode_defines.h | 442
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_pmd_logs.h | 48
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.c | 56
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.h | 62
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_ucode.h | 3505
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/cpt_ucode_asym.h | 915
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/meson.build | 8
-rw-r--r--  src/spdk/dpdk/drivers/common/cpt/rte_common_cpt_version.map | 18
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/Makefile | 30
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib.c | 16
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/compat.h | 126
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/desc.h | 2110
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/algo.h | 792
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/common.h | 98
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h | 1613
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h | 3753
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta.h | 921
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/fifo_load_store_cmd.h | 314
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/header_cmd.h | 231
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/jump_cmd.h | 173
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/key_cmd.h | 190
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/load_cmd.h | 306
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/math_cmd.h | 371
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/move_cmd.h | 412
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/nfifo_cmd.h | 163
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h | 570
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/protocol_cmd.h | 710
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/sec_run_time_asm.h | 823
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/seq_in_out_ptr_cmd.h | 178
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/signature_cmd.h | 42
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/store_cmd.h | 152
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/compat.h | 393
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/dpaa_list.h | 75
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/dpaa_of.c | 589
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/dpaa_of.h | 187
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/dpaax_iova_table.c | 471
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/dpaax_iova_table.h | 111
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/dpaax_logs.h | 49
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/meson.build | 19
-rw-r--r--  src/spdk/dpdk/drivers/common/dpaax/rte_common_dpaax_version.map | 26
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/Makefile | 27
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/README | 20
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_adminq.c | 962
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_adminq.h | 94
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_adminq_cmd.h | 652
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_alloc.h | 36
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_common.c | 1025
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_devids.h | 17
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_impl.c | 95
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_osdep.h | 199
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_prototype.h | 94
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_register.h | 93
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_status.h | 79
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_type.h | 1014
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/meson.build | 8
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/rte_common_iavf_version.map | 12
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/virtchnl.h | 1311
-rw-r--r--  src/spdk/dpdk/drivers/common/meson.build | 7
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/Makefile | 362
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/meson.build | 227
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_common.c | 434
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_common.h | 218
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mp.c | 188
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mp.h | 98
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mr.c | 1110
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mr.h | 160
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_common_utils.h | 20
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c | 1534
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h | 377
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_glue.c | 1294
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_glue.h | 309
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_nl.c | 1721
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_nl.h | 79
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/mlx5_prm.h | 2560
-rw-r--r--  src/spdk/dpdk/drivers/common/mlx5/rte_common_mlx5_version.map | 75
-rw-r--r--  src/spdk/dpdk/drivers/common/mvep/Makefile | 35
-rw-r--r--  src/spdk/dpdk/drivers/common/mvep/meson.build | 20
-rw-r--r--  src/spdk/dpdk/drivers/common/mvep/mvep_common.c | 45
-rw-r--r--  src/spdk/dpdk/drivers/common/mvep/rte_common_mvep_version.map | 8
-rw-r--r--  src/spdk/dpdk/drivers/common/mvep/rte_mvep_common.h | 21
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx/Makefile | 22
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx/meson.build | 5
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.c | 358
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.h | 46
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map | 12
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/Makefile | 39
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_nix.h | 1391
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npa.h | 305
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npc.h | 482
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_rvu.h | 212
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sdp.h | 184
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sso.h | 209
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_ssow.h | 56
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_tim.h | 34
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/meson.build | 26
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_common.c | 291
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_common.h | 171
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.c | 1043
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.h | 154
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_io_arm64.h | 95
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_io_generic.h | 63
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.c | 254
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.h | 28
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.c | 462
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.h | 1773
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.c | 183
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.h | 43
-rw-r--r--  src/spdk/dpdk/drivers/common/octeontx2/rte_common_octeontx2_version.map | 45
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/Makefile | 71
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/meson.build | 15
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h | 136
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h | 323
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h | 555
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h | 374
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h | 1538
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_pke.h | 426
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h | 405
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h | 79
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_common.c | 126
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_common.h | 83
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_device.c | 378
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_device.h | 138
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_logs.c | 34
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_logs.h | 33
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_qp.c | 992
-rw-r--r--  src/spdk/dpdk/drivers/common/qat/qat_qp.h | 119
131 files changed, 53386 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/common/Makefile b/src/spdk/dpdk/drivers/common/Makefile
new file mode 100644
index 000000000..df2e840cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/Makefile
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CPT-y := $(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO)
+CPT-y += $(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO)
+ifneq (,$(findstring y,$(CPT-y)))
+DIRS-y += cpt
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL),yy)
+DIRS-y += octeontx
+endif
+OCTEONTX2-y := $(CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL)
+OCTEONTX2-y += $(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO)
+ifeq ($(findstring y,$(OCTEONTX2-y)),y)
+DIRS-y += octeontx2
+endif
+
+MVEP-y := $(CONFIG_RTE_LIBRTE_MVPP2_PMD)
+MVEP-y += $(CONFIG_RTE_LIBRTE_MVNETA_PMD)
+MVEP-y += $(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO)
+ifneq (,$(findstring y,$(MVEP-y)))
+DIRS-y += mvep
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_COMMON_DPAAX),y)
+DIRS-y += dpaax
+endif
+
+IAVF-y := $(CONFIG_RTE_LIBRTE_IAVF_PMD)
+IAVF-y += $(CONFIG_RTE_LIBRTE_ICE_PMD)
+ifneq (,$(findstring y,$(IAVF-y)))
+DIRS-y += iavf
+endif
+
+ifeq ($(findstring y,$(CONFIG_RTE_LIBRTE_MLX5_PMD)$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD)),y)
+DIRS-y += mlx5
+endif
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/common/cpt/Makefile b/src/spdk/dpdk/drivers/common/cpt/Makefile
new file mode 100644
index 000000000..cab9da73c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_cpt.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/pci
+EXPORT_MAP := rte_common_cpt_version.map
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-y += cpt_fpm_tables.c
+SRCS-y += cpt_pmd_ops_helper.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_common.h b/src/spdk/dpdk/drivers/common/cpt/cpt_common.h
new file mode 100644
index 000000000..56bfea495
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_common.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_COMMON_H_
+#define _CPT_COMMON_H_
+
+#include <rte_mempool.h>
+
+/*
+ * This file defines common macros and structs
+ */
+
+#define TIME_IN_RESET_COUNT 5
+
+/* Default command timeout in seconds */
+#define DEFAULT_COMMAND_TIMEOUT 4
+
+#define CPT_COUNT_THOLD 32
+#define CPT_TIMER_THOLD 0x3F
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#ifndef ROUNDUP8
+#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
+#endif
+
+#ifndef ROUNDUP16
+#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
+#endif
+
+#define MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
+
+struct cpt_qp_meta_info {
+ struct rte_mempool *pool;
+ int sg_mlen;
+ int lb_mlen;
+};
+
+struct rid {
+ /** Request id of a crypto operation */
+ uintptr_t rid;
+};
+
+/*
+ * Pending queue structure
+ *
+ */
+struct pending_queue {
+ /** Pending requests count */
+ uint64_t pending_count;
+ /** Array of pending requests */
+ struct rid *rid_queue;
+ /** Tail of queue to be used for enqueue */
+ uint16_t enq_tail;
+ /** Head of queue to be used for dequeue */
+ uint16_t deq_head;
+};
+
+struct cpt_request_info {
+ /** Data path fields */
+ uint64_t comp_baddr;
+ volatile uint64_t *completion_addr;
+ volatile uint64_t *alternate_caddr;
+ void *op;
+ struct {
+ uint64_t ei0;
+ uint64_t ei1;
+ uint64_t ei2;
+ uint64_t ei3;
+ } ist;
+ uint8_t *rptr;
+
+ /** Control path fields */
+ uint64_t time_out;
+ uint8_t extra_time;
+} __rte_cache_aligned;
+
+#endif /* _CPT_COMMON_H_ */
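
Taken together, MOD_INC and struct pending_queue describe a simple ring of in-flight request ids: submissions advance enq_tail, completions advance deq_head, and both wrap at the queue length. The following is a minimal sketch of that pattern, assuming a fixed queue length PQ_LEN and hypothetical pq_submit()/pq_retire() helpers that are not part of the driver; the struct mirrors the header above.

#include <stdint.h>

#define PQ_LEN 64	/* illustrative queue length, not from the driver */
#define MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)	/* as in cpt_common.h */

struct rid {
	uintptr_t rid;
};

struct pending_queue {
	uint64_t pending_count;
	struct rid *rid_queue;	/* array of PQ_LEN entries */
	uint16_t enq_tail;
	uint16_t deq_head;
};

/* Hypothetical helper: record a submitted request at the tail, wrapping. */
static int pq_submit(struct pending_queue *pq, uintptr_t req)
{
	if (pq->pending_count >= PQ_LEN)
		return -1;	/* ring full */
	pq->rid_queue[pq->enq_tail].rid = req;
	MOD_INC(pq->enq_tail, PQ_LEN);
	pq->pending_count++;
	return 0;
}

/* Hypothetical helper: retire the oldest request from the head. */
static uintptr_t pq_retire(struct pending_queue *pq)
{
	uintptr_t req = pq->rid_queue[pq->deq_head].rid;

	MOD_INC(pq->deq_head, PQ_LEN);
	pq->pending_count--;
	return req;
}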
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_fpm_tables.c b/src/spdk/dpdk/drivers/common/cpt/cpt_fpm_tables.c
new file mode 100644
index 000000000..15b665db0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_fpm_tables.c
@@ -0,0 +1,1138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "cpt_mcode_defines.h"
+#include "cpt_pmd_logs.h"
+#include "cpt_pmd_ops_helper.h"
+
+/*
+ * CPT FPM table sizes enumeration
+ *
+ * Each length is 15 table entries * (X, Y, Z coordinates) * coordinate
+ * offset, in bytes (8 bytes per quadword). The offset depends on the curve:
+ * 6 quadwords for P-192, P-224 and P-256
+ * 7 quadwords for P-384
+ * 9 quadwords for P-521
+ */
+typedef enum {
+ CPT_P192_LEN = 2160,
+ CPT_P224_LEN = 2160,
+ CPT_P256_LEN = 2160,
+ CPT_P384_LEN = 2520,
+ CPT_P521_LEN = 3240
+} cpt_fpm_len_t;
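
As a quick cross-check of that arithmetic (an editorial sketch, not part of the file): 15 entries * 3 coordinates * N quadwords * 8 bytes reproduces each enumerator, which C11 static_assert can verify at compile time.

#include <assert.h>

/* length = 15 entries * 3 coordinates * quadwords per coordinate * 8 bytes */
static_assert(15 * 3 * 6 * 8 == 2160, "P-192/P-224/P-256 table length");
static_assert(15 * 3 * 7 * 8 == 2520, "P-384 table length");
static_assert(15 * 3 * 9 * 8 == 3240, "P-521 table length");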
+
+/* FPM table address and length */
+struct fpm_entry {
+ const uint8_t *data;
+ int len;
+};
+
+/*
+ * Pre-computed ECC FMUL tables needed by cpt microcode
+ * for NIST curves P-192, P-256, P-384, P-521, P-224.
+ */
+
+static const uint8_t fpm_table_p192[CPT_P192_LEN] = {
+ 0xf4, 0xff, 0x0a, 0xfd, 0x82, 0xff, 0x10, 0x12, 0x7c, 0xbf, 0x20, 0xeb,
+ 0x43, 0xa1, 0x88, 0x00, 0x18, 0x8d, 0xa8, 0x0e, 0xb0, 0x30, 0x90, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0xf9, 0x77, 0xa1, 0x1e, 0x79, 0x48, 0x11, 0x63, 0x10, 0x11, 0xed,
+ 0x6b, 0x24, 0xcd, 0xd5, 0x07, 0x19, 0x2b, 0x95, 0xff, 0xc8, 0xda, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xc3, 0x96, 0x49, 0xc5, 0x5d, 0x7c, 0x48, 0xd8, 0xeb, 0x2c, 0xdf, 0xae,
+ 0x5a, 0x92, 0x7c, 0x35, 0x67, 0xe3, 0x0c, 0xbd, 0xcb, 0xa6, 0x71, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7a, 0x83, 0xce, 0xe1, 0xec, 0xbf, 0xbe, 0x7d, 0xce, 0x32, 0xd0, 0x3c,
+ 0x06, 0x30, 0x15, 0x77, 0xa9, 0x35, 0x49, 0xc4, 0x58, 0x10, 0xf5, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x6f, 0x5e, 0xf8, 0x89, 0x66, 0xe3, 0xea, 0xd3, 0xf2, 0x9e, 0x6f, 0xea,
+ 0xdf, 0xc9, 0xbf, 0x1a, 0xce, 0x21, 0x6b, 0xb8, 0x45, 0x20, 0x06, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x46, 0xb9, 0x09, 0x2d, 0x92, 0x7b, 0x37, 0x79, 0x1d, 0x0a, 0xeb, 0x4b,
+ 0xb5, 0xb8, 0x0a, 0x20, 0xd9, 0x8a, 0x2e, 0xe2, 0x5a, 0xae, 0xc9, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xb1, 0x99, 0x63, 0xd8, 0xc0, 0xa1, 0xe3, 0x40, 0x47, 0x30, 0xd4, 0xf4,
+ 0x80, 0xd1, 0x09, 0x0b, 0x51, 0xa5, 0x81, 0xd9, 0x18, 0x4a, 0xc7, 0x37,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xec, 0xc5, 0x67, 0x31, 0xe6, 0x99, 0x12, 0xa5, 0x7c, 0xdf, 0xce, 0xa0,
+ 0x2f, 0x68, 0x3f, 0x16, 0x5b, 0xd8, 0x1e, 0xe2, 0xe0, 0xbb, 0x9f, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe4, 0xb1, 0x5a, 0x2d, 0xd4, 0xf4, 0x33, 0x74, 0x07, 0x57, 0xee, 0xa7,
+ 0xf2, 0x92, 0xc3, 0x41, 0x0c, 0x73, 0x06, 0x91, 0xd0, 0xf8, 0xdc, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xdf, 0x79, 0x78, 0x90, 0xbb, 0xf4, 0x5e, 0x00, 0x00, 0x8a, 0x9e, 0x83,
+ 0xe9, 0xde, 0x87, 0x08, 0x31, 0xb2, 0x4c, 0x31, 0x93, 0x54, 0xde, 0x3e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xcb, 0x5e, 0xc0, 0x43, 0xdd, 0xf6, 0x3a, 0xba, 0xc9, 0x4c, 0x21, 0xd9,
+ 0xf8, 0x4f, 0x41, 0xe1, 0xf0, 0xf4, 0x08, 0x83, 0x61, 0xd2, 0x44, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf3, 0x75, 0x85, 0xb0, 0x40, 0x64, 0x95, 0xf7, 0xe5, 0xde, 0x3b, 0x5b,
+ 0x16, 0xbc, 0xd0, 0xca, 0x27, 0x85, 0x3c, 0x1a, 0xe1, 0x3e, 0xa4, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd0, 0x74, 0x23, 0x2a, 0x8e, 0x8a, 0xe6, 0x8f, 0x74, 0x9e, 0x52, 0x8e,
+ 0xee, 0x29, 0xf7, 0xa9, 0x06, 0x11, 0xde, 0xa3, 0x97, 0x16, 0x46, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x66, 0xb8, 0x67, 0xdd, 0x0d, 0x80, 0x43, 0xcc, 0x6a, 0x65, 0x46, 0x54,
+ 0x3a, 0x72, 0x7d, 0xe6, 0xf9, 0x54, 0x60, 0x52, 0x83, 0x38, 0xbd, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xb6, 0xeb, 0x71, 0x93, 0x0c, 0x5d, 0x8f, 0x50, 0x1c, 0x24, 0x5c, 0x02,
+ 0xb9, 0x04, 0xb5, 0x96, 0x04, 0xbc, 0x1f, 0x71, 0x95, 0x1f, 0x75, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xa4, 0xd0, 0x91, 0x6e, 0xbe, 0x34, 0x80, 0x3d, 0x8b, 0xec, 0x94, 0x8a,
+ 0x8c, 0x21, 0x96, 0x2a, 0x15, 0x00, 0x96, 0xe7, 0xfd, 0x69, 0xf8, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xbd, 0x44, 0xff, 0xe8, 0xe7, 0x1a, 0xac, 0x0c, 0x7d, 0x69, 0xa0, 0xb0,
+ 0x43, 0x22, 0xd0, 0x65, 0x9f, 0x56, 0xd9, 0x6c, 0xec, 0xa3, 0xba, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xee, 0x59, 0xf0, 0xd1, 0x25, 0xa5, 0x9d, 0xce, 0x83, 0x7d, 0x62, 0xdd,
+ 0xc3, 0xf4, 0x57, 0x5a, 0xa4, 0xe0, 0x7f, 0xb3, 0x35, 0xde, 0x73, 0xd9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xec, 0x76, 0x76, 0x0c, 0x1c, 0xf4, 0x6a, 0xe2, 0xff, 0x54, 0x98, 0x32,
+ 0xa3, 0x3d, 0x44, 0xb0, 0xe9, 0x5a, 0xd2, 0x10, 0xf3, 0x18, 0x5c, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x27, 0x3e, 0x5e, 0xc5, 0x38, 0xed, 0x37, 0x2e, 0x51, 0xd3, 0x91, 0x36,
+ 0xb0, 0xab, 0x11, 0x69, 0xa5, 0xea, 0x86, 0xf6, 0x8f, 0x3a, 0x27, 0xc8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x29, 0x12, 0x37, 0xea, 0x74, 0xd2, 0xd7, 0xd5, 0x95, 0x36, 0x36, 0xee,
+ 0x56, 0x33, 0x8e, 0x9b, 0x0d, 0xa6, 0x5e, 0x86, 0x28, 0x5c, 0x12, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x13, 0x02, 0xf0, 0x4c, 0xf1, 0x3c, 0x32, 0x33, 0xfc, 0x89, 0x8a, 0xb9,
+ 0x97, 0x83, 0x91, 0xb2, 0x26, 0xd6, 0x5c, 0x2e, 0x3a, 0xa0, 0x62, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd5, 0x09, 0x47, 0xa8, 0x18, 0xc5, 0xef, 0xe6, 0x45, 0xdb, 0x23, 0xae,
+ 0xfe, 0x11, 0x3c, 0x6c, 0x91, 0xf1, 0x99, 0xf2, 0xe5, 0xbb, 0xe8, 0x6d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x37, 0x68, 0x81, 0xb6, 0x60, 0xfe, 0xc0, 0x64, 0x38, 0x73, 0x43, 0xe9,
+ 0x47, 0x5d, 0xae, 0xa4, 0xec, 0xcd, 0x57, 0xe8, 0xac, 0x8d, 0x8a, 0x19,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xc9, 0xfe, 0xf5, 0xb9, 0x5b, 0x51, 0x02, 0x28, 0x37, 0x4c, 0x0a, 0x4c,
+ 0x19, 0x2e, 0xbc, 0xd6, 0x22, 0x98, 0xf2, 0x04, 0xce, 0x6a, 0x83, 0xf9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x46, 0xe4, 0xb8, 0x20, 0xf4, 0xc5, 0x74, 0xd0, 0x06, 0xd5, 0x86, 0x44,
+ 0xef, 0xeb, 0x2c, 0xc0, 0xe7, 0x13, 0xa4, 0x00, 0x10, 0xc3, 0xc9, 0x49,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x61, 0x78, 0xcb, 0x0e, 0x2d, 0x64, 0xee, 0xdf, 0x27, 0xaf, 0x7d, 0x5e,
+ 0xb8, 0x5e, 0x1f, 0x99, 0xd8, 0x73, 0xce, 0xd7, 0x6c, 0xb7, 0xbe, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xef, 0xc9, 0x12, 0x9c, 0x52, 0xa6, 0x7f, 0x9c, 0xa3, 0xd7, 0xb9, 0x57,
+ 0x60, 0x04, 0xd9, 0xad, 0xfc, 0x59, 0x98, 0x08, 0xdc, 0x41, 0xf8, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xbb, 0x6c, 0x6b, 0x59, 0x7f, 0xdf, 0x92, 0x8a, 0xad, 0x16, 0x7e, 0xf0,
+ 0xd7, 0xf9, 0x3b, 0xf4, 0xfa, 0xa9, 0xe4, 0x32, 0x15, 0x4e, 0x06, 0x1c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0c, 0x3d, 0x0d, 0x63, 0xd5, 0x2c, 0x8f, 0x3f, 0x61, 0x01, 0xb2, 0xbe,
+ 0xd5, 0xf7, 0xe0, 0x8f, 0xd8, 0x77, 0xcd, 0xdd, 0xd6, 0xae, 0x3c, 0xf3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+static const uint8_t fpm_table_p224[CPT_P224_LEN] = {
+ 0x34, 0x32, 0x80, 0xd6, 0x11, 0x5c, 0x1d, 0x21, 0x4a, 0x03, 0xc1, 0xd3,
+ 0x56, 0xc2, 0x11, 0x22, 0x6b, 0xb4, 0xbf, 0x7f, 0x32, 0x13, 0x90, 0xb9,
+ 0x00, 0x00, 0x00, 0x00, 0xb7, 0x0e, 0x0c, 0xbd, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34, 0xcd, 0x43, 0x75, 0xa0,
+ 0x5a, 0x07, 0x47, 0x64, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, 0xdf, 0xe6,
+ 0x00, 0x00, 0x00, 0x00, 0xbd, 0x37, 0x63, 0x88, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xc8, 0xae, 0x4d, 0x94, 0xff, 0x48, 0xdb, 0xc5, 0xb5, 0xc8, 0x8b, 0x66,
+ 0x32, 0xc8, 0x7a, 0x44, 0x66, 0xc7, 0x27, 0x87, 0x2b, 0x8d, 0x08, 0x1c,
+ 0x00, 0x00, 0x00, 0x00, 0x5b, 0xe5, 0xde, 0x8b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xc6, 0xf8, 0x1e, 0x08, 0x47, 0xfb, 0x64, 0xdb, 0xc8, 0xe3, 0x75, 0x3e,
+ 0x9d, 0x5a, 0x58, 0x31, 0xa2, 0x13, 0x38, 0x8c, 0x65, 0x8a, 0x02, 0xae,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0x52, 0x6c, 0x0d, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xee, 0x8f, 0x93, 0x0d, 0x2b, 0x30, 0x9e, 0xe8, 0xb6, 0x78, 0xea, 0x1a,
+ 0x0f, 0x59, 0x7e, 0x02, 0x14, 0x74, 0x52, 0x56, 0x6c, 0x25, 0x7d, 0x3e,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0xbe, 0x54, 0xb7, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf6, 0x12, 0x1f, 0xdd, 0x96, 0xa2, 0x05, 0xda, 0x12, 0xa8, 0xe4, 0xf9,
+ 0x98, 0x15, 0x8e, 0xe1, 0x1b, 0x1d, 0x05, 0x44, 0x47, 0xf2, 0xc3, 0x3a,
+ 0x00, 0x00, 0x00, 0x00, 0x32, 0xf7, 0x1c, 0x32, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x26, 0x73, 0x99, 0x28, 0x0e, 0x4e, 0x09, 0x58, 0x79, 0xab, 0xae, 0x5c,
+ 0xa8, 0xeb, 0x9c, 0x0b, 0xe9, 0xa8, 0xac, 0xf0, 0x74, 0x0e, 0xa3, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xb6, 0xce, 0x42, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x92, 0x09, 0xdc, 0xfe, 0x40, 0x85, 0x7c, 0x64, 0xa2, 0x3f, 0xe4, 0x34,
+ 0x50, 0xb4, 0x25, 0x87, 0x2a, 0x6f, 0x38, 0x62, 0xb6, 0xfe, 0x44, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x9e, 0xd1, 0x3b, 0x1b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf9, 0xfb, 0xf4, 0x91, 0x9a, 0x5f, 0x1c, 0x42, 0x56, 0x8b, 0xc4, 0x34,
+ 0x8a, 0x69, 0xdd, 0x65, 0x3d, 0x01, 0x11, 0x6e, 0x47, 0x78, 0xdf, 0x49,
+ 0x00, 0x00, 0x00, 0x00, 0x56, 0x65, 0xff, 0xd2, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xca, 0x4b, 0x80, 0x12, 0xe5, 0x3d, 0x3f, 0xb4, 0xe7, 0x61, 0x44, 0x25,
+ 0x89, 0xec, 0x86, 0x76, 0x1f, 0xde, 0x69, 0x6f, 0xcb, 0x2b, 0xe1, 0x15,
+ 0x00, 0x00, 0x00, 0x00, 0xc5, 0xf0, 0x98, 0xeb, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe6, 0xf2, 0x88, 0xf1, 0x39, 0xb0, 0xa5, 0xad, 0x0c, 0x76, 0x16, 0x85,
+ 0x58, 0x72, 0xd0, 0x94, 0x8a, 0xbc, 0x69, 0xb6, 0x6d, 0x7a, 0xc7, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x64, 0xe1, 0x68, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe1, 0x94, 0x7b, 0x17, 0x96, 0xa9, 0x76, 0xcf, 0x67, 0x78, 0x38, 0x8a,
+ 0xd9, 0xb7, 0x68, 0xd7, 0x75, 0x39, 0xb7, 0x24, 0x17, 0x76, 0xa8, 0x39,
+ 0x00, 0x00, 0x00, 0x00, 0xb4, 0xdb, 0x5a, 0x94, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0x5a, 0x09, 0xbe, 0xdf, 0x5b, 0x10, 0x74, 0x74, 0xc0, 0xce, 0xf5,
+ 0x95, 0xab, 0xef, 0xf7, 0xba, 0x69, 0x9c, 0xe4, 0x14, 0xb3, 0x90, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0xc0, 0xca, 0x10, 0xcb, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x55, 0x87, 0xbb, 0x19, 0x41, 0x02, 0x67, 0xde, 0xa7, 0x71, 0xb8, 0xce,
+ 0xa6, 0x3f, 0xcc, 0x78, 0xbc, 0xa5, 0x91, 0x7d, 0x0e, 0x8d, 0x16, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x63, 0x9e, 0x9e, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe8, 0xb7, 0x37, 0x97, 0x4e, 0x1b, 0xfb, 0x82, 0x16, 0x71, 0x58, 0x39,
+ 0x55, 0xb6, 0x32, 0xb8, 0xf6, 0x75, 0x2f, 0xdd, 0x34, 0x7a, 0x0d, 0x7b,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x24, 0xb5, 0x67, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xea, 0xce, 0x1f, 0x14, 0x00, 0xe2, 0xe0, 0x16, 0x71, 0xe5, 0x9b, 0x8c,
+ 0x60, 0x8d, 0x20, 0x97, 0x2b, 0x07, 0xec, 0x89, 0x89, 0x37, 0x5d, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe2, 0x8c, 0xdc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x9a, 0x88, 0x5f, 0xdf, 0xf5, 0x12, 0x07, 0xbf, 0xc5, 0xbd, 0xd8, 0x15,
+ 0x01, 0x55, 0x75, 0x02, 0xf1, 0x96, 0x50, 0x03, 0x6b, 0xbd, 0xd0, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0xda, 0x9b, 0x7e, 0x0c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf2, 0xa7, 0x4c, 0xce, 0x47, 0xb9, 0x7d, 0x42, 0xf7, 0xb5, 0x5d, 0x63,
+ 0x28, 0xf5, 0xde, 0x75, 0x06, 0x13, 0xe9, 0x5d, 0x9b, 0x48, 0x88, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0xd6, 0x05, 0x07, 0x13, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xa4, 0xee, 0x4e, 0xe7, 0xb4, 0x4a, 0xda, 0x48, 0xe3, 0x67, 0x5f, 0xf9,
+ 0x15, 0xda, 0x1a, 0x27, 0x33, 0x6b, 0x97, 0x6a, 0x82, 0x0d, 0xa0, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x5b, 0x6e, 0x9f, 0xfd, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf5, 0x2c, 0xd8, 0x87, 0x2f, 0xf4, 0xa8, 0x85, 0x53, 0x29, 0x86, 0xf1,
+ 0xb9, 0x1a, 0x28, 0xdc, 0xaf, 0x35, 0x40, 0xdd, 0x75, 0xbf, 0x86, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0xce, 0x4f, 0x78, 0xa9, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x4d, 0x51, 0xfa, 0xb9, 0xe8, 0x49, 0xeb, 0x79, 0xd4, 0xc3, 0x0d, 0x04,
+ 0xdd, 0xf6, 0xb1, 0xcd, 0x6d, 0x51, 0xe5, 0x58, 0x06, 0x2a, 0x7e, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0xe6, 0xe4, 0xf0, 0x9a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe0, 0x85, 0xf1, 0x4f, 0xe7, 0x07, 0x60, 0xb5, 0x45, 0x7a, 0xc8, 0x3d,
+ 0xf3, 0x26, 0xd8, 0xb0, 0xc7, 0x94, 0x33, 0x72, 0x7e, 0xef, 0x87, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x74, 0x6d, 0x83, 0xb0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7a, 0x24, 0xee, 0x7e, 0x21, 0x7b, 0x0a, 0x33, 0x4e, 0xa1, 0x1e, 0x5c,
+ 0xca, 0x30, 0xf9, 0x68, 0xc2, 0xc4, 0x7d, 0x0a, 0xa0, 0xc7, 0xbe, 0xeb,
+ 0x00, 0x00, 0x00, 0x00, 0xca, 0x62, 0xa6, 0x8b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5c, 0x26, 0x7c, 0x9c, 0xfb, 0xe5, 0x6a, 0x33, 0x89, 0x37, 0x74, 0x21,
+ 0x13, 0x71, 0x46, 0x6b, 0x60, 0xd0, 0x38, 0xc4, 0x90, 0xef, 0x7d, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x27, 0xde, 0xcf, 0x82, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x14, 0xf5, 0x42, 0xbe, 0x9e, 0x07, 0x9c, 0x4a, 0x60, 0x56, 0x3b,
+ 0xcf, 0xe2, 0x06, 0x81, 0xb0, 0xc0, 0x46, 0x49, 0xfb, 0x97, 0x61, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x2f, 0xa4, 0x2d, 0x2b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xe5, 0x99, 0xb4, 0xcf, 0x64, 0x05, 0x25, 0xa3, 0x44, 0xe4, 0x18,
+ 0x5f, 0x72, 0x58, 0x47, 0x7f, 0xbd, 0x84, 0xd7, 0x0a, 0x38, 0xa0, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x8d, 0x15, 0xd5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe8, 0x5b, 0x13, 0xf0, 0x09, 0xc6, 0x14, 0x58, 0xc4, 0xf3, 0x1b, 0x3d,
+ 0x17, 0x0f, 0x1c, 0xfa, 0x7d, 0x61, 0x7e, 0x7e, 0x9c, 0xea, 0x52, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x89, 0x05, 0xdb, 0xb7, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x26, 0xf8, 0xab, 0x68, 0xb4, 0x27, 0x56, 0x1d, 0x04, 0xed, 0x8c, 0x65,
+ 0xfd, 0xd1, 0x62, 0x2e, 0x80, 0x4c, 0x4a, 0x1d, 0x67, 0x90, 0x50, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x3b, 0x02, 0x2a, 0x4b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x11, 0xe9, 0x36, 0x25, 0x8d, 0x97, 0x84, 0xea, 0xe9, 0x50, 0x4a, 0x27,
+ 0x66, 0x6e, 0x0c, 0xd2, 0xce, 0x40, 0xfe, 0xfb, 0xf2, 0xc6, 0x53, 0xb4,
+ 0x00, 0x00, 0x00, 0x00, 0xb6, 0x32, 0x22, 0x76, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfc, 0x88, 0xcf, 0x76, 0x3c, 0x0b, 0x0d, 0x76, 0xb2, 0xc3, 0xc7, 0x8c,
+ 0x8c, 0x53, 0x5f, 0x4c, 0xba, 0x0d, 0x13, 0xdb, 0x7b, 0xac, 0xf0, 0x19,
+ 0x00, 0x00, 0x00, 0x00, 0x7e, 0x11, 0x95, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+static const uint8_t fpm_table_p256[CPT_P256_LEN] = {
+ 0xf4, 0xa1, 0x39, 0x45, 0xd8, 0x98, 0xc2, 0x96, 0x77, 0x03, 0x7d, 0x81,
+ 0x2d, 0xeb, 0x33, 0xa0, 0xf8, 0xbc, 0xe6, 0xe5, 0x63, 0xa4, 0x40, 0xf2,
+ 0x6b, 0x17, 0xd1, 0xf2, 0xe1, 0x2c, 0x42, 0x47, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5, 0x2b, 0xce, 0x33, 0x57,
+ 0x6b, 0x31, 0x5e, 0xce, 0x8e, 0xe7, 0xeb, 0x4a, 0x7c, 0x0f, 0x9e, 0x16,
+ 0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x90, 0xe7, 0x5c, 0xb4, 0x8e, 0x14, 0xdb, 0x63, 0x29, 0x49, 0x3b, 0xaa,
+ 0xad, 0x65, 0x1f, 0x7e, 0x84, 0x92, 0x59, 0x2e, 0x32, 0x6e, 0x25, 0xde,
+ 0x0f, 0xa8, 0x22, 0xbc, 0x28, 0x11, 0xaa, 0xa5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe4, 0x11, 0x24, 0x54, 0x5f, 0x46, 0x2e, 0xe7, 0x34, 0xb1, 0xa6, 0x50,
+ 0x50, 0xfe, 0x82, 0xf5, 0x6f, 0x4a, 0xd4, 0xbc, 0xb3, 0xdf, 0x18, 0x8b,
+ 0xbf, 0xf4, 0x4a, 0xe8, 0xf5, 0xdb, 0xa8, 0x0d, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x93, 0x39, 0x1c, 0xe2, 0x09, 0x79, 0x92, 0xaf, 0xe9, 0x6c, 0x98, 0xfd,
+ 0x0d, 0x35, 0xf1, 0xfa, 0xb2, 0x57, 0xc0, 0xde, 0x95, 0xe0, 0x27, 0x89,
+ 0x30, 0x0a, 0x4b, 0xbc, 0x89, 0xd6, 0x72, 0x6f, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xaa, 0x54, 0xa2, 0x91, 0xc0, 0x81, 0x27, 0xa0, 0x5b, 0xb1, 0xee, 0xad,
+ 0xa9, 0xd8, 0x06, 0xa5, 0x7f, 0x1d, 0xdb, 0x25, 0xff, 0x1e, 0x3c, 0x6f,
+ 0x72, 0xaa, 0xc7, 0xe0, 0xd0, 0x9b, 0x46, 0x44, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x57, 0xc8, 0x4f, 0xc9, 0xd7, 0x89, 0xbd, 0x85, 0xfc, 0x35, 0xff, 0x7d,
+ 0xc2, 0x97, 0xea, 0xc3, 0xfb, 0x98, 0x2f, 0xd5, 0x88, 0xc6, 0x76, 0x6e,
+ 0x44, 0x7d, 0x73, 0x9b, 0xee, 0xdb, 0x5e, 0x67, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0c, 0x7e, 0x33, 0xc9, 0x72, 0xe2, 0x5b, 0x32, 0x3d, 0x34, 0x9b, 0x95,
+ 0xa7, 0xfa, 0xe5, 0x00, 0xe1, 0x2e, 0x9d, 0x95, 0x3a, 0x4a, 0xaf, 0xf7,
+ 0x2d, 0x48, 0x25, 0xab, 0x83, 0x41, 0x31, 0xee, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x13, 0x94, 0x9c, 0x93, 0x2a, 0x1d, 0x36, 0x7f, 0xef, 0x7f, 0xbd, 0x2b,
+ 0x1a, 0x0a, 0x11, 0xb7, 0xdd, 0xc6, 0x06, 0x8b, 0xb9, 0x1d, 0xfc, 0x60,
+ 0xef, 0x95, 0x19, 0x32, 0x8a, 0x9c, 0x72, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x19, 0x60, 0x35, 0xa7, 0x73, 0x76, 0xd8, 0xa8, 0x23, 0x18, 0x3b, 0x08,
+ 0x95, 0xca, 0x17, 0x40, 0xc1, 0xee, 0x98, 0x07, 0x02, 0x2c, 0x21, 0x9c,
+ 0x61, 0x1e, 0x9f, 0xc3, 0x7d, 0xbb, 0x2c, 0x9b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xca, 0xe2, 0xb1, 0x92, 0x0b, 0x57, 0xf4, 0xbc, 0x29, 0x36, 0xdf, 0x5e,
+ 0xc6, 0xc9, 0xbc, 0x36, 0x7d, 0xea, 0x64, 0x82, 0xe1, 0x12, 0x38, 0xbf,
+ 0x55, 0x06, 0x63, 0x79, 0x7b, 0x51, 0xf5, 0xd8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x44, 0xff, 0xe2, 0x16, 0x34, 0x8a, 0x96, 0x4c, 0x9f, 0xb3, 0xd5, 0x76,
+ 0xdb, 0xde, 0xfb, 0xe1, 0x0a, 0xfa, 0x40, 0x01, 0x8d, 0x9d, 0x50, 0xe5,
+ 0x15, 0x71, 0x64, 0x84, 0x8a, 0xec, 0xb8, 0x51, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe4, 0x8e, 0xca, 0xff, 0xfc, 0x5c, 0xde, 0x01, 0x7c, 0xcd, 0x84, 0xe7,
+ 0x0d, 0x71, 0x5f, 0x26, 0xa2, 0xe8, 0xf4, 0x83, 0xf4, 0x3e, 0x43, 0x91,
+ 0xeb, 0x5d, 0x77, 0x45, 0xb2, 0x11, 0x41, 0xea, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xca, 0xc9, 0x17, 0xe2, 0x73, 0x1a, 0x34, 0x79, 0x85, 0xf2, 0x2c, 0xfe,
+ 0x28, 0x44, 0xb6, 0x45, 0x09, 0x90, 0xe6, 0xa1, 0x58, 0x00, 0x6c, 0xee,
+ 0xea, 0xfd, 0x72, 0xeb, 0xdb, 0xec, 0xc1, 0x7b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x6c, 0xf2, 0x0f, 0xfb, 0x31, 0x37, 0x28, 0xbe, 0x96, 0x43, 0x95, 0x91,
+ 0xa3, 0xc6, 0xb9, 0x4a, 0x27, 0x36, 0xff, 0x83, 0x44, 0x31, 0x5f, 0xc5,
+ 0xa6, 0xd3, 0x96, 0x77, 0xa7, 0x84, 0x92, 0x76, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf2, 0xba, 0xb8, 0x33, 0xc3, 0x57, 0xf5, 0xf4, 0x82, 0x4a, 0x92, 0x0c,
+ 0x22, 0x84, 0x05, 0x9b, 0x66, 0xb8, 0xba, 0xbd, 0x2d, 0x27, 0xec, 0xdf,
+ 0x67, 0x4f, 0x84, 0x74, 0x9b, 0x0b, 0x88, 0x16, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2d, 0xf4, 0x8c, 0x04, 0x67, 0x7c, 0x8a, 0x3e, 0x74, 0xe0, 0x2f, 0x08,
+ 0x02, 0x03, 0xa5, 0x6b, 0x31, 0x85, 0x5f, 0x7d, 0xb8, 0xc7, 0xfe, 0xdb,
+ 0x4e, 0x76, 0x9e, 0x76, 0x72, 0xc9, 0xdd, 0xad, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xa4, 0xc3, 0x61, 0x65, 0xb8, 0x24, 0xbb, 0xb0, 0xfb, 0x9a, 0xe1, 0x6f,
+ 0x3b, 0x91, 0x22, 0xa5, 0x1e, 0xc0, 0x05, 0x72, 0x06, 0x94, 0x72, 0x81,
+ 0x42, 0xb9, 0x90, 0x82, 0xde, 0x83, 0x06, 0x63, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x6e, 0xf9, 0x51, 0x50, 0xdd, 0xa8, 0x68, 0xb9, 0xd1, 0xf8, 0x9e, 0x79,
+ 0x9c, 0x0c, 0xe1, 0x31, 0x7f, 0xdc, 0x1c, 0xa0, 0x08, 0xa1, 0xc4, 0x78,
+ 0x78, 0x87, 0x8e, 0xf6, 0x1c, 0x6c, 0xe0, 0x4d, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x9c, 0x62, 0xb9, 0x12, 0x1f, 0xe0, 0xd9, 0x76, 0x6a, 0xce, 0x57, 0x0e,
+ 0xbd, 0xe0, 0x8d, 0x4f, 0xde, 0x53, 0x14, 0x2c, 0x12, 0x30, 0x9d, 0xef,
+ 0xb6, 0xcb, 0x3f, 0x5d, 0x7b, 0x72, 0xc3, 0x21, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0x99, 0x1e, 0xd2, 0xc3, 0x1a, 0x35, 0x73, 0x5b, 0x82, 0xdd, 0x5b,
+ 0xd5, 0x4f, 0xb4, 0x96, 0x59, 0x5c, 0x52, 0x20, 0x81, 0x2f, 0xfc, 0xae,
+ 0x0c, 0x88, 0xbc, 0x4d, 0x71, 0x6b, 0x12, 0x87, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3a, 0x57, 0xbf, 0x63, 0x5f, 0x48, 0xac, 0xa8, 0x7c, 0x81, 0x81, 0xf4,
+ 0xdf, 0x25, 0x64, 0xf3, 0x18, 0xd1, 0xb5, 0xb3, 0x9c, 0x04, 0xe6, 0xaa,
+ 0xdd, 0x5d, 0xde, 0xa3, 0xf3, 0x90, 0x1d, 0xc6, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe9, 0x6a, 0x79, 0xfb, 0x3e, 0x72, 0xad, 0x0c, 0x43, 0xa0, 0xa2, 0x8c,
+ 0x42, 0xba, 0x79, 0x2f, 0xef, 0xe0, 0xa4, 0x23, 0x08, 0x3e, 0x49, 0xf3,
+ 0x68, 0xf3, 0x44, 0xaf, 0x6b, 0x31, 0x74, 0x66, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xcd, 0xfe, 0x17, 0xdb, 0x3f, 0xb2, 0x4d, 0x4a, 0x66, 0x8b, 0xfc, 0x22,
+ 0x71, 0xf5, 0xc6, 0x26, 0x60, 0x4e, 0xd9, 0x3c, 0x24, 0xd6, 0x7f, 0xf3,
+ 0x31, 0xb9, 0xc4, 0x05, 0xf8, 0x54, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd3, 0x6b, 0x47, 0x89, 0xa2, 0x58, 0x2e, 0x7f, 0x0d, 0x1a, 0x10, 0x14,
+ 0x4e, 0xc3, 0x9c, 0x28, 0x66, 0x3c, 0x62, 0xc3, 0xed, 0xba, 0xd7, 0xa0,
+ 0x40, 0x52, 0xbf, 0x4b, 0x6f, 0x46, 0x1d, 0xb9, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x5a, 0x27, 0xc3, 0x18, 0x8d, 0x25, 0xeb, 0xe7, 0x24, 0xf3, 0x39,
+ 0x99, 0xbf, 0xcc, 0x5b, 0x86, 0x2b, 0xe6, 0xbd, 0x71, 0xd7, 0x0c, 0xc8,
+ 0xfe, 0xcf, 0x4d, 0x51, 0x90, 0xb0, 0xfc, 0x61, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x74, 0x34, 0x6c, 0x10, 0xa1, 0xd4, 0xcf, 0xac, 0xaf, 0xdf, 0x5c, 0xc0,
+ 0x85, 0x26, 0xa7, 0xa4, 0x12, 0x32, 0x02, 0xa8, 0xf6, 0x2b, 0xff, 0x7a,
+ 0x1e, 0xdd, 0xba, 0xe2, 0xc8, 0x02, 0xe4, 0x1a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8f, 0xa0, 0xaf, 0x2d, 0xd6, 0x03, 0xf8, 0x44, 0x36, 0xe0, 0x6b, 0x7e,
+ 0x4c, 0x70, 0x19, 0x17, 0x0c, 0x45, 0xf4, 0x52, 0x73, 0xdb, 0x33, 0xa0,
+ 0x43, 0x10, 0x4d, 0x86, 0x56, 0x0e, 0xbc, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x96, 0x15, 0xb5, 0x11, 0x0d, 0x1d, 0x78, 0xe5, 0x66, 0xb0, 0xde, 0x32,
+ 0x25, 0xc4, 0x74, 0x4b, 0x0a, 0x4a, 0x46, 0xfb, 0x6a, 0xaf, 0x36, 0x3a,
+ 0xb4, 0x8e, 0x26, 0xb4, 0x84, 0xf7, 0xa2, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0xeb, 0xb0, 0xf6, 0x21, 0xa0, 0x1b, 0x2d, 0xc0, 0x04, 0xe4, 0x04,
+ 0x8b, 0x7b, 0x0f, 0x98, 0x64, 0x13, 0x1b, 0xcd, 0xfe, 0xd6, 0xf6, 0x68,
+ 0xfa, 0xc0, 0x15, 0x40, 0x4d, 0x4d, 0x3d, 0xab, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+static const uint8_t fpm_table_p384[CPT_P384_LEN] = {
+ 0x3a, 0x54, 0x5e, 0x38, 0x72, 0x76, 0x0a, 0xb7, 0x55, 0x02, 0xf2, 0x5d,
+ 0xbf, 0x55, 0x29, 0x6c, 0x59, 0xf7, 0x41, 0xe0, 0x82, 0x54, 0x2a, 0x38,
+ 0x6e, 0x1d, 0x3b, 0x62, 0x8b, 0xa7, 0x9b, 0x98, 0x8e, 0xb1, 0xc7, 0x1e,
+ 0xf3, 0x20, 0xad, 0x74, 0xaa, 0x87, 0xca, 0x22, 0xbe, 0x8b, 0x05, 0x37,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x43, 0x1d, 0x7c,
+ 0x90, 0xea, 0x0e, 0x5f, 0x0a, 0x60, 0xb1, 0xce, 0x1d, 0x7e, 0x81, 0x9d,
+ 0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0xf8, 0xf4, 0x1d, 0xbd,
+ 0x28, 0x9a, 0x14, 0x7c, 0x5d, 0x9e, 0x98, 0xbf, 0x92, 0x92, 0xdc, 0x29,
+ 0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x39, 0xc1, 0xb3, 0x28, 0xd8, 0xee, 0x21, 0xc9, 0x2c, 0x3e, 0x0c, 0x91,
+ 0x55, 0x87, 0x17, 0xdb, 0x4b, 0x58, 0x80, 0x8b, 0x3f, 0x86, 0x86, 0xa9,
+ 0x43, 0x60, 0x39, 0x09, 0x18, 0x14, 0x1b, 0x1a, 0xd6, 0xe9, 0x8b, 0x0d,
+ 0x37, 0xca, 0x7a, 0xbc, 0xf5, 0x32, 0x38, 0x9a, 0x06, 0x0c, 0xbd, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x7e, 0x18, 0x39,
+ 0x23, 0xd8, 0x6e, 0xcd, 0x31, 0xea, 0x31, 0xb1, 0x08, 0x5a, 0x4e, 0x9a,
+ 0xbc, 0x40, 0xce, 0x5a, 0xbe, 0x64, 0x36, 0x03, 0xbd, 0x22, 0xcf, 0xb2,
+ 0xa2, 0x12, 0x41, 0x63, 0x6f, 0x04, 0xca, 0xa2, 0xde, 0x3a, 0x82, 0xba,
+ 0xb9, 0xd2, 0x85, 0x2c, 0xc3, 0xb3, 0x8e, 0x69, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x26, 0x4e, 0x52, 0x46, 0xeb, 0x09, 0xa0, 0xe5, 0xf8, 0xf4, 0xbe, 0x11,
+ 0x32, 0xcd, 0xf0, 0x3c, 0xda, 0x9d, 0x54, 0x83, 0x5f, 0xae, 0xfa, 0x4f,
+ 0xbb, 0xbc, 0x4f, 0xd0, 0x17, 0xa3, 0x1b, 0x22, 0xc3, 0xde, 0xcd, 0x0c,
+ 0x86, 0xf0, 0x61, 0x45, 0x52, 0x8e, 0xf1, 0x67, 0x0a, 0x5f, 0x2c, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x1e, 0x98, 0x58,
+ 0xc1, 0x4f, 0x0d, 0xd6, 0x55, 0x05, 0x38, 0xa8, 0x09, 0xcb, 0x75, 0x24,
+ 0xbd, 0x60, 0xca, 0xb4, 0xc8, 0x7f, 0xed, 0x22, 0xf8, 0xb7, 0x6f, 0xdd,
+ 0x63, 0x1d, 0x05, 0x8d, 0x58, 0x03, 0xea, 0xa1, 0x1a, 0x1d, 0xcf, 0x14,
+ 0x7b, 0x9b, 0x1f, 0xbe, 0x7b, 0xcc, 0xf5, 0x6c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xa6, 0x28, 0xb0, 0x9a, 0xaa, 0x03, 0xbd, 0x53, 0xba, 0x06, 0x54, 0x58,
+ 0xa4, 0xf5, 0x2d, 0x78, 0xdb, 0x29, 0x87, 0x89, 0x4d, 0x10, 0xdd, 0xea,
+ 0xb4, 0x2a, 0x31, 0xaf, 0x8a, 0x3e, 0x29, 0x7d, 0x40, 0xf7, 0xf9, 0xe7,
+ 0x06, 0x42, 0x12, 0x79, 0xc1, 0x9e, 0x0b, 0x4c, 0x80, 0x01, 0x19, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0x2d, 0x0f, 0xc5,
+ 0xe6, 0xc8, 0x8c, 0x41, 0xaf, 0x68, 0xaa, 0x6d, 0xe6, 0x39, 0xd8, 0x58,
+ 0xc1, 0xc7, 0xca, 0xd1, 0x35, 0xf6, 0xeb, 0xf2, 0x57, 0x7a, 0x30, 0xea,
+ 0xe3, 0x56, 0x7a, 0xf9, 0xe5, 0xa0, 0x19, 0x1d, 0x1f, 0x5b, 0x77, 0xf6,
+ 0x16, 0xf3, 0xfd, 0xbf, 0x03, 0x56, 0xb3, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x30, 0x99, 0x15, 0x60, 0xaa, 0x13, 0x39, 0x09, 0x90, 0x97, 0xdb, 0xb1,
+ 0xc6, 0xcb, 0x00, 0x17, 0xd3, 0x7d, 0xe4, 0x24, 0xb8, 0x60, 0xfa, 0xe6,
+ 0x9b, 0xb1, 0x83, 0xb2, 0x70, 0xb3, 0x75, 0xdd, 0x56, 0x7a, 0x62, 0x33,
+ 0xcd, 0x6c, 0xe3, 0xa3, 0xaa, 0xb8, 0xbb, 0x9f, 0x0f, 0xdc, 0x30, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0xc5, 0xb9, 0x81,
+ 0x60, 0x0a, 0xd5, 0xa6, 0xeb, 0xdf, 0x73, 0xf2, 0xd6, 0x2f, 0xaa, 0x44,
+ 0x6d, 0x95, 0x5b, 0xb3, 0xc9, 0x74, 0x7b, 0xf3, 0xf6, 0x00, 0x5f, 0xc8,
+ 0x15, 0xeb, 0x04, 0xac, 0xf0, 0xaf, 0x01, 0xd1, 0x28, 0x20, 0x50, 0xb5,
+ 0x48, 0x94, 0x2f, 0x81, 0x31, 0x4f, 0x6d, 0x28, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x20, 0x22, 0x11, 0x21, 0x77, 0x16, 0x60, 0x5e, 0x23, 0x47, 0xd2, 0xc8,
+ 0x9e, 0xf2, 0x81, 0xc8, 0x54, 0xba, 0x45, 0x99, 0x56, 0x7d, 0x63, 0x42,
+ 0xce, 0x0f, 0xba, 0x30, 0x77, 0xc0, 0xf0, 0x3f, 0x70, 0x22, 0xf8, 0x02,
+ 0xcb, 0x36, 0x74, 0x44, 0x73, 0x34, 0xa9, 0x36, 0xa9, 0xa6, 0xa0, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x46, 0x1f, 0x68,
+ 0xd6, 0x58, 0xa0, 0x1a, 0x0a, 0x64, 0xd5, 0x19, 0xc2, 0xbd, 0x0e, 0xfa,
+ 0x9e, 0x2e, 0xee, 0x8f, 0x69, 0x7a, 0x92, 0x80, 0x8e, 0x5d, 0x9b, 0x89,
+ 0x7d, 0x0e, 0x01, 0x7a, 0x1f, 0x7c, 0x5c, 0x36, 0x7c, 0xbd, 0x4c, 0xcd,
+ 0x7f, 0xfc, 0xef, 0xf7, 0xf6, 0x32, 0xc9, 0x26, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x30, 0x0a, 0xe2, 0xe6, 0x0e, 0x75, 0x83, 0x44, 0x45, 0x1c, 0x70, 0x7a,
+ 0x37, 0x1a, 0x2c, 0xa5, 0x25, 0x65, 0x1d, 0x10, 0x50, 0x52, 0xdd, 0x32,
+ 0xbf, 0x88, 0xde, 0x7f, 0x48, 0x62, 0xb9, 0x54, 0xfa, 0xfc, 0xe2, 0x6e,
+ 0x03, 0x81, 0xef, 0x13, 0xdc, 0x91, 0x6c, 0x17, 0x96, 0x0e, 0x09, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x17, 0xcc, 0x44,
+ 0x02, 0x6b, 0x08, 0x89, 0x95, 0xc0, 0x1f, 0xf1, 0x9b, 0x42, 0x44, 0x1b,
+ 0x40, 0x89, 0x64, 0x78, 0xcc, 0x16, 0x06, 0x97, 0x52, 0xd1, 0x54, 0xb8,
+ 0x0b, 0xa0, 0x4a, 0x35, 0xb3, 0xd9, 0x2e, 0xa4, 0x70, 0x1c, 0x29, 0x52,
+ 0x26, 0x6e, 0x8a, 0x40, 0xd6, 0x9e, 0xca, 0x0a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe4, 0xbf, 0xc2, 0xc0, 0x49, 0x05, 0xca, 0x71, 0xf3, 0x3a, 0x45, 0x0a,
+ 0xd1, 0x56, 0xf7, 0x61, 0x3d, 0x8b, 0x29, 0xdb, 0xd0, 0x88, 0x48, 0xc2,
+ 0x09, 0x7d, 0xa3, 0x95, 0xa2, 0x30, 0x96, 0x86, 0x21, 0x19, 0x05, 0x03,
+ 0x5f, 0x49, 0x72, 0xd7, 0xb2, 0xd1, 0x05, 0x58, 0x17, 0xcb, 0xaa, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xce, 0xbb, 0x55,
+ 0x75, 0x3e, 0xe3, 0x24, 0xe8, 0x7a, 0xb0, 0x7c, 0x69, 0x24, 0x66, 0x6f,
+ 0x9b, 0x47, 0x5d, 0x74, 0x4e, 0xcf, 0x1a, 0x68, 0xf8, 0x2b, 0xe8, 0xf5,
+ 0x2e, 0x62, 0x36, 0xc0, 0x23, 0x7c, 0x0d, 0xba, 0x3c, 0xfd, 0x05, 0x6b,
+ 0x35, 0x4c, 0xd8, 0x72, 0xc3, 0xc6, 0xcb, 0xd2, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8d, 0x10, 0x4d, 0x24, 0x70, 0x8d, 0x4c, 0xee, 0x19, 0x7d, 0x69, 0x58,
+ 0x81, 0x9c, 0xf0, 0x43, 0x47, 0xfc, 0x87, 0xfa, 0xf0, 0x71, 0x22, 0x10,
+ 0x10, 0x3d, 0xf7, 0x85, 0x5c, 0x20, 0x15, 0x58, 0x30, 0xb0, 0xa9, 0xe8,
+ 0x61, 0x1e, 0xf6, 0x38, 0x00, 0xb1, 0x9a, 0xc8, 0xfd, 0xfe, 0xbf, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x0e, 0x8d, 0x6f,
+ 0xd2, 0x01, 0xe0, 0x3e, 0xbb, 0x7c, 0x96, 0x9c, 0x22, 0x28, 0xff, 0x5f,
+ 0x68, 0x81, 0x02, 0x82, 0x63, 0x61, 0x64, 0xc5, 0xcd, 0xbb, 0x3c, 0xd2,
+ 0xe7, 0x54, 0x22, 0x0d, 0x14, 0x18, 0xfe, 0x25, 0xe9, 0xf6, 0xed, 0xc4,
+ 0xa7, 0x2f, 0x91, 0x05, 0x9e, 0xe3, 0x60, 0x31, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0xd2, 0xc2, 0x73, 0xb7, 0x69, 0x73, 0x7a, 0x2c, 0xc0, 0x24, 0x51,
+ 0x97, 0xd5, 0x3f, 0xfd, 0xc3, 0xb6, 0xac, 0x4b, 0xe8, 0x6c, 0x46, 0xbd,
+ 0x17, 0xe9, 0x41, 0x1f, 0x68, 0x5e, 0x92, 0x6d, 0x13, 0x6d, 0xf3, 0x6b,
+ 0x75, 0x20, 0x3a, 0x36, 0x3f, 0x95, 0x61, 0xe0, 0x8b, 0xf0, 0xb2, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0x6f, 0xf8, 0xd5,
+ 0x27, 0xe9, 0x90, 0xa7, 0xc3, 0x4b, 0xe5, 0x86, 0xf9, 0x86, 0x7a, 0x60,
+ 0xea, 0x08, 0x87, 0x47, 0x85, 0x54, 0xe0, 0x14, 0xcf, 0xce, 0xd6, 0x64,
+ 0x6f, 0x52, 0xe4, 0xcb, 0x4b, 0x1a, 0x5a, 0x20, 0x41, 0x2a, 0xb6, 0x41,
+ 0x0b, 0x06, 0xf0, 0x06, 0x39, 0x62, 0x95, 0x87, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x4c, 0x0d, 0xd2, 0x85, 0x65, 0x1f, 0x82, 0x32, 0x5c, 0x51, 0xe7,
+ 0x78, 0x5d, 0x3e, 0xf7, 0xb8, 0x3a, 0x18, 0x61, 0x88, 0xe9, 0x55, 0x32,
+ 0x53, 0x9f, 0x94, 0xad, 0x52, 0x2c, 0x29, 0x31, 0x15, 0x27, 0x4e, 0x5b,
+ 0x89, 0x80, 0xf1, 0x37, 0x9f, 0xd7, 0xb0, 0x10, 0xdf, 0x0f, 0x66, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0xa7, 0xb9, 0x4a,
+ 0x40, 0x64, 0xe4, 0xc0, 0xd4, 0x4e, 0xba, 0x45, 0x25, 0xd7, 0xd2, 0x11,
+ 0x0a, 0x80, 0x6b, 0x54, 0xbe, 0x8a, 0x04, 0xe3, 0x92, 0x92, 0x26, 0xbd,
+ 0x14, 0x90, 0x33, 0xde, 0x79, 0x5f, 0x6f, 0xa3, 0xc9, 0x73, 0x92, 0x46,
+ 0x32, 0x1a, 0xa9, 0xa3, 0xb9, 0x26, 0x02, 0x25, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x49, 0xbc, 0xc2, 0xf5, 0x8b, 0x70, 0x7b, 0x8e, 0x29, 0x01, 0xb5, 0x19,
+ 0x1d, 0x92, 0x89, 0x83, 0x2e, 0x4c, 0x29, 0x56, 0x7d, 0x49, 0xc7, 0x80,
+ 0xeb, 0xd1, 0xcf, 0xf8, 0x4c, 0x6a, 0x99, 0x64, 0x2c, 0xae, 0xbb, 0xd3,
+ 0x16, 0xee, 0x3e, 0x13, 0x36, 0xa5, 0x43, 0xee, 0xa8, 0x7a, 0x68, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0xb4, 0x1c, 0x29,
+ 0xb5, 0x69, 0x94, 0x6d, 0x15, 0x10, 0xe7, 0xd4, 0x3e, 0xf2, 0x26, 0x7e,
+ 0x91, 0x23, 0x50, 0x72, 0xd4, 0xb3, 0x39, 0x4d, 0x58, 0xea, 0xff, 0x04,
+ 0x8f, 0xbd, 0x85, 0xd1, 0xd3, 0x49, 0xab, 0x03, 0x78, 0xa6, 0x78, 0x47,
+ 0xf2, 0x77, 0xba, 0xcd, 0xa5, 0x0e, 0xe4, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0xb0, 0x56, 0x58, 0x5f, 0x86, 0x3b, 0xbd, 0xe9, 0x2c, 0xdc, 0x5a,
+ 0xb4, 0x83, 0x28, 0x3d, 0xeb, 0xb3, 0x12, 0x09, 0xdc, 0x7c, 0x42, 0x1d,
+ 0x3a, 0xfc, 0xbd, 0x79, 0x6d, 0x01, 0xa5, 0xa8, 0xe2, 0xb0, 0x67, 0xca,
+ 0xa0, 0x8b, 0x6a, 0x51, 0x02, 0x6e, 0x0d, 0xc2, 0xe8, 0xcb, 0x7a, 0xeb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0xc3, 0x50, 0x29,
+ 0x02, 0xdd, 0xe1, 0x8a, 0x64, 0xc1, 0x5f, 0xac, 0xd8, 0xc6, 0xcf, 0x36,
+ 0x17, 0xea, 0x27, 0x01, 0x10, 0x78, 0x1e, 0x45, 0xd6, 0x8d, 0x1f, 0xfc,
+ 0x1f, 0x34, 0x43, 0xd8, 0x4b, 0xe2, 0x56, 0x37, 0x8c, 0x74, 0x61, 0xa5,
+ 0xae, 0x88, 0x66, 0xba, 0xd8, 0xef, 0x24, 0xe1, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xac, 0x3a, 0x78, 0xd0, 0xd2, 0x65, 0xa9, 0x1c, 0x1a, 0x29, 0xf8, 0xef,
+ 0x6c, 0x8f, 0x83, 0xd3, 0xef, 0x98, 0xfd, 0xde, 0x8f, 0xd8, 0xd8, 0x17,
+ 0xdf, 0x45, 0x9e, 0xa1, 0xc4, 0x2b, 0xf7, 0x48, 0x14, 0xda, 0xfc, 0x39,
+ 0x81, 0xa7, 0x3d, 0xc7, 0xb0, 0x3d, 0xfa, 0x54, 0xc5, 0x2a, 0xfa, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x40, 0x6f, 0x6e,
+ 0x6c, 0x0d, 0x2c, 0xe7, 0xcd, 0x12, 0x0b, 0x2b, 0x41, 0xfd, 0x72, 0xca,
+ 0xef, 0x5d, 0x90, 0x06, 0x78, 0xf6, 0x02, 0xdd, 0xf5, 0xf8, 0xa2, 0xd1,
+ 0x8a, 0xcc, 0xf2, 0x29, 0xaa, 0xfd, 0x1f, 0xcf, 0xce, 0x6d, 0x90, 0x8a,
+ 0x2c, 0xe2, 0x88, 0x5a, 0x0e, 0x6d, 0x85, 0xf2, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x89, 0x10, 0x9a, 0x0e, 0xc6, 0x26, 0x66, 0xde, 0xc8, 0xc1, 0x2e, 0x75,
+ 0x7f, 0xfc, 0xd0, 0x1e, 0xa8, 0x20, 0x61, 0x69, 0xc4, 0x8b, 0x5a, 0xb0,
+ 0x4b, 0xc2, 0xfd, 0xcf, 0xf9, 0x83, 0xac, 0x6c, 0x59, 0xcf, 0xca, 0x71,
+ 0x55, 0x97, 0x7d, 0x23, 0x12, 0x64, 0xcb, 0x33, 0x57, 0x66, 0xc9, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x69, 0x13, 0x81,
+ 0x2e, 0x01, 0x4b, 0x4b, 0x31, 0xd2, 0x87, 0x07, 0xe4, 0x48, 0x3e, 0xc5,
+ 0xcb, 0xf7, 0x19, 0x0c, 0xff, 0xb1, 0x97, 0x58, 0xb6, 0x67, 0x17, 0xa0,
+ 0x65, 0xa5, 0xf2, 0x48, 0xd9, 0x4a, 0xd8, 0xfa, 0xc5, 0x3b, 0x4f, 0x69,
+ 0x11, 0x9e, 0xbe, 0xee, 0xa1, 0xa1, 0xa3, 0x76, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+static const uint8_t fpm_table_p521[CPT_P521_LEN] = {
+ 0xf9, 0x7e, 0x7e, 0x31, 0xc2, 0xe5, 0xbd, 0x66, 0x33, 0x48, 0xb3, 0xc1,
+ 0x85, 0x6a, 0x42, 0x9b, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, 0xff, 0xa8, 0xde,
+ 0xa1, 0x4b, 0x5e, 0x77, 0xef, 0xe7, 0x59, 0x28, 0xf8, 0x28, 0xaf, 0x60,
+ 0x6b, 0x4d, 0x3d, 0xba, 0x9c, 0x64, 0x81, 0x39, 0x05, 0x3f, 0xb5, 0x21,
+ 0x9e, 0x3e, 0xcb, 0x66, 0x23, 0x95, 0xb4, 0x42, 0x85, 0x8e, 0x06, 0xb7,
+ 0x04, 0x04, 0xe9, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6,
+ 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50, 0x35, 0x3c, 0x70, 0x86,
+ 0xa2, 0x72, 0xc2, 0x40, 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad, 0x07, 0x61,
+ 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40, 0x17, 0xaf, 0xbd, 0x17,
+ 0x27, 0x3e, 0x66, 0x2c, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b, 0x44, 0x68,
+ 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9, 0x39, 0x29, 0x6a, 0x78,
+ 0x9a, 0x3b, 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x18,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x69, 0xca, 0xd3, 0xcc, 0xc4, 0xd6, 0xab, 0x08, 0x3a, 0xdb, 0x57, 0x77,
+ 0x3b, 0x89, 0x90, 0xb8, 0xd0, 0xca, 0xd8, 0xce, 0x8d, 0x95, 0x88, 0x01,
+ 0xcb, 0x57, 0x2e, 0x66, 0x6d, 0x72, 0x8f, 0x9e, 0xe3, 0xd9, 0xe7, 0xc4,
+ 0xcd, 0x51, 0x31, 0xfc, 0xaf, 0xce, 0xb6, 0xb0, 0x61, 0x45, 0xdc, 0x06,
+ 0x12, 0xec, 0xd3, 0x92, 0xe2, 0x13, 0x04, 0x3a, 0xbd, 0x59, 0x92, 0x94,
+ 0x3a, 0x64, 0xc8, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b,
+ 0x86, 0x23, 0xbd, 0xbb, 0xf6, 0xea, 0x9c, 0xf1, 0x3a, 0xad, 0x94, 0x95,
+ 0x54, 0x7a, 0xa6, 0x50, 0xd3, 0xd8, 0x53, 0xfc, 0xbe, 0xb2, 0x71, 0x59,
+ 0x3d, 0x25, 0xa6, 0x48, 0x30, 0xb4, 0x08, 0x33, 0x12, 0xd1, 0x88, 0xe8,
+ 0xde, 0xc5, 0x1b, 0xd1, 0x83, 0x63, 0x30, 0xd2, 0xb3, 0x48, 0xc3, 0xfa,
+ 0x9d, 0xf5, 0x0c, 0xfe, 0x73, 0xc2, 0xea, 0x59, 0xb5, 0xdf, 0xfb, 0x20,
+ 0x61, 0xde, 0xd0, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xdc, 0x33, 0x41, 0x6e, 0xc8, 0xc5, 0xbc, 0xb2, 0xde, 0xfb, 0x4a, 0x9c,
+ 0xe2, 0x44, 0x91, 0x0b, 0x27, 0xc3, 0x56, 0x1b, 0x53, 0xa8, 0xf7, 0xb9,
+ 0x10, 0x88, 0xbb, 0x9e, 0xf6, 0x94, 0xd7, 0xb1, 0x98, 0xfa, 0x92, 0xaa,
+ 0xa6, 0xd2, 0xc7, 0x82, 0x53, 0xc2, 0xa3, 0xdb, 0x3b, 0xa3, 0x7d, 0xd4,
+ 0x67, 0xfc, 0x7c, 0xab, 0xd5, 0x93, 0x4b, 0xbc, 0x0c, 0x72, 0xcf, 0x96,
+ 0x93, 0xbb, 0x09, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf,
+ 0xf3, 0x2e, 0xbb, 0x1f, 0x13, 0xda, 0xb4, 0x57, 0x7c, 0x36, 0x11, 0xdf,
+ 0xad, 0x23, 0x53, 0x70, 0xd8, 0x6d, 0x54, 0xdb, 0xab, 0x9e, 0x13, 0x10,
+ 0xbf, 0x40, 0x10, 0xf1, 0x61, 0x85, 0xbf, 0x0d, 0x94, 0x6d, 0xb5, 0x6e,
+ 0x31, 0x3c, 0x69, 0xf5, 0x3b, 0x67, 0x3c, 0x92, 0xe3, 0x77, 0x73, 0x27,
+ 0x58, 0x7a, 0x4e, 0xa7, 0x47, 0x7a, 0xbd, 0xe6, 0xae, 0x87, 0xa6, 0x00,
+ 0xd8, 0xaa, 0xa4, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xbf, 0x2f, 0xfc, 0xb7, 0x6e, 0x64, 0x0a, 0x8d, 0x63, 0x47, 0x95, 0x01,
+ 0xa0, 0xb5, 0xa0, 0x7e, 0x55, 0xbb, 0x30, 0x01, 0x5f, 0x36, 0xf2, 0xe7,
+ 0x98, 0x90, 0xf9, 0x99, 0x05, 0x8a, 0x67, 0x6a, 0xd9, 0xee, 0x34, 0x1b,
+ 0x45, 0x5c, 0x0d, 0x27, 0x6c, 0x95, 0x78, 0x0c, 0x18, 0xe0, 0x8f, 0xc8,
+ 0xeb, 0x63, 0xa6, 0x75, 0x44, 0x2a, 0x07, 0x5d, 0xce, 0x46, 0xa1, 0xa5,
+ 0xfb, 0x69, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b,
+ 0x8c, 0x61, 0x89, 0x34, 0xf3, 0xed, 0x62, 0x53, 0xe0, 0x03, 0x42, 0x44,
+ 0x6e, 0x94, 0x33, 0xb4, 0x98, 0x9b, 0x99, 0x21, 0x42, 0x60, 0xb2, 0xc5,
+ 0x11, 0x69, 0x98, 0x04, 0xd9, 0xdf, 0x47, 0x47, 0x12, 0x5f, 0xe6, 0x1f,
+ 0x76, 0x3a, 0x7f, 0x63, 0xa9, 0x72, 0x78, 0x6d, 0xc6, 0xec, 0x39, 0x5a,
+ 0x66, 0x2f, 0x9f, 0xe7, 0xb7, 0xb7, 0xb8, 0xc6, 0xfb, 0x64, 0x4a, 0x61,
+ 0x54, 0x56, 0x55, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x52, 0x3c, 0xf9, 0xb2, 0xfd, 0xe7, 0x22, 0x35, 0x5c, 0x90, 0xde, 0xf0,
+ 0xe8, 0xe0, 0x99, 0x59, 0xfe, 0x13, 0x1b, 0x9c, 0xd3, 0xcf, 0x46, 0x66,
+ 0xc9, 0x14, 0x31, 0x30, 0xc1, 0x32, 0x76, 0xad, 0xa7, 0xdc, 0xdd, 0xc1,
+ 0x85, 0xe2, 0x36, 0x37, 0x09, 0x45, 0x74, 0xcc, 0xf5, 0x14, 0x11, 0xd7,
+ 0xf3, 0xfc, 0x87, 0xc4, 0xbd, 0x29, 0xfe, 0xd7, 0x2c, 0xc3, 0x2d, 0x3f,
+ 0x17, 0x1c, 0xef, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x32,
+ 0xdf, 0xa9, 0x2d, 0x3e, 0x35, 0xcc, 0x7a, 0x6c, 0xdd, 0x6d, 0xc6, 0x86,
+ 0xd4, 0xe4, 0x78, 0x8b, 0xcb, 0x66, 0xcd, 0xe2, 0x1f, 0x74, 0xbb, 0xe0,
+ 0xb0, 0xe9, 0xff, 0x6a, 0xf6, 0x7e, 0xc3, 0x95, 0x18, 0x6c, 0xfa, 0x86,
+ 0x07, 0xb9, 0xdd, 0xff, 0xe8, 0x67, 0xde, 0x2f, 0xcf, 0x2d, 0xfd, 0x72,
+ 0x49, 0x8c, 0x21, 0x91, 0xe2, 0x4e, 0xd3, 0x15, 0x2d, 0xf0, 0xac, 0xf8,
+ 0xf7, 0x37, 0xe8, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x52, 0x6f, 0x00, 0x95, 0x74, 0x31, 0x82, 0x2a, 0x18, 0x5d, 0x92, 0xc3,
+ 0xeb, 0x0c, 0x4e, 0xf8, 0xc8, 0x78, 0x13, 0x76, 0x38, 0x89, 0x30, 0x98,
+ 0x32, 0x54, 0x7e, 0xec, 0x6a, 0x55, 0x72, 0xd0, 0xe1, 0xe8, 0xea, 0xe8,
+ 0xf5, 0x94, 0x62, 0x73, 0x9a, 0x9e, 0x24, 0x00, 0xc8, 0x2f, 0x4f, 0x17,
+ 0xfb, 0x98, 0xab, 0xff, 0xdb, 0x9f, 0x0e, 0x9b, 0x3c, 0x20, 0x1a, 0xa5,
+ 0x83, 0x28, 0x87, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x95,
+ 0x58, 0x76, 0x12, 0xa4, 0x41, 0xc2, 0xb1, 0x4a, 0x11, 0x91, 0xb7, 0x1d,
+ 0xfd, 0xbf, 0x12, 0x43, 0x97, 0x39, 0x6e, 0xe7, 0xbc, 0xf5, 0x3f, 0x43,
+ 0xd1, 0x4b, 0xf1, 0xa7, 0x90, 0xec, 0xf9, 0x76, 0x7f, 0x14, 0x7a, 0x72,
+ 0x0b, 0xc6, 0xa0, 0xea, 0x40, 0x95, 0x18, 0xf8, 0xaf, 0xcb, 0xff, 0x46,
+ 0x30, 0x21, 0xdc, 0xa5, 0x32, 0x17, 0x0c, 0x93, 0x88, 0x16, 0xd3, 0xee,
+ 0x33, 0xf2, 0x46, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xcc, 0x7d, 0x6e, 0x15, 0x2f, 0x97, 0x03, 0x30, 0x50, 0xb1, 0xf6, 0x9e,
+ 0x03, 0x00, 0x75, 0x86, 0xfc, 0x0e, 0x37, 0x04, 0x58, 0x25, 0x83, 0x51,
+ 0xa8, 0x5d, 0x47, 0xe3, 0x56, 0xd8, 0xaf, 0x60, 0x1c, 0x89, 0x3b, 0x86,
+ 0x8a, 0xc7, 0x42, 0x7f, 0x8e, 0x70, 0xdd, 0xd9, 0x5c, 0xcf, 0x72, 0xde,
+ 0x1e, 0xd8, 0x3e, 0x13, 0x16, 0x06, 0x0a, 0x41, 0xc9, 0x00, 0xb8, 0x12,
+ 0x6d, 0xa8, 0x80, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f,
+ 0x3a, 0xc0, 0x08, 0x85, 0x5c, 0x6c, 0x32, 0x0b, 0x56, 0xa9, 0xb0, 0x12,
+ 0x24, 0x1f, 0x0c, 0xd6, 0x47, 0x94, 0x1b, 0x89, 0x97, 0x5d, 0xe7, 0x17,
+ 0xe7, 0x09, 0xef, 0x78, 0xdc, 0xe4, 0x8a, 0x06, 0x48, 0x2f, 0x2a, 0x6a,
+ 0x01, 0x56, 0xf2, 0xbd, 0xe7, 0xbd, 0xca, 0x0d, 0xae, 0x0a, 0x8a, 0x52,
+ 0x8e, 0x41, 0xf9, 0x41, 0x3f, 0x45, 0x94, 0x01, 0xea, 0x6d, 0x1c, 0x40,
+ 0x60, 0x16, 0x6a, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xa6, 0x6c, 0xcc, 0xc8, 0xd6, 0xe1, 0xbd, 0x72, 0x1e, 0xa4, 0xe9, 0x21,
+ 0x4a, 0xe2, 0xfa, 0x5c, 0x66, 0x77, 0x5a, 0xf2, 0x2d, 0x02, 0x1f, 0xa7,
+ 0x6d, 0x71, 0x1d, 0xfb, 0x2a, 0x4c, 0x46, 0x77, 0xdd, 0xaa, 0xe8, 0xbb,
+ 0x5a, 0xe3, 0x80, 0xb3, 0x53, 0x15, 0x89, 0x94, 0x60, 0x0f, 0x11, 0xfc,
+ 0xfe, 0xb1, 0x22, 0xdb, 0xda, 0x94, 0xd4, 0x43, 0x7c, 0xbf, 0x1a, 0xfa,
+ 0xdf, 0xfc, 0x21, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+ 0x03, 0xcf, 0xa7, 0x31, 0x83, 0x4b, 0xf8, 0x91, 0x4e, 0x01, 0x60, 0x85,
+ 0x63, 0x0b, 0x80, 0x32, 0x90, 0xcf, 0x9b, 0x59, 0x49, 0xdb, 0x4d, 0x96,
+ 0x96, 0xfd, 0x26, 0x14, 0x33, 0x5c, 0x9d, 0xdd, 0xc0, 0x21, 0x45, 0x10,
+ 0x8e, 0x3b, 0x98, 0xfb, 0x6d, 0xed, 0x06, 0x33, 0x1d, 0xa2, 0xea, 0x2f,
+ 0x2b, 0xda, 0x6d, 0x76, 0x9d, 0x0e, 0xad, 0x76, 0x4b, 0xa0, 0x0e, 0x99,
+ 0xf3, 0xe4, 0xfb, 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xb0, 0x31, 0x1b, 0x24, 0xcd, 0xed, 0xb3, 0xd3, 0x61, 0x14, 0xed, 0xd7,
+ 0xe5, 0xc3, 0xfb, 0x9d, 0x8e, 0x02, 0x64, 0x37, 0x1a, 0x6b, 0xb2, 0xa1,
+ 0xd7, 0xcc, 0x12, 0x87, 0x5e, 0xcd, 0xbe, 0x72, 0x6f, 0x71, 0x72, 0x87,
+ 0x15, 0x58, 0x28, 0xe1, 0xb8, 0x3a, 0xef, 0xaa, 0x5f, 0x9b, 0xf5, 0x5d,
+ 0x29, 0xd8, 0xc7, 0x42, 0x7e, 0x0b, 0x8a, 0xce, 0x18, 0xfe, 0x72, 0xa4,
+ 0x64, 0x25, 0x9f, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a,
+ 0x80, 0xf7, 0xfb, 0xb6, 0x2a, 0xa4, 0x37, 0x5d, 0x8d, 0x40, 0xb9, 0xee,
+ 0x6d, 0x0b, 0xec, 0x41, 0x55, 0xdd, 0x1c, 0xe0, 0x41, 0xdb, 0xbd, 0x4b,
+ 0xba, 0x77, 0x95, 0x12, 0x29, 0x4b, 0xc6, 0xcc, 0x38, 0x3d, 0x3a, 0x90,
+ 0x4a, 0xa6, 0x6d, 0xb2, 0x3c, 0x46, 0x18, 0x62, 0x6c, 0xa8, 0xd7, 0x01,
+ 0xf8, 0x0e, 0x3b, 0x59, 0x19, 0xbc, 0xfc, 0x0b, 0x9b, 0x63, 0x7d, 0x37,
+ 0x5f, 0x56, 0xcd, 0xc1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x74, 0x3b, 0x45, 0x1f, 0x86, 0xeb, 0x01, 0xd4, 0x63, 0x9c, 0x35, 0xde,
+ 0x4d, 0x93, 0xf5, 0x7f, 0xab, 0x5e, 0xaa, 0xc4, 0x71, 0xd1, 0x5e, 0x15,
+ 0xb7, 0x7b, 0xd7, 0x5b, 0xbe, 0x29, 0x19, 0xc6, 0x34, 0xb5, 0x39, 0x64,
+ 0x2d, 0x2e, 0x8f, 0xa7, 0x1c, 0x04, 0xb3, 0xbf, 0xc5, 0x70, 0xd5, 0x49,
+ 0x1b, 0x77, 0x64, 0x58, 0x6e, 0xaf, 0x33, 0xdc, 0xbd, 0x40, 0x33, 0xb2,
+ 0x4d, 0xf5, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x35, 0xe3, 0xa8, 0x3d, 0x9d, 0x79, 0x2a, 0xa8, 0x5e, 0xb6, 0x8e, 0xa9,
+ 0x73, 0x85, 0xfb, 0xe3, 0xcc, 0x3b, 0xf6, 0xb7, 0x91, 0xd7, 0x82, 0x0f,
+ 0x53, 0x9e, 0x98, 0xd0, 0x0b, 0x57, 0x18, 0x65, 0xdc, 0xc8, 0x13, 0x13,
+ 0x2f, 0xdc, 0x47, 0x3e, 0x52, 0x40, 0x20, 0x14, 0xb8, 0xa3, 0xef, 0xf6,
+ 0x93, 0x7b, 0x43, 0xd3, 0x30, 0x8e, 0x48, 0x85, 0x86, 0xd0, 0xac, 0xd5,
+ 0xbb, 0x8e, 0x72, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x49, 0x2d, 0xc8, 0x61, 0xce, 0x07, 0x6e, 0x91, 0x1b, 0xed, 0x06, 0xac,
+ 0xb5, 0xa5, 0x79, 0x7a, 0x80, 0x25, 0x4f, 0x8a, 0x10, 0xd2, 0xef, 0x33,
+ 0x97, 0x66, 0x0e, 0x4f, 0xd1, 0xa3, 0x6b, 0x3e, 0x7d, 0xa6, 0xe9, 0x2f,
+ 0x35, 0xaa, 0xee, 0xfe, 0xed, 0x65, 0x1c, 0x3f, 0x89, 0x08, 0xbd, 0xe5,
+ 0x99, 0xed, 0x82, 0xc0, 0x14, 0xb0, 0xc8, 0xd7, 0x76, 0xd6, 0xd5, 0x2e,
+ 0x23, 0x42, 0x44, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb,
+ 0x49, 0xed, 0x49, 0xd7, 0x20, 0x06, 0x0e, 0x54, 0x5d, 0xae, 0xaa, 0x77,
+ 0x53, 0x5d, 0x38, 0x1c, 0x02, 0x7b, 0x07, 0x4e, 0x2f, 0x84, 0x36, 0x83,
+ 0xe1, 0xc5, 0x2f, 0x6c, 0x4f, 0x54, 0x94, 0x1d, 0xc5, 0xa9, 0x18, 0x83,
+ 0x69, 0xbe, 0x08, 0xb0, 0xd8, 0xe8, 0x81, 0x44, 0x0f, 0xc0, 0x0b, 0x32,
+ 0x5c, 0x14, 0x9a, 0x7f, 0x39, 0x77, 0x6c, 0xe6, 0x45, 0xab, 0xab, 0x75,
+ 0x48, 0x39, 0xd3, 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xaf, 0xe2, 0xc9, 0x9e, 0x41, 0xc5, 0xad, 0x4e, 0x91, 0xbd, 0x0f, 0x74,
+ 0x04, 0x97, 0x37, 0x24, 0x11, 0x0d, 0x39, 0x02, 0x70, 0x29, 0xce, 0x18,
+ 0x31, 0x39, 0x96, 0x1a, 0xe4, 0xd8, 0xcd, 0x86, 0x20, 0x41, 0xf8, 0xf2,
+ 0x65, 0x04, 0x09, 0xad, 0xf6, 0x03, 0x67, 0x47, 0x5c, 0x3a, 0xb7, 0xe7,
+ 0x0c, 0xfa, 0xe7, 0xe4, 0x5f, 0xc5, 0x9d, 0x58, 0xa2, 0xa1, 0x41, 0x9d,
+ 0xb0, 0x1d, 0xc2, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa8,
+ 0xe5, 0x88, 0xb5, 0xf2, 0xd7, 0x71, 0x52, 0xe1, 0x94, 0xf5, 0xe2, 0x1e,
+ 0x3d, 0x65, 0x7a, 0x29, 0x91, 0xbe, 0x93, 0xd9, 0xb2, 0x9b, 0xea, 0xa5,
+ 0x8c, 0xcc, 0xd4, 0x2c, 0xe4, 0x96, 0xf3, 0x7a, 0xbf, 0x29, 0x5a, 0x89,
+ 0x7c, 0xc5, 0xff, 0xc9, 0x38, 0x64, 0xd8, 0x5e, 0xd8, 0xe3, 0x06, 0x5a,
+ 0xe1, 0x35, 0xda, 0x88, 0x59, 0x3c, 0xc5, 0x50, 0xd2, 0xe2, 0x1f, 0x06,
+ 0xa2, 0xac, 0xff, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x9f, 0x12, 0xaf, 0x8f, 0xf1, 0x27, 0x32, 0x22, 0x13, 0xd3, 0x23, 0xa3,
+ 0xc7, 0xb0, 0xbf, 0x6d, 0x7b, 0xbb, 0x66, 0x7d, 0xa8, 0x35, 0xd8, 0xea,
+ 0xd3, 0x14, 0x5f, 0xdd, 0xb3, 0x0e, 0x21, 0x3c, 0xe1, 0xd3, 0x17, 0x0f,
+ 0x86, 0x13, 0x38, 0x9c, 0x50, 0x37, 0x17, 0x64, 0x48, 0x12, 0x41, 0xf8,
+ 0x53, 0x7c, 0x14, 0xdd, 0xc2, 0xae, 0x9d, 0xbe, 0x86, 0x81, 0x31, 0x96,
+ 0x29, 0x64, 0x59, 0xc1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
+ 0xa4, 0xf4, 0x01, 0x3e, 0x71, 0xb7, 0x6e, 0x68, 0x31, 0x86, 0x62, 0xbc,
+ 0xf0, 0x1d, 0xcb, 0xa9, 0xd0, 0x55, 0xec, 0xb8, 0x18, 0xe8, 0x7f, 0x1f,
+ 0xc7, 0xcc, 0xf5, 0xfa, 0xbc, 0xf1, 0x44, 0x74, 0x3a, 0x39, 0x90, 0x76,
+ 0xf8, 0xa9, 0x58, 0x8a, 0x9f, 0x38, 0x62, 0x02, 0xa5, 0x38, 0x72, 0x4b,
+ 0x15, 0x80, 0xc0, 0xbb, 0x18, 0x20, 0xa4, 0xdd, 0xed, 0xb8, 0x90, 0x86,
+ 0xca, 0x8f, 0xdd, 0xa7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf4, 0xab, 0xf8, 0x15, 0x29, 0x73, 0x14, 0x27, 0xf5, 0xb8, 0x6b, 0x12,
+ 0xf9, 0xe4, 0x22, 0xc5, 0x4d, 0xd1, 0xa9, 0x71, 0x21, 0x97, 0xfd, 0x42,
+ 0x02, 0x10, 0x34, 0x7f, 0x6f, 0xfd, 0x80, 0xff, 0x22, 0x5c, 0xd3, 0xa0,
+ 0x7e, 0x37, 0x56, 0x30, 0xc2, 0x59, 0xfe, 0x53, 0xff, 0x95, 0x16, 0xb6,
+ 0xc0, 0xb8, 0x01, 0x10, 0xa6, 0x89, 0xdb, 0x24, 0xc3, 0xbd, 0xce, 0xb5,
+ 0x77, 0x0e, 0x1f, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e,
+ 0x0b, 0x89, 0x2f, 0x5c, 0xdd, 0x8c, 0x05, 0xb9, 0x41, 0x7c, 0x1a, 0xce,
+ 0xbf, 0x23, 0x01, 0x0c, 0xe9, 0x74, 0x9c, 0x6c, 0xbb, 0xf7, 0xf7, 0x53,
+ 0xfe, 0xc2, 0xd2, 0xe4, 0xb3, 0x3f, 0x5a, 0x06, 0x41, 0x02, 0x30, 0xdc,
+ 0x2f, 0xa4, 0x1c, 0x6b, 0x29, 0x27, 0x10, 0x68, 0xa1, 0x8a, 0x14, 0x16,
+ 0xab, 0xb9, 0xaf, 0xe4, 0x73, 0xb9, 0xec, 0x29, 0x72, 0xd9, 0x03, 0x5c,
+ 0x26, 0x79, 0x2a, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x28, 0xff, 0x7b, 0xe0, 0xfc, 0x9e, 0x23, 0xd2, 0xf5, 0xe0, 0x7f, 0xef,
+ 0xb8, 0x63, 0xa2, 0x40, 0x1b, 0x61, 0x96, 0xe4, 0x67, 0xcb, 0x5b, 0x0e,
+ 0x30, 0xa9, 0xa3, 0x6b, 0x9e, 0xc2, 0xfb, 0xfc, 0x06, 0xef, 0x3f, 0x4e,
+ 0xdf, 0x56, 0x80, 0x15, 0x72, 0x9b, 0xb1, 0x97, 0xc9, 0xf5, 0x26, 0x0b,
+ 0x52, 0xb0, 0xb4, 0xfe, 0xb6, 0x04, 0x15, 0x86, 0x26, 0x51, 0xb3, 0x20,
+ 0x63, 0xf1, 0x99, 0xba, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x34,
+ 0xdc, 0xfa, 0xf4, 0x3a, 0x05, 0xb6, 0x4e, 0xa9, 0x1d, 0xb7, 0x31, 0x9c,
+ 0x19, 0x16, 0x32, 0xe8, 0x3a, 0x60, 0xe8, 0xab, 0x97, 0x7a, 0x9c, 0x9d,
+ 0x85, 0x42, 0x8e, 0x55, 0xee, 0x3a, 0x97, 0x81, 0x71, 0xc3, 0x42, 0x1b,
+ 0x5b, 0x6d, 0x51, 0xc0, 0x01, 0xed, 0x96, 0x12, 0x52, 0x56, 0x02, 0x26,
+ 0x6c, 0xc1, 0xdb, 0xed, 0x90, 0x72, 0x2e, 0x36, 0xfa, 0xa6, 0x4f, 0x19,
+ 0xc2, 0xc7, 0x0c, 0xba, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+static const struct fpm_entry fpm_table_scalar[CPT_EC_ID_PMAX] = {
+ {
+ .data = fpm_table_p192,
+ .len = sizeof(fpm_table_p192)
+ },
+ {
+ .data = fpm_table_p224,
+ .len = sizeof(fpm_table_p224)
+ },
+ {
+ .data = fpm_table_p256,
+ .len = sizeof(fpm_table_p256)
+ },
+ {
+ .data = fpm_table_p384,
+ .len = sizeof(fpm_table_p384)
+ },
+ {
+ .data = fpm_table_p521,
+ .len = sizeof(fpm_table_p521)
+ }
+};
+
+static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
+static uint8_t *fpm_table;
+static int nb_devs;
+
+int cpt_fpm_init(uint64_t *fpm_table_iova)
+{
+ int i, len = 0;
+ uint8_t *data;
+
+ if (fpm_table_iova == NULL)
+ return -EINVAL;
+
+ rte_spinlock_lock(&lock);
+
+ if (nb_devs != 0)
+ goto update_nb_devs;
+
+ /* Compute FPM table size for all supported curves */
+ for (i = 0; i < CPT_EC_ID_PMAX; i++)
+ len += fpm_table_scalar[i].len;
+
+ /* Allocate shared FPM table */
+ fpm_table = rte_malloc(NULL, len, 0);
+
+ if (fpm_table == NULL) {
+ rte_spinlock_unlock(&lock);
+ return -ENOMEM;
+ }
+ data = fpm_table;
+
+ for (i = CPT_EC_ID_P192; i < CPT_EC_ID_PMAX; i++) {
+ memcpy(data, fpm_table_scalar[i].data, fpm_table_scalar[i].len);
+ /* Convert iova address to big endian to be used by cpt */
+ fpm_table_iova[i] = rte_cpu_to_be_64(rte_mem_virt2iova(data));
+
+ data = RTE_PTR_ADD(data, fpm_table_scalar[i].len);
+ }
+
+update_nb_devs:
+
+ /* Increment number of devices using FPM table */
+ nb_devs++;
+
+ rte_spinlock_unlock(&lock);
+
+ return 0;
+}
+
+void cpt_fpm_clear(void)
+{
+ rte_spinlock_lock(&lock);
+
+ /* Decrement number of devices using FPM table */
+ nb_devs--;
+ if ((nb_devs == 0) && (fpm_table != NULL)) {
+  rte_free(fpm_table);
+  fpm_table = NULL;
+ }
+
+ rte_spinlock_unlock(&lock);
+}
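+
+/*
+ * A minimal usage sketch of the reference-counted FPM table API above.
+ * The device structure and the configure/close hooks are hypothetical;
+ * only cpt_fpm_init()/cpt_fpm_clear() come from this file, and
+ * CPT_EC_ID_PMAX comes from cpt_mcode_defines.h.
+ */
+#ifdef CPT_FPM_USAGE_SKETCH
+struct hypothetical_cpt_dev {
+ /* One big-endian IOVA per supported curve, consumed by the AE */
+ uint64_t fpm_iova[CPT_EC_ID_PMAX];
+};
+
+static int
+hypothetical_dev_configure(struct hypothetical_cpt_dev *dev)
+{
+ /* The first caller allocates and fills the shared table; later
+  * callers only bump the device reference count.
+  */
+ return cpt_fpm_init(dev->fpm_iova);
+}
+
+static void
+hypothetical_dev_close(struct hypothetical_cpt_dev *dev)
+{
+ (void)dev;
+ /* The table is freed only when the last device releases it */
+ cpt_fpm_clear();
+}
+#endif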
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_hw_types.h b/src/spdk/dpdk/drivers/common/cpt/cpt_hw_types.h
new file mode 100644
index 000000000..e2b127de4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_hw_types.h
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_HW_TYPES_H_
+#define _CPT_HW_TYPES_H_
+
+#include <rte_byteorder.h>
+
+/*
+ * This file defines HRM-specific structs.
+ */
+
+#define CPT_VF_INTR_MBOX_MASK (1<<0)
+#define CPT_VF_INTR_DOVF_MASK (1<<1)
+#define CPT_VF_INTR_IRDE_MASK (1<<2)
+#define CPT_VF_INTR_NWRP_MASK (1<<3)
+#define CPT_VF_INTR_SWERR_MASK (1<<4)
+#define CPT_VF_INTR_HWERR_MASK (1<<5)
+#define CPT_VF_INTR_FAULT_MASK (1<<6)
+
+#define CPT_INST_SIZE (64)
+#define CPT_NEXT_CHUNK_PTR_SIZE (8)
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t opcode;
+ uint16_t param1;
+ uint16_t param2;
+ uint16_t dlen;
+#else
+ uint16_t dlen;
+ uint16_t param2;
+ uint16_t param1;
+ uint16_t opcode;
+#endif
+ } s;
+} vq_cmd_word0_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t grp : 3;
+ uint64_t cptr : 61;
+#else
+ uint64_t cptr : 61;
+ uint64_t grp : 3;
+#endif
+ } s;
+} vq_cmd_word3_t;
+
+typedef struct cpt_vq_command {
+ vq_cmd_word0_t cmd;
+ uint64_t dptr;
+ uint64_t rptr;
+ vq_cmd_word3_t cptr;
+} cpt_vq_cmd_t;
+
+/**
+ * Structure cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout.
+ * Instructions are stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ */
+typedef union cpt_inst_s {
+ uint64_t u[8];
+ struct cpt_inst_s_8s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ /* [ 16: 16] Done interrupt.
+ * 0 = No interrupts related to this instruction.
+ * 1 = When the instruction completes, CPT()_VQ()_DONE[DONE]
+ * will be incremented, and based on the rules described
+ * there an interrupt may occur.
+ */
+ uint64_t doneint : 1;
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t doneint : 1;
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 1 - Big Endian */
+ /* [127: 64] Result IOVA.
+ * If nonzero, specifies where to write CPT_RES_S.
+ * If zero, no result structure will be written.
+ * Address must be 16-byte aligned.
+ *
+ * Bits <63:49> are ignored by hardware; software should
+ * use a sign-extended bit <48> for forward compatibility.
+ */
+ uint64_t res_addr : 64;
+#else /* Word 1 - Little Endian */
+ uint64_t res_addr : 64;
+#endif /* Word 1 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 2 - Big Endian */
+ uint64_t reserved_172_191 : 20;
+ /* [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to
+ * use when CPT submits work to SSO.
+ * For the SSO to not discard the add-work request, FPA_PF_MAP()
+ * must map [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ */
+ uint64_t grp : 10;
+ /* [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use
+ * when CPT submits work to SSO.
+ */
+ uint64_t tt : 2;
+ /* [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when
+ * CPT submits work to SSO.
+ */
+ uint64_t tag : 32;
+#else /* Word 2 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t reserved_172_191 : 20;
+#endif /* Word 2 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 3 - Big Endian */
+ /** [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ * work-queue entry that CPT submits work to SSO after all
+ * context, output data, and result write operations are
+ * visible to other CNXXXX units and the cores.
+ * Bits <2:0> must be zero.
+ * Bits <63:49> are ignored by hardware; software should use a
+ * sign-extended bit <48> for forward compatibility.
+ * Internal: Bits <63:49>, <2:0> are ignored by hardware,
+ * treated as always 0x0.
+ **/
+ uint64_t wq_ptr : 64;
+#else /* Word 3 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 3 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 4 - Big Endian */
+ union {
+ /** [319:256] Engine instruction word 0. Passed to the
+ * AE/SE.
+ **/
+ uint64_t ei0 : 64;
+ vq_cmd_word0_t vq_cmd_w0;
+ };
+#else /* Word 4 - Little Endian */
+ union {
+ uint64_t ei0 : 64;
+ vq_cmd_word0_t vq_cmd_w0;
+ };
+#endif /* Word 4 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 5 - Big Endian */
+ union {
+ /** [383:320] Engine instruction word 1. Passed to the
+ * AE/SE.
+ **/
+ uint64_t ei1 : 64;
+ uint64_t dptr;
+ };
+#else /* Word 5 - Little Endian */
+ union {
+ uint64_t ei1 : 64;
+ uint64_t dptr;
+ };
+#endif /* Word 5 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 6 - Big Endian */
+ union {
+ /** [447:384] Engine instruction word 2. Passed to the
+ * AE/SE.
+ **/
+ uint64_t ei2 : 64;
+ uint64_t rptr;
+ };
+#else /* Word 6 - Little Endian */
+ union {
+ uint64_t ei2 : 64;
+ uint64_t rptr;
+ };
+#endif /* Word 6 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 7 - Big Endian */
+ union {
+ /** [511:448] Engine instruction word 3. Passed to the
+ * AE/SE.
+ **/
+ uint64_t ei3 : 64;
+ vq_cmd_word3_t vq_cmd_w3;
+ };
+#else /* Word 7 - Little Endian */
+ union {
+ uint64_t ei3 : 64;
+ vq_cmd_word3_t vq_cmd_w3;
+ };
+#endif /* Word 7 - End */
+ } s8x;
+ struct cpt_inst_s_9s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t nixtx_addr : 60;
+ uint64_t doneint : 1;
+ uint64_t nixtxl : 3;
+#else /* Word 0 - Little Endian */
+ uint64_t nixtxl : 3;
+ uint64_t doneint : 1;
+ uint64_t nixtx_addr : 60;
+#endif /* Word 0 - End */
+ uint64_t res_addr;
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 2 - Big Endian */
+ uint64_t rvu_pf_func : 16;
+ uint64_t reserved_172_175 : 4;
+ uint64_t grp : 10;
+ uint64_t tt : 2;
+ uint64_t tag : 32;
+#else /* Word 2 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t reserved_172_175 : 4;
+ uint64_t rvu_pf_func : 16;
+#endif /* Word 2 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 3 - Big Endian */
+ uint64_t wq_ptr : 61;
+ uint64_t reserved_194_193 : 2;
+ uint64_t qord : 1;
+#else /* Word 3 - Little Endian */
+ uint64_t qord : 1;
+ uint64_t reserved_194_193 : 2;
+ uint64_t wq_ptr : 61;
+#endif /* Word 3 - End */
+ uint64_t ei0;
+ uint64_t ei1;
+ uint64_t ei2;
+ uint64_t ei3;
+ } s9x;
+} cpt_inst_s_t;
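+
+/*
+ * A hedged sketch of filling the 8.x instruction layout above for a
+ * symmetric-engine command. The opcode and IOVA arguments are caller
+ * supplied placeholders, and any endian conversion the microcode
+ * expects on the engine words is driver specific and omitted here.
+ */
+#ifdef CPT_INST_BUILD_SKETCH
+static inline void
+sketch_build_inst8x(cpt_inst_s_t *inst, uint16_t opcode, uint16_t dlen,
+      uint64_t res_iova, uint64_t dptr_iova,
+      uint64_t rptr_iova, uint64_t ctx_iova)
+{
+ inst->u[0] = 0;
+ inst->s8x.doneint = 1; /* request a DONE count increment */
+ inst->s8x.res_addr = res_iova; /* 16-byte aligned CPT_RES_S */
+ inst->u[2] = 0; /* no SSO tag/group */
+ inst->u[3] = 0; /* no SSO work-queue entry */
+
+ /* Engine words 0-3 carry the microcode command */
+ inst->s8x.vq_cmd_w0.s.opcode = opcode;
+ inst->s8x.vq_cmd_w0.s.param1 = 0;
+ inst->s8x.vq_cmd_w0.s.param2 = 0;
+ inst->s8x.vq_cmd_w0.s.dlen = dlen;
+ inst->s8x.dptr = dptr_iova; /* input data or gather list */
+ inst->s8x.rptr = rptr_iova; /* output data or scatter list */
+ inst->s8x.vq_cmd_w3.s.grp = 0;
+ inst->s8x.vq_cmd_w3.s.cptr = ctx_iova; /* context; fits in 61 bits */
+}
+#endif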
+
+/**
+ * Structure cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and each
+ * instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ */
+typedef union cpt_res_s {
+ uint64_t u[2];
+ struct cpt_res_s_8s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ /** [ 16: 16] Done interrupt. This bit is copied from the
+ * corresponding instruction's CPT_INST_S[DONEINT].
+ **/
+ uint64_t doneint : 1;
+ uint64_t reserved_8_15 : 8;
+ /** [ 7: 0] Indicates completion/error status of the CPT
+ * coprocessor for the associated instruction, as enumerated by
+ * CPT_COMP_E. Core software may write the memory location
+ * containing [COMPCODE] to 0x0 before ringing the doorbell, and
+ * then poll for completion by checking for a nonzero value.
+ *
+ * Once the core observes a nonzero [COMPCODE] value in this
+ * case, the CPT coprocessor will have also completed L2/DRAM
+ * write operations.
+ **/
+ uint64_t compcode : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t compcode : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t doneint : 1;
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 1 - Big Endian */
+ uint64_t reserved_64_127 : 64;
+#else /* Word 1 - Little Endian */
+ uint64_t reserved_64_127 : 64;
+#endif /* Word 1 - End */
+ } s8x;
+ struct cpt_res_s_9s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_17_63:47;
+ uint64_t doneint:1;
+ uint64_t uc_compcode:8;
+ uint64_t compcode:8;
+#else /* Word 0 - Little Endian */
+ uint64_t compcode:8;
+ uint64_t uc_compcode:8;
+ uint64_t doneint:1;
+ uint64_t reserved_17_63:47;
+#endif /* Word 0 - End */
+ uint64_t reserved_64_127;
+ } s9x;
+} cpt_res_s_t;
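+
+/*
+ * A sketch of the polling scheme the [COMPCODE] comment above
+ * describes: software zeroes the completion code before ringing the
+ * doorbell, then polls for a nonzero value. The literal codes are the
+ * CPT_8X_COMP_E_* values from cpt_mcode_defines.h; rte_pause() is from
+ * <rte_pause.h>.
+ */
+#ifdef CPT_RES_POLL_SKETCH
+static inline int
+sketch_poll_completion(volatile cpt_res_s_t *res)
+{
+ /* [COMPCODE] was written to 0x0 before submission */
+ while (res->s8x.compcode == 0x0) /* CPT_8X_COMP_E_NOTDONE */
+  rte_pause();
+
+ /* A nonzero code also means L2/DRAM writes have completed */
+ return (res->s8x.compcode == 0x1) ? 0 : -1; /* CPT_8X_COMP_E_GOOD */
+}
+#endif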
+
+/**
+ * Register (NCB) cpt#_vq#_ctl
+ *
+ * CPT VF Queue Control Registers
+ * This register configures queues. Other than clearing [ENA], it should be
+ * changed only when the queue is quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_ctl_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ /** [ 0: 0](R/W/H) Enables the logical instruction queue.
+ * See also CPT()_PF_Q()_CTL[CONT_ERR] and
+ * CPT()_VQ()_INPROG[INFLIGHT].
+ * 1 = Queue is enabled.
+ * 0 = Queue is disabled.
+ **/
+ uint64_t ena : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1;
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_ctl_t;
+
+/**
+ * Register (NCB) cpt#_vq#_done
+ *
+ * CPT Queue Done Count Registers
+ * These registers contain the per-queue instruction done count.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_done_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ /** [ 19: 0](R/W/H) Done count. When CPT_INST_S[DONEINT] is set
+ * and that instruction completes, CPT()_VQ()_DONE[DONE] is
+ * incremented. Writes to this field are for diagnostic use
+ * only; instead, software writes CPT()_VQ()_DONE_ACK with the
+ * number of decrements for this field.
+ *
+ * Interrupts are sent as follows:
+ *
+ * When CPT()_VQ()_DONE[DONE] = 0, then no results are pending,
+ * the interrupt coalescing timer is held to zero, and an
+ * interrupt is not sent.
+ *
+ * When CPT()_VQ()_DONE[DONE] != 0, then the interrupt
+ * coalescing timer counts. If the counter is >= CPT()_VQ()_DONE
+ * _WAIT[TIME_WAIT]*1024, or CPT()_VQ()_DONE[DONE] >= CPT()_VQ()
+ * _DONE_WAIT[NUM_WAIT], i.e. enough time has passed or enough
+ * results have arrived, then the interrupt is sent. Otherwise,
+ * it is not sent due to coalescing.
+ *
+ * When CPT()_VQ()_DONE_ACK is written (or CPT()_VQ()_DONE is
+ * written, though this is not typical), the interrupt coalescing
+ * timer restarts. Note that after decrementing, this interrupt
+ * equation is recomputed; for example, if CPT()_VQ()_DONE[DONE]
+ * >= CPT()_VQ()_DONE_WAIT[NUM_WAIT], then because the timer is
+ * zero the interrupt will be resent immediately. (This covers
+ * the race between software acknowledging an interrupt and a
+ * result returning.)
+ *
+ * When CPT()_VQ()_DONE_ENA_W1S[DONE] = 0, interrupts are not
+ * sent, but the counting described above still occurs.
+ *
+ * Since CPT instructions complete out-of-order, if software is
+ * using completion interrupts the suggested scheme is to
+ * request a DONEINT on each request, and when an interrupt
+ * arrives perform a "greedy" scan for completions; even if a
+ * later command is acknowledged first this will not result in
+ * missing a completion.
+ *
+ * Software is responsible for making sure [DONE] does not
+ * overflow; for example, by ensuring there are not more than
+ * 2^20-1 instructions in flight that may request interrupts.
+ **/
+ uint64_t done : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t done : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_done_t;
+
+/**
+ * Register (NCB) cpt#_vq#_done_ack
+ *
+ * CPT Queue Done Count Ack Registers
+ * This register is written by software to acknowledge interrupts.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_done_ack_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ /** [ 19: 0](R/W/H) On write, the number of decrements to
+ * apply to CPT()_VQ()_DONE[DONE]; on read, returns
+ * CPT()_VQ()_DONE[DONE].
+ *
+ * Written by software to acknowledge interrupts. If CPT()_VQ()_
+ * DONE[DONE] is still nonzero the interrupt will be re-sent if
+ * the conditions described in CPT()_VQ()_DONE[DONE] are
+ * satisfied.
+ **/
+ uint64_t done_ack : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t done_ack : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_done_ack_t;
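+
+/*
+ * A sketch of the acknowledge flow implied by the DONE and DONE_ACK
+ * descriptions above. rte_read64()/rte_write64() are from <rte_io.h>;
+ * the register addresses are assumed to be mapped already.
+ */
+#ifdef CPT_DONE_ACK_SKETCH
+static inline void
+sketch_handle_done_irq(volatile void *done_reg, volatile void *done_ack_reg)
+{
+ cptx_vqx_done_t done;
+ cptx_vqx_done_ack_t ack = { .u = 0 };
+
+ done.u = rte_read64(done_reg);
+ if (done.s.done == 0)
+  return;
+
+ /* ... greedy scan: walk all pending CPT_RES_S structures here,
+  * since completions may arrive out of order ...
+  */
+
+ ack.s.done_ack = done.s.done; /* acknowledge exactly what was seen */
+ rte_write64(ack.u, done_ack_reg);
+}
+#endif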
+
+/**
+ * Register (NCB) cpt#_vq#_done_wait
+ *
+ * CPT Queue Done Interrupt Coalescing Wait Registers
+ * Specifies the per-queue interrupt coalescing settings.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_done_wait_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ /** [ 47: 32](R/W) Time hold-off. When CPT()_VQ()_DONE[DONE] =
+ * 0, or CPT()_VQ()_DONE_ACK is written, a timer is cleared. When
+ * the timer reaches [TIME_WAIT]*1024 then interrupt coalescing
+ * ends; see CPT()_VQ()_DONE[DONE]. If 0x0, time coalescing is
+ * disabled.
+ **/
+ uint64_t time_wait : 16;
+ uint64_t reserved_20_31 : 12;
+ /** [ 19: 0](R/W) Number of messages hold-off. When
+ * CPT()_VQ()_DONE[DONE] >= [NUM_WAIT] then interrupt coalescing
+ * ends; see CPT()_VQ()_DONE[DONE]. If 0x0, same behavior as
+ * 0x1.
+ **/
+ uint64_t num_wait : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t num_wait : 20;
+ uint64_t reserved_20_31 : 12;
+ uint64_t time_wait : 16;
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_done_wait_t;
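+
+/*
+ * A sketch of programming the coalescing thresholds above; the values
+ * are arbitrary examples, not recommended settings. rte_write64() is
+ * from <rte_io.h>.
+ */
+#ifdef CPT_DONE_WAIT_SKETCH
+static inline void
+sketch_set_coalescing(volatile void *done_wait_reg)
+{
+ cptx_vqx_done_wait_t w = { .u = 0 };
+
+ w.s.time_wait = 16; /* interrupt after 16 * 1024 timer ticks... */
+ w.s.num_wait = 32; /* ...or once 32 results are pending */
+ rte_write64(w.u, done_wait_reg);
+}
+#endif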
+
+/**
+ * Register (NCB) cpt#_vq#_doorbell
+ *
+ * CPT Queue Doorbell Registers
+ * Doorbells for the CPT instruction queues.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_doorbell_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ /** [ 19: 0](R/W/H) Number of instruction queue 64-bit words
+ * to add to the CPT instruction doorbell count. Readback value
+ * is the current number of pending doorbell requests.
+ *
+ * If the counter overflows, CPT()_VQ()_MISC_INT[DBELL_DOVF] is
+ * set.
+ *
+ * To reset the count back to zero, write one to clear
+ * CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF], then write a value
+ * of 2^20 minus the read [DBELL_CNT], then write one to
+ * CPT()_VQ()_MISC_INT_W1C[DBELL_DOVF] and
+ * CPT()_VQ()_MISC_INT_ENA_W1S[DBELL_DOVF].
+ *
+ * Must be a multiple of 8; all CPT instructions are 8 words
+ * long, so each instruction adds 8 to the doorbell count.
+ **/
+ uint64_t dbell_cnt : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t dbell_cnt : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_doorbell_t;
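+
+/*
+ * A sketch of ringing the doorbell per the rules above. Each CPT
+ * instruction is CPT_INST_SIZE (64) bytes, i.e. eight 64-bit words, so
+ * the count written is always a multiple of 8. rte_write64() is from
+ * <rte_io.h>.
+ */
+#ifdef CPT_DOORBELL_SKETCH
+static inline void
+sketch_ring_doorbell(volatile void *dbell_reg, uint32_t nb_inst)
+{
+ cptx_vqx_doorbell_t db = { .u = 0 };
+
+ db.s.dbell_cnt = nb_inst * (CPT_INST_SIZE / 8);
+ rte_write64(db.u, dbell_reg);
+}
+#endif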
+
+/**
+ * Register (NCB) cpt#_vq#_inprog
+ *
+ * CPT Queue In Progress Count Registers
+ * These registers contain the per-queue in-flight instruction count.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_inprog_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ /** [ 7: 0](RO/H) Inflight count. Counts the number of
+ * instructions for the VF that CPT is currently fetching,
+ * executing or responding to. However, this does not include
+ * any interrupts that are awaiting software handling
+ * (CPT()_VQ()_DONE[DONE] != 0x0).
+ *
+ * A queue may not be reconfigured until:
+ * 1. CPT()_VQ()_CTL[ENA] is cleared by software.
+ * 2. [INFLIGHT] is polled until it equals zero.
+ **/
+ uint64_t inflight : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t inflight : 8;
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_inprog_t;
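+
+/*
+ * A sketch of the reconfiguration rule in the [INFLIGHT] comment above:
+ * clear CPT()_VQ()_CTL[ENA], then poll [INFLIGHT] until it reaches zero
+ * before changing any other queue register. rte_read64()/rte_write64()
+ * are from <rte_io.h>.
+ */
+#ifdef CPT_QUIESCE_SKETCH
+static inline void
+sketch_quiesce_queue(volatile void *ctl_reg, volatile void *inprog_reg)
+{
+ cptx_vqx_ctl_t ctl = { .u = 0 }; /* [ENA] = 0 */
+ cptx_vqx_inprog_t inprog;
+
+ rte_write64(ctl.u, ctl_reg); /* 1. disable the queue */
+ do { /* 2. wait for quiescence */
+  inprog.u = rte_read64(inprog_reg);
+ } while (inprog.s.inflight != 0);
+}
+#endif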
+
+/**
+ * Register (NCB) cpt#_vq#_misc_int
+ *
+ * CPT Queue Misc Interrupt Register
+ * These registers contain the per-queue miscellaneous interrupts.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_misc_int_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ /** [ 6: 6](R/W1C/H) Translation fault detected. */
+ uint64_t fault : 1;
+ /** [ 5: 5](R/W1C/H) Hardware error from engines. */
+ uint64_t hwerr : 1;
+ /** [ 4: 4](R/W1C/H) Software error from engines. */
+ uint64_t swerr : 1;
+ /** [ 3: 3](R/W1C/H) NCB result write response error. */
+ uint64_t nwrp : 1;
+ /** [ 2: 2](R/W1C/H) Instruction NCB read response error. */
+ uint64_t irde : 1;
+ /** [ 1: 1](R/W1C/H) Doorbell overflow. */
+ uint64_t dovf : 1;
+ /** [ 0: 0](R/W1C/H) PF to VF mailbox interrupt. Set when
+ * CPT()_VF()_PF_MBOX(0) is written.
+ **/
+ uint64_t mbox : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1;
+ uint64_t dovf : 1;
+ uint64_t irde : 1;
+ uint64_t nwrp : 1;
+ uint64_t swerr : 1;
+ uint64_t hwerr : 1;
+ uint64_t fault : 1;
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_misc_int_t;
+
+/**
+ * Register (NCB) cpt#_vq#_saddr
+ *
+ * CPT Queue Starting Buffer Address Registers
+ * These registers set the instruction buffer starting address.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_saddr_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ /** [ 48: 6](R/W/H) Instruction buffer IOVA <48:6>
+ * (64-byte aligned). When written, it is the initial buffer
+ * starting address; when read, it is the next read pointer to
+ * be requested from L2C. The PTR field is overwritten with the
+ * next pointer each time that the command buffer segment is
+ * exhausted. New commands will then be read from the newly
+ * specified command buffer pointer.
+ **/
+ uint64_t ptr : 43;
+ uint64_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_5 : 6;
+ uint64_t ptr : 43;
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_saddr_t;
+
+#endif /*_CPT_HW_TYPES_H_ */
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_mcode_defines.h b/src/spdk/dpdk/drivers/common/cpt/cpt_mcode_defines.h
new file mode 100644
index 000000000..69d831b5c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_mcode_defines.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_MCODE_DEFINES_H_
+#define _CPT_MCODE_DEFINES_H_
+
+#include <rte_byteorder.h>
+#include <rte_crypto_asym.h>
+#include <rte_memory.h>
+
+/*
+ * This file defines macros and structures according to the microcode spec.
+ */
+/* SE opcodes */
+#define CPT_MAJOR_OP_FC 0x33
+#define CPT_MAJOR_OP_HASH 0x34
+#define CPT_MAJOR_OP_HMAC 0x35
+#define CPT_MAJOR_OP_ZUC_SNOW3G 0x37
+#define CPT_MAJOR_OP_KASUMI 0x38
+#define CPT_MAJOR_OP_MISC 0x01
+
+/* AE opcodes */
+#define CPT_MAJOR_OP_MODEX 0x03
+#define CPT_MAJOR_OP_ECDSA 0x04
+#define CPT_MAJOR_OP_ECC 0x05
+#define CPT_MINOR_OP_MODEX 0x01
+#define CPT_MINOR_OP_PKCS_ENC 0x02
+#define CPT_MINOR_OP_PKCS_ENC_CRT 0x03
+#define CPT_MINOR_OP_PKCS_DEC 0x04
+#define CPT_MINOR_OP_PKCS_DEC_CRT 0x05
+#define CPT_MINOR_OP_MODEX_CRT 0x06
+#define CPT_MINOR_OP_ECDSA_SIGN 0x01
+#define CPT_MINOR_OP_ECDSA_VERIFY 0x02
+#define CPT_MINOR_OP_ECC_UMP 0x03
+
+#define CPT_BLOCK_TYPE1 0
+#define CPT_BLOCK_TYPE2 1
+
+#define CPT_BYTE_16 16
+#define CPT_BYTE_24 24
+#define CPT_BYTE_32 32
+#define CPT_MAX_SG_IN_OUT_CNT 32
+#define CPT_MAX_SG_CNT (CPT_MAX_SG_IN_OUT_CNT/2)
+
+#define COMPLETION_CODE_SIZE 8
+#define COMPLETION_CODE_INIT 0
+
+#define SG_LIST_HDR_SIZE (8u)
+#define SG_ENTRY_SIZE sizeof(sg_comp_t)
+
+#define CPT_DMA_MODE (1 << 7)
+
+#define CPT_FROM_CTX 0
+#define CPT_FROM_DPTR 1
+
+#define FC_GEN 0x1
+#define ZUC_SNOW3G 0x2
+#define KASUMI 0x3
+#define HASH_HMAC 0x4
+
+#define ZS_EA 0x1
+#define ZS_IA 0x2
+#define K_F8 0x4
+#define K_F9 0x8
+
+#define CPT_OP_CIPHER_ENCRYPT 0x1
+#define CPT_OP_CIPHER_DECRYPT 0x2
+#define CPT_OP_CIPHER_MASK 0x3
+
+#define CPT_OP_AUTH_VERIFY 0x4
+#define CPT_OP_AUTH_GENERATE 0x8
+#define CPT_OP_AUTH_MASK 0xC
+
+#define CPT_OP_ENCODE (CPT_OP_CIPHER_ENCRYPT | CPT_OP_AUTH_GENERATE)
+#define CPT_OP_DECODE (CPT_OP_CIPHER_DECRYPT | CPT_OP_AUTH_VERIFY)
+
+/* #define CPT_ALWAYS_USE_SG_MODE */
+#define CPT_ALWAYS_USE_SEPARATE_BUF
+
+/*
+ * Parameters for Flexi Crypto
+ * requests
+ */
+#define VALID_AAD_BUF 0x01
+#define VALID_MAC_BUF 0x02
+#define VALID_IV_BUF 0x04
+#define SINGLE_BUF_INPLACE 0x08
+#define SINGLE_BUF_HEADTAILROOM 0x10
+
+#define ENCR_IV_OFFSET(__d_offs) ((__d_offs >> 32) & 0xffff)
+#define ENCR_OFFSET(__d_offs) ((__d_offs >> 16) & 0xffff)
+#define AUTH_OFFSET(__d_offs) (__d_offs & 0xffff)
+#define ENCR_DLEN(__d_lens) (__d_lens >> 32)
+#define AUTH_DLEN(__d_lens) (__d_lens & 0xffffffff)
+
+/* FC offset_control at start of DPTR in bytes */
+#define OFF_CTRL_LEN 8 /**< bytes */
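+
+/*
+ * The extraction macros above imply this packing of the request's
+ * offset/length words; the composition helpers below are a sketch of
+ * the inverse direction, not part of the microcode spec:
+ *
+ *   d_offs = [ unused:16 | IV off:16 | encr off:16 | auth off:16 ]
+ *   d_lens = [ encr dlen:32 | auth dlen:32 ]
+ */
+#ifdef CPT_OFFS_PACK_SKETCH
+static inline uint64_t
+sketch_pack_d_offs(uint16_t iv_off, uint16_t encr_off, uint16_t auth_off)
+{
+ return ((uint64_t)iv_off << 32) | ((uint64_t)encr_off << 16) |
+        auth_off;
+}
+
+static inline uint64_t
+sketch_pack_d_lens(uint32_t encr_dlen, uint32_t auth_dlen)
+{
+ return ((uint64_t)encr_dlen << 32) | auth_dlen;
+}
+#endif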
+
+typedef enum {
+ MD5_TYPE = 1,
+ SHA1_TYPE = 2,
+ SHA2_SHA224 = 3,
+ SHA2_SHA256 = 4,
+ SHA2_SHA384 = 5,
+ SHA2_SHA512 = 6,
+ GMAC_TYPE = 7,
+ XCBC_TYPE = 8,
+ SHA3_SHA224 = 10,
+ SHA3_SHA256 = 11,
+ SHA3_SHA384 = 12,
+ SHA3_SHA512 = 13,
+ SHA3_SHAKE256 = 14,
+ SHA3_SHAKE512 = 15,
+
+ /* These are only for software use */
+ ZUC_EIA3 = 0x90,
+ SNOW3G_UIA2 = 0x91,
+ KASUMI_F9_CBC = 0x92,
+ KASUMI_F9_ECB = 0x93,
+} mc_hash_type_t;
+
+typedef enum {
+ /* To support passthrough */
+ PASSTHROUGH = 0x0,
+ /*
+ * These are defined by MC for Flexi crypto
+ * for field of 4 bits
+ */
+ DES3_CBC = 0x1,
+ DES3_ECB = 0x2,
+ AES_CBC = 0x3,
+ AES_ECB = 0x4,
+ AES_CFB = 0x5,
+ AES_CTR = 0x6,
+ AES_GCM = 0x7,
+ AES_XTS = 0x8,
+
+ /* These are only for software use */
+ ZUC_EEA3 = 0x90,
+ SNOW3G_UEA2 = 0x91,
+ KASUMI_F8_CBC = 0x92,
+ KASUMI_F8_ECB = 0x93,
+} mc_cipher_type_t;
+
+typedef enum {
+ AES_128_BIT = 0x1,
+ AES_192_BIT = 0x2,
+ AES_256_BIT = 0x3
+} mc_aes_type_t;
+
+typedef enum {
+ /* Microcode errors */
+ NO_ERR = 0x00,
+ ERR_OPCODE_UNSUPPORTED = 0x01,
+
+ /* SCATTER GATHER */
+ ERR_SCATTER_GATHER_WRITE_LENGTH = 0x02,
+ ERR_SCATTER_GATHER_LIST = 0x03,
+ ERR_SCATTER_GATHER_NOT_SUPPORTED = 0x04,
+
+ /* SE GC */
+ ERR_GC_LENGTH_INVALID = 0x41,
+ ERR_GC_RANDOM_LEN_INVALID = 0x42,
+ ERR_GC_DATA_LEN_INVALID = 0x43,
+ ERR_GC_DRBG_TYPE_INVALID = 0x44,
+ ERR_GC_CTX_LEN_INVALID = 0x45,
+ ERR_GC_CIPHER_UNSUPPORTED = 0x46,
+ ERR_GC_AUTH_UNSUPPORTED = 0x47,
+ ERR_GC_OFFSET_INVALID = 0x48,
+ ERR_GC_HASH_MODE_UNSUPPORTED = 0x49,
+ ERR_GC_DRBG_ENTROPY_LEN_INVALID = 0x4a,
+ ERR_GC_DRBG_ADDNL_LEN_INVALID = 0x4b,
+ ERR_GC_ICV_MISCOMPARE = 0x4c,
+ ERR_GC_DATA_UNALIGNED = 0x4d,
+
+ /* API Layer */
+ ERR_BAD_ALT_CCODE = 0xfd,
+ ERR_REQ_PENDING = 0xfe,
+ ERR_REQ_TIMEOUT = 0xff,
+
+ ERR_BAD_INPUT_LENGTH = (0x40000000 | 384), /* 0x40000180 */
+ ERR_BAD_KEY_LENGTH,
+ ERR_BAD_KEY_HANDLE,
+ ERR_BAD_CONTEXT_HANDLE,
+ ERR_BAD_SCALAR_LENGTH,
+ ERR_BAD_DIGEST_LENGTH,
+ ERR_BAD_INPUT_ARG,
+ ERR_BAD_RECORD_PADDING,
+ ERR_NB_REQUEST_PENDING,
+ ERR_EIO,
+ ERR_ENODEV,
+} mc_error_code_t;
+
+/**
+ * Enumeration cpt_comp_e
+ *
+ * CPT Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+typedef enum {
+ CPT_8X_COMP_E_NOTDONE = (0x00),
+ CPT_8X_COMP_E_GOOD = (0x01),
+ CPT_8X_COMP_E_FAULT = (0x02),
+ CPT_8X_COMP_E_SWERR = (0x03),
+ CPT_8X_COMP_E_HWERR = (0x04),
+ CPT_8X_COMP_E_LAST_ENTRY = (0xFF)
+} cpt_comp_e_t;
+
+/**
+ * Enumeration cpt_ec_id
+ *
+ * Enumerates supported elliptic curves
+ */
+typedef enum {
+ CPT_EC_ID_P192 = 0,
+ CPT_EC_ID_P224 = 1,
+ CPT_EC_ID_P256 = 2,
+ CPT_EC_ID_P384 = 3,
+ CPT_EC_ID_P521 = 4,
+ CPT_EC_ID_PMAX = 5
+} cpt_ec_id_t;
+
+typedef struct sglist_comp {
+ union {
+ uint64_t len;
+ struct {
+ uint16_t len[4];
+ } s;
+ } u;
+ uint64_t ptr[4];
+} sg_comp_t;
+
+struct cpt_sess_misc {
+ /** CPT opcode */
+ uint16_t cpt_op:4;
+ /** ZUC, SNOW3G & KASUMI flags */
+ uint16_t zsk_flag:4;
+ /** Flag for AES GCM */
+ uint16_t aes_gcm:1;
+ /** Flag for AES CTR */
+ uint16_t aes_ctr:1;
+ /** Flag for NULL cipher/auth */
+ uint16_t is_null:1;
+ /** Flag for GMAC */
+ uint16_t is_gmac:1;
+ /** Engine group */
+ uint16_t egrp:3;
+ /** AAD length */
+ uint16_t aad_length;
+ /** MAC len in bytes */
+ uint8_t mac_len;
+ /** IV length in bytes */
+ uint8_t iv_length;
+ /** Auth IV length in bytes */
+ uint8_t auth_iv_length;
+ /** Reserved field */
+ uint8_t rsvd1;
+ /** IV offset in bytes */
+ uint16_t iv_offset;
+ /** Auth IV offset in bytes */
+ uint16_t auth_iv_offset;
+ /** Salt */
+ uint32_t salt;
+ /** Context DMA address */
+ phys_addr_t ctx_dma_addr;
+};
+
+typedef struct {
+ uint64_t iv_source : 1;
+ uint64_t aes_key : 2;
+ uint64_t rsvd_60 : 1;
+ uint64_t enc_cipher : 4;
+ uint64_t auth_input_type : 1;
+ uint64_t rsvd_52_54 : 3;
+ uint64_t hash_type : 4;
+ uint64_t mac_len : 8;
+ uint64_t rsvd_39_0 : 40;
+ uint8_t encr_key[32];
+ uint8_t encr_iv[16];
+} mc_enc_context_t;
+
+typedef struct {
+ uint8_t ipad[64];
+ uint8_t opad[64];
+} mc_fc_hmac_context_t;
+
+typedef struct {
+ mc_enc_context_t enc;
+ mc_fc_hmac_context_t hmac;
+} mc_fc_context_t;
+
+typedef struct {
+ uint8_t encr_auth_iv[16];
+ uint8_t ci_key[16];
+ uint8_t zuc_const[32];
+} mc_zuc_snow3g_ctx_t;
+
+typedef struct {
+ uint8_t reg_A[8];
+ uint8_t ci_key[16];
+} mc_kasumi_ctx_t;
+
+struct cpt_ctx {
+ /* Below fields are accessed by sw */
+ uint64_t enc_cipher :8;
+ uint64_t hash_type :8;
+ uint64_t mac_len :8;
+ uint64_t auth_key_len :8;
+ uint64_t fc_type :4;
+ uint64_t hmac :1;
+ uint64_t zsk_flags :3;
+ uint64_t k_ecb :1;
+ uint64_t snow3g :2;
+ uint64_t rsvd :21;
+ /* Below fields are accessed by hardware */
+ union {
+ mc_fc_context_t fctx;
+ mc_zuc_snow3g_ctx_t zs_ctx;
+ mc_kasumi_ctx_t k_ctx;
+ };
+ uint8_t auth_key[1024];
+};
+
+/* Prime and order fields of built-in elliptic curves */
+struct cpt_ec_group {
+ struct {
+ /* P521 maximum length */
+ uint8_t data[66];
+ unsigned int length;
+ } prime;
+
+ struct {
+ /* P521 maximum length */
+ uint8_t data[66];
+ unsigned int length;
+ } order;
+};
+
+struct cpt_asym_ec_ctx {
+	/* Curve ID; the microcode derives the prime length from it for EC operations */
+ uint8_t curveid;
+};
+
+struct cpt_asym_sess_misc {
+ enum rte_crypto_asym_xform_type xfrm_type;
+ union {
+ struct rte_crypto_rsa_xform rsa_ctx;
+ struct rte_crypto_modex_xform mod_ctx;
+ struct cpt_asym_ec_ctx ec_ctx;
+ };
+};
+
+/* Buffer pointer */
+typedef struct buf_ptr {
+ void *vaddr;
+ phys_addr_t dma_addr;
+ uint32_t size;
+ uint32_t resv;
+} buf_ptr_t;
+
+/* IOV Pointer */
+typedef struct {
+ int buf_cnt;
+ buf_ptr_t bufs[0];
+} iov_ptr_t;
+
+typedef union opcode_info {
+ uint16_t flags;
+ struct {
+ uint8_t major;
+ uint8_t minor;
+ } s;
+} opcode_info_t;
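+
+/*
+ * opcode.flags overlays the {major, minor} pair as a single 16-bit value,
+ * which the prep routines write into vq_cmd_w0.s.opcode.
+ */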
+
+typedef struct fc_params {
+ /* 0th cache line */
+ union {
+ buf_ptr_t bufs[1];
+ struct {
+ iov_ptr_t *src_iov;
+ iov_ptr_t *dst_iov;
+ };
+ };
+ void *iv_buf;
+ void *auth_iv_buf;
+ buf_ptr_t meta_buf;
+ buf_ptr_t ctx_buf;
+ uint64_t rsvd2;
+
+ /* 1st cache line */
+ buf_ptr_t aad_buf;
+ buf_ptr_t mac_buf;
+
+} fc_params_t;
+
+/*
+ * Parameters for asymmetric operations
+ */
+struct asym_op_params {
+ struct cpt_request_info *req;
+ phys_addr_t meta_buf;
+};
+
+/*
+ * Parameters for digest generation requests.
+ * Only src_iov, op, ctx_buf, mac_buf, prep_req,
+ * meta_buf and auth_data_len are used for digest generation.
+ */
+typedef struct fc_params digest_params_t;
+
+/* Cipher Algorithms */
+typedef mc_cipher_type_t cipher_type_t;
+
+/* Auth Algorithms */
+typedef mc_hash_type_t auth_type_t;
+
+/* Helper macros */
+
+#define SRC_IOV_SIZE \
+ (sizeof(iov_ptr_t) + (sizeof(buf_ptr_t) * CPT_MAX_SG_CNT))
+#define DST_IOV_SIZE \
+ (sizeof(iov_ptr_t) + (sizeof(buf_ptr_t) * CPT_MAX_SG_CNT))
+
+#define SESS_PRIV(__sess) \
+ (void *)((uint8_t *)__sess + sizeof(struct cpt_sess_misc))
+
+/*
+ * Get the session size
+ *
+ * @return
+ * - session size
+ */
+static __rte_always_inline unsigned int
+cpt_get_session_size(void)
+{
+ unsigned int ctx_len = sizeof(struct cpt_ctx);
+ return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
+}
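+
+/*
+ * A minimal usage sketch (hypothetical PMD code, not part of this API):
+ * the session private area is laid out as cpt_sess_misc followed by an
+ * 8-byte aligned cpt_ctx, so a driver would typically do
+ *
+ *   struct cpt_sess_misc *misc = priv;     // cpt_get_session_size() bytes
+ *   struct cpt_ctx *ctx = SESS_PRIV(misc); // hw/sw context after misc
+ */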
+#endif /* _CPT_MCODE_DEFINES_H_ */
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_logs.h b/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_logs.h
new file mode 100644
index 000000000..174326c6c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_logs.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_PMD_LOGS_H_
+#define _CPT_PMD_LOGS_H_
+
+#include <rte_log.h>
+
+/*
+ * This file defines log macros
+ */
+
+/*
+ * The otx*_cryptodev.h file defines the CPT_LOGTYPE macro for the
+ * platform.
+ */
+#define CPT_PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, CPT_LOGTYPE, \
+ "cpt: %s(): " fmt "\n", __func__, ##args)
+
+#define CPT_PMD_INIT_FUNC_TRACE() CPT_PMD_DRV_LOG_RAW(DEBUG, " >>")
+
+#define CPT_LOG_INFO(fmt, args...) \
+ CPT_PMD_DRV_LOG_RAW(INFO, fmt, ## args)
+#define CPT_LOG_WARN(fmt, args...) \
+ CPT_PMD_DRV_LOG_RAW(WARNING, fmt, ## args)
+#define CPT_LOG_ERR(fmt, args...) \
+ CPT_PMD_DRV_LOG_RAW(ERR, fmt, ## args)
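+
+/*
+ * Example (hypothetical call site):
+ *   CPT_LOG_ERR("Could not allocate queue pair %u", qp_id);
+ * expands to:
+ *   rte_log(RTE_LOG_ERR, CPT_LOGTYPE,
+ *           "cpt: %s(): Could not allocate queue pair %u\n", __func__, qp_id);
+ */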
+
+/*
+ * DP logs, compiled out if the level is lower than the current
+ * compile-time level. DP logs are logged under the 'PMD' type, so for
+ * dynamic logging the level of 'pmd' has to be used.
+ */
+#define CPT_LOG_DP(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt "\n", ## args)
+
+#define CPT_LOG_DP_DEBUG(fmt, args...) \
+ CPT_LOG_DP(DEBUG, fmt, ## args)
+#define CPT_LOG_DP_INFO(fmt, args...) \
+ CPT_LOG_DP(INFO, fmt, ## args)
+#define CPT_LOG_DP_WARN(fmt, args...) \
+ CPT_LOG_DP(WARNING, fmt, ## args)
+#define CPT_LOG_DP_ERR(fmt, args...) \
+ CPT_LOG_DP(ERR, fmt, ## args)
+
+#endif /* _CPT_PMD_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.c b/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.c
new file mode 100644
index 000000000..09b762f81
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.c
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <rte_common.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
+#include "cpt_mcode_defines.h"
+#include "cpt_pmd_ops_helper.h"
+
+#define CPT_MAX_IV_LEN 16
+#define CPT_OFFSET_CONTROL_BYTES 8
+#define CPT_MAX_ASYM_OP_NUM_PARAMS 5
+#define CPT_MAX_ASYM_OP_MOD_LEN 1024
+
+int32_t
+cpt_pmd_ops_helper_get_mlen_direct_mode(void)
+{
+ uint32_t len = 0;
+
+ /* Request structure */
+ len = sizeof(struct cpt_request_info);
+
+	/* CPT HW result structure, doubled to leave room for 16B alignment */
+ len += 2*sizeof(cpt_res_s_t);
+
+ return len;
+}
+
+int
+cpt_pmd_ops_helper_get_mlen_sg_mode(void)
+{
+ uint32_t len = 0;
+
+ len += sizeof(struct cpt_request_info);
+ len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN;
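+	/* SG list: header plus one SG entry per four in/out buffers, 8B aligned */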
+ len += ROUNDUP8(SG_LIST_HDR_SIZE +
+ (ROUNDUP4(CPT_MAX_SG_IN_OUT_CNT) >> 2) * SG_ENTRY_SIZE);
+ len += 2 * COMPLETION_CODE_SIZE;
+ len += 2 * sizeof(cpt_res_s_t);
+ return len;
+}
+
+int
+cpt_pmd_ops_helper_asym_get_mlen(void)
+{
+ uint32_t len;
+
+ /* Get meta len for linear buffer (direct) mode */
+ len = cpt_pmd_ops_helper_get_mlen_direct_mode();
+
+ /* Get meta len for asymmetric operations */
+ len += CPT_MAX_ASYM_OP_NUM_PARAMS * CPT_MAX_ASYM_OP_MOD_LEN;
+ return len;
+}
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.h b/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.h
new file mode 100644
index 000000000..716ae94c8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_PMD_OPS_HELPER_H_
+#define _CPT_PMD_OPS_HELPER_H_
+
+/*
+ * This file defines the agreement between the common layer and the individual
+ * crypto drivers for the OCTEON TX series. The control path in the otx*
+ * directories can directly call the functions declared here.
+ */
+
+/*
+ * Get meta length required when operating in direct mode (single buffer
+ * in-place)
+ *
+ * @return
+ * - length
+ */
+
+int32_t
+cpt_pmd_ops_helper_get_mlen_direct_mode(void);
+
+/*
+ * Get size of contiguous meta buffer to be allocated when working in scatter
+ * gather mode.
+ *
+ * @return
+ * - length
+ */
+int
+cpt_pmd_ops_helper_get_mlen_sg_mode(void);
+
+/*
+ * Get size of meta buffer to be allocated for asymmetric crypto operations
+ *
+ * @return
+ * - length
+ */
+int
+cpt_pmd_ops_helper_asym_get_mlen(void);
+
+/*
+ * Initialize ECC FMUL precomputed table
+ *
+ * @param fpm_table_iova
+ * - pointer to the FPM table IOVA address
+ *
+ * @return
+ * - 0 on success, negative on error
+ */
+__rte_experimental
+int cpt_fpm_init(uint64_t *fpm_table_iova);
+
+/*
+ * Clear ECC FMUL precomputed table
+ */
+__rte_experimental
+void cpt_fpm_clear(void);
+
+#endif /* _CPT_PMD_OPS_HELPER_H_ */
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_ucode.h b/src/spdk/dpdk/drivers/common/cpt/cpt_ucode.h
new file mode 100644
index 000000000..34ccd08a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_ucode.h
@@ -0,0 +1,3505 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_UCODE_H_
+#define _CPT_UCODE_H_
+#include <stdbool.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
+#include "cpt_mcode_defines.h"
+
+/*
+ * This file defines functions that interface with the microcode spec.
+ */
+
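+/* ZUC D constants; copied verbatim into zs_ctx.zuc_const during key setup */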
+static uint8_t zuc_d[32] = {
+ 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
+ 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
+ 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
+ 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
+};
+
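+/*
+ * Convert the 16B SNOW 3G key into four 32-bit words in reversed word
+ * order (keyx[3] holds CK bytes 0..3); byte order within each word is
+ * preserved.
+ */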
+static __rte_always_inline void
+gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
+{
+ int i, base;
+
+ for (i = 0; i < 4; i++) {
+ base = 4 * i;
+ keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
+ (ck[base + 2] << 8) | (ck[base + 3]);
+ keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
+ }
+}
+
+static __rte_always_inline void
+cpt_fc_salt_update(void *ctx,
+ uint8_t *salt)
+{
+ struct cpt_ctx *cpt_ctx = ctx;
+ memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_validate_key_aes(uint16_t key_len)
+{
+ switch (key_len) {
+ case CPT_BYTE_16:
+ case CPT_BYTE_24:
+ case CPT_BYTE_32:
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
+{
+ int fc_type = 0;
+ switch (type) {
+ case PASSTHROUGH:
+ fc_type = FC_GEN;
+ break;
+ case DES3_CBC:
+ case DES3_ECB:
+ fc_type = FC_GEN;
+ break;
+ case AES_CBC:
+ case AES_ECB:
+ case AES_CFB:
+ case AES_CTR:
+ case AES_GCM:
+ if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
+ return -1;
+ fc_type = FC_GEN;
+ break;
+ case AES_XTS:
+ key_len = key_len / 2;
+ if (unlikely(key_len == CPT_BYTE_24)) {
+ CPT_LOG_DP_ERR("Invalid AES key len for XTS");
+ return -1;
+ }
+ if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
+ return -1;
+ fc_type = FC_GEN;
+ break;
+ case ZUC_EEA3:
+ case SNOW3G_UEA2:
+ if (unlikely(key_len != 16))
+ return -1;
+ /* No support for AEAD yet */
+ if (unlikely(ctx->hash_type))
+ return -1;
+ fc_type = ZUC_SNOW3G;
+ break;
+ case KASUMI_F8_CBC:
+ case KASUMI_F8_ECB:
+ if (unlikely(key_len != 16))
+ return -1;
+ /* No support for AEAD yet */
+ if (unlikely(ctx->hash_type))
+ return -1;
+ fc_type = KASUMI;
+ break;
+ default:
+ return -1;
+ }
+
+ ctx->fc_type = fc_type;
+ return 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
+{
+ cpt_ctx->enc_cipher = 0;
+ fctx->enc.enc_cipher = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
+{
+ mc_aes_type_t aes_key_type = 0;
+ switch (key_len) {
+ case CPT_BYTE_16:
+ aes_key_type = AES_128_BIT;
+ break;
+ case CPT_BYTE_24:
+ aes_key_type = AES_192_BIT;
+ break;
+ case CPT_BYTE_32:
+ aes_key_type = AES_256_BIT;
+ break;
+ default:
+ /* This should not happen */
+ CPT_LOG_DP_ERR("Invalid AES key len");
+ return;
+ }
+ fctx->enc.aes_key = aes_key_type;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
+ uint16_t key_len)
+{
+ uint32_t keyx[4];
+ cpt_ctx->snow3g = 1;
+ gen_key_snow3g(key, keyx);
+ memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ cpt_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
+ uint16_t key_len)
+{
+ cpt_ctx->snow3g = 0;
+ memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
+ memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ cpt_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
+ uint16_t key_len)
+{
+ cpt_ctx->k_ecb = 1;
+ memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ cpt_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
+ uint16_t key_len)
+{
+ memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ cpt_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
+ uint16_t key_len, uint8_t *salt)
+{
+ struct cpt_ctx *cpt_ctx = ctx;
+ mc_fc_context_t *fctx = &cpt_ctx->fctx;
+ int ret;
+
+ ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
+ if (unlikely(ret))
+ return -1;
+
+ if (cpt_ctx->fc_type == FC_GEN) {
+		/*
+		 * Always say the IV is from DPTR, as the user can
+		 * override the IV per operation.
+		 */
+ fctx->enc.iv_source = CPT_FROM_DPTR;
+
+ if (cpt_ctx->auth_key_len > 64)
+ return -1;
+ }
+
+ switch (type) {
+ case PASSTHROUGH:
+ cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
+ goto success;
+ case DES3_CBC:
+		/* CPT performs DES using 3DES, with the 8B DES key
+		 * replicated two more times to match the 24B 3DES key.
+		 * E.g. if the original key is "0x0a 0x0b", the new key is
+		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b".
+		 */
+ if (key_len == 8) {
+ /* Skipping the first 8B as it will be copied
+ * in the regular code flow
+ */
+ memcpy(fctx->enc.encr_key+key_len, key, key_len);
+ memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
+ }
+ break;
+ case DES3_ECB:
+		/* For DES3_ECB, the IV needs to come from CTX. */
+ fctx->enc.iv_source = CPT_FROM_CTX;
+ break;
+ case AES_CBC:
+ case AES_ECB:
+ case AES_CFB:
+ case AES_CTR:
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+ break;
+ case AES_GCM:
+		/* Even though the IV source is DPTR,
+		 * the AES-GCM salt is taken from CTX.
+		 */
+ if (salt) {
+ memcpy(fctx->enc.encr_iv, salt, 4);
+ /* Assuming it was just salt update
+ * and nothing else
+ */
+ if (!key)
+ goto success;
+ }
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+ break;
+ case AES_XTS:
+ key_len = key_len / 2;
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+
+ /* Copy key2 for XTS into ipad */
+ memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
+ memcpy(fctx->hmac.ipad, &key[key_len], key_len);
+ break;
+ case SNOW3G_UEA2:
+ cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
+ goto success;
+ case ZUC_EEA3:
+ cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
+ goto success;
+ case KASUMI_F8_ECB:
+ cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
+ goto success;
+ case KASUMI_F8_CBC:
+ cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
+ goto success;
+ default:
+ return -1;
+ }
+
+ /* Only for FC_GEN case */
+
+ /* For GMAC auth, cipher must be NULL */
+ if (cpt_ctx->hash_type != GMAC_TYPE)
+ fctx->enc.enc_cipher = type;
+
+ memcpy(fctx->enc.encr_key, key, key_len);
+
+success:
+ cpt_ctx->enc_cipher = type;
+
+ return 0;
+}
+
+static __rte_always_inline uint32_t
+fill_sg_comp(sg_comp_t *list,
+ uint32_t i,
+ phys_addr_t dma_addr,
+ uint32_t size)
+{
+ sg_comp_t *to = &list[i>>2];
+
+ to->u.s.len[i%4] = rte_cpu_to_be_16(size);
+ to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
+ i++;
+ return i;
+}
+
+static __rte_always_inline uint32_t
+fill_sg_comp_from_buf(sg_comp_t *list,
+ uint32_t i,
+ buf_ptr_t *from)
+{
+ sg_comp_t *to = &list[i>>2];
+
+ to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
+ to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
+ i++;
+ return i;
+}
+
+static __rte_always_inline uint32_t
+fill_sg_comp_from_buf_min(sg_comp_t *list,
+ uint32_t i,
+ buf_ptr_t *from,
+ uint32_t *psize)
+{
+ sg_comp_t *to = &list[i >> 2];
+ uint32_t size = *psize;
+ uint32_t e_len;
+
+ e_len = (size > from->size) ? from->size : size;
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
+ *psize -= e_len;
+ i++;
+ return i;
+}
+
+/*
+ * This fills the SG I/O list expected by the MC
+ * from the IOV given by the user.
+ */
+static __rte_always_inline uint32_t
+fill_sg_comp_from_iov(sg_comp_t *list,
+ uint32_t i,
+ iov_ptr_t *from, uint32_t from_offset,
+ uint32_t *psize, buf_ptr_t *extra_buf,
+ uint32_t extra_offset)
+{
+ int32_t j;
+ uint32_t extra_len = extra_buf ? extra_buf->size : 0;
+ uint32_t size = *psize;
+ buf_ptr_t *bufs;
+
+ bufs = from->bufs;
+ for (j = 0; (j < from->buf_cnt) && size; j++) {
+ phys_addr_t e_dma_addr;
+ uint32_t e_len;
+ sg_comp_t *to = &list[i >> 2];
+
+ if (unlikely(from_offset)) {
+ if (from_offset >= bufs[j].size) {
+ from_offset -= bufs[j].size;
+ continue;
+ }
+ e_dma_addr = bufs[j].dma_addr + from_offset;
+ e_len = (size > (bufs[j].size - from_offset)) ?
+ (bufs[j].size - from_offset) : size;
+ from_offset = 0;
+ } else {
+ e_dma_addr = bufs[j].dma_addr;
+ e_len = (size > bufs[j].size) ?
+ bufs[j].size : size;
+ }
+
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
+
+ if (extra_len && (e_len >= extra_offset)) {
+ /* Break the data at given offset */
+ uint32_t next_len = e_len - extra_offset;
+ phys_addr_t next_dma = e_dma_addr + extra_offset;
+
+ if (!extra_offset) {
+ i--;
+ } else {
+ e_len = extra_offset;
+ size -= e_len;
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ }
+
+ extra_len = RTE_MIN(extra_len, size);
+ /* Insert extra data ptr */
+ if (extra_len) {
+ i++;
+ to = &list[i >> 2];
+ to->u.s.len[i % 4] =
+ rte_cpu_to_be_16(extra_len);
+ to->ptr[i % 4] =
+ rte_cpu_to_be_64(extra_buf->dma_addr);
+ size -= extra_len;
+ }
+
+ next_len = RTE_MIN(next_len, size);
+ /* insert the rest of the data */
+ if (next_len) {
+ i++;
+ to = &list[i >> 2];
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
+ size -= next_len;
+ }
+ extra_len = 0;
+
+ } else {
+ size -= e_len;
+ }
+ if (extra_offset)
+ extra_offset -= size;
+ i++;
+ }
+
+ *psize = size;
+ return (uint32_t)i;
+}
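+
+/*
+ * In the AEAD paths below, extra_buf carries the AAD and extra_offset the
+ * passthrough length, so the resulting gather list reads
+ * [passthrough bytes][AAD][remaining input], i.e. the AAD is spliced into
+ * the IOV at the given offset.
+ */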
+
+static __rte_always_inline void
+cpt_digest_gen_prep(uint32_t flags,
+ uint64_t d_lens,
+ digest_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ struct cpt_request_info *req;
+ uint32_t size, i;
+ uint16_t data_len, mac_len, key_len;
+ auth_type_t hash_type;
+ buf_ptr_t *meta_p;
+ struct cpt_ctx *ctx;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ void *c_vaddr, *m_vaddr;
+ uint64_t c_dma, m_dma;
+ opcode_info_t opcode;
+
+ ctx = params->ctx_buf.vaddr;
+ meta_p = &params->meta_buf;
+
+ m_vaddr = meta_p->vaddr;
+ m_dma = meta_p->dma_addr;
+
+	/*
+	 * Reserve the initial space, which directly follows the app data,
+	 * for the completion code & alternate completion code, so that they
+	 * fall in the same cache line as the app data.
+	 */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ hash_type = ctx->hash_type;
+ mac_len = ctx->mac_len;
+ key_len = ctx->auth_key_len;
+ data_len = AUTH_DLEN(d_lens);
+
+	/* GP op header */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
+ if (ctx->hmac) {
+ opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
+ vq_cmd_w0.s.param1 = key_len;
+ vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
+ } else {
+ opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
+ vq_cmd_w0.s.param1 = 0;
+ vq_cmd_w0.s.dlen = data_len;
+ }
+
+ opcode.s.minor = 0;
+
+	/* Only the NULL-auth case enters this branch */
+ if (unlikely(!hash_type && !ctx->enc_cipher)) {
+ opcode.s.major = CPT_MAJOR_OP_MISC;
+ /* Minor op is passthrough */
+ opcode.s.minor = 0x03;
+ /* Send out completion code only */
+ vq_cmd_w0.s.param2 = 0x1;
+ }
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input gather list
+ */
+
+ i = 0;
+
+ if (ctx->hmac) {
+ uint64_t k_dma = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, auth_key);
+ /* Key */
+ i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
+ }
+
+ /* input data */
+ size = data_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
+ 0, &size, NULL, 0);
+ if (unlikely(size)) {
+			CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
+					" by %dB", size);
+ return;
+ }
+ } else {
+		/*
+		 * A zero-length gather pointer still needs to be
+		 * accounted for in the hash & HMAC case.
+		 */
+ i++;
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Gather list
+ */
+
+ i = 0;
+ scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ if (flags & VALID_MAC_BUF) {
+ if (unlikely(params->mac_buf.size < mac_len)) {
+ CPT_LOG_DP_ERR("Insufficient MAC size");
+ return;
+ }
+
+ size = mac_len;
+ i = fill_sg_comp_from_buf_min(scatter_comp, i,
+ &params->mac_buf, &size);
+ } else {
+ size = mac_len;
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->src_iov, data_len,
+ &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
+ " %dB", size);
+ return;
+ }
+ }
+
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+	/* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = size;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return;
+}
+
+static __rte_always_inline void
+cpt_enc_hmac_prep(uint32_t flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *fc_params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t iv_offset = 0;
+ int32_t inputlen, outputlen, enc_dlen, auth_dlen;
+ struct cpt_ctx *cpt_ctx;
+ uint32_t cipher_type, hash_type;
+ uint32_t mac_len, size;
+ uint8_t iv_len = 16;
+ struct cpt_request_info *req;
+ buf_ptr_t *meta_p, *aad_buf = NULL;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len, aad_len = 0;
+ uint32_t passthrough_len = 0;
+ void *m_vaddr, *offset_vaddr;
+ uint64_t m_dma, offset_dma, ctx_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ void *c_vaddr;
+ uint64_t c_dma;
+ opcode_info_t opcode;
+
+ meta_p = &fc_params->meta_buf;
+ m_vaddr = meta_p->vaddr;
+ m_dma = meta_p->dma_addr;
+
+ encr_offset = ENCR_OFFSET(d_offs);
+ auth_offset = AUTH_OFFSET(d_offs);
+ encr_data_len = ENCR_DLEN(d_lens);
+ auth_data_len = AUTH_DLEN(d_lens);
+ if (unlikely(flags & VALID_AAD_BUF)) {
+		/*
+		 * We don't support AAD and auth
+		 * data as separate inputs.
+		 */
+ auth_data_len = 0;
+ auth_offset = 0;
+ aad_len = fc_params->aad_buf.size;
+ aad_buf = &fc_params->aad_buf;
+ }
+ cpt_ctx = fc_params->ctx_buf.vaddr;
+ cipher_type = cpt_ctx->enc_cipher;
+ hash_type = cpt_ctx->hash_type;
+ mac_len = cpt_ctx->mac_len;
+
+	/*
+	 * Reserve the initial space, which directly follows the app data,
+	 * for the completion code & alternate completion code, so that they
+	 * fall in the same cache line as the app data.
+	 */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* start cpt request info struct at 8 byte boundary */
+ size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
+ (uint8_t *)m_vaddr;
+
+ req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
+
+ size += sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ if (hash_type == GMAC_TYPE)
+ encr_data_len = 0;
+
+ if (unlikely(!(flags & VALID_IV_BUF))) {
+ iv_len = 0;
+ iv_offset = ENCR_IV_OFFSET(d_offs);
+ }
+
+ if (unlikely(flags & VALID_AAD_BUF)) {
+		/*
+		 * When AAD is given, data above encr_offset is passed
+		 * through. Since AAD is given as a separate pointer and not
+		 * as an offset, this is a special case: we need to fragment
+		 * the input data into passthrough + encr_data and then
+		 * insert the AAD in between.
+		 */
+ if (hash_type != GMAC_TYPE) {
+ passthrough_len = encr_offset;
+ auth_offset = passthrough_len + iv_len;
+ encr_offset = passthrough_len + aad_len + iv_len;
+ auth_data_len = aad_len + encr_data_len;
+ } else {
+ passthrough_len = 16 + aad_len;
+ auth_offset = passthrough_len + iv_len;
+ auth_data_len = aad_len;
+ }
+ } else {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ /* Encryption */
+ opcode.s.major = CPT_MAJOR_OP_FC;
+ opcode.s.minor = 0;
+
+ auth_dlen = auth_offset + auth_data_len;
+ enc_dlen = encr_data_len + encr_offset;
+ if (unlikely(encr_data_len & 0xf)) {
+ if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
+ enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
+ else if (likely((cipher_type == AES_CBC) ||
+ (cipher_type == AES_ECB)))
+ enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
+ }
+
+ if (unlikely(hash_type == GMAC_TYPE)) {
+ encr_offset = auth_dlen;
+ enc_dlen = 0;
+ }
+
+ if (unlikely(auth_dlen > enc_dlen)) {
+ inputlen = auth_dlen;
+ outputlen = auth_dlen + mac_len;
+ } else {
+ inputlen = enc_dlen;
+ outputlen = enc_dlen + mac_len;
+ }
+
+ /* GP op header */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
+	/*
+	 * On 83XX we have the limitation that the IV & offset control word
+	 * cannot be part of the instruction and must be placed in the data
+	 * buffer, so we do direct mode processing only if head room is
+	 * available.
+	 */
+ if (likely((flags & SINGLE_BUF_INPLACE) &&
+ (flags & SINGLE_BUF_HEADTAILROOM))) {
+ void *dm_vaddr = fc_params->bufs[0].vaddr;
+ uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
+		/*
+		 * This flag indicates that there are 24 bytes of head room
+		 * and 8 bytes of tail room available, so we can use
+		 * DIRECT MODE despite the limitation.
+		 */
+
+ offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
+ offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
+
+ /* DPTR */
+ req->ist.ei1 = offset_dma;
+ /* RPTR should just exclude offset control word */
+ req->ist.ei2 = dm_dma_addr - iv_len;
+ req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ + outputlen - iv_len);
+
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
+ + OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ *(uint64_t *)offset_vaddr =
+ rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
+ ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ } else {
+ uint32_t i, g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+
+ /* This falls under strict SG mode */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+ size = OFF_CTRL_LEN + iv_len;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ opcode.s.major |= CPT_DMA_MODE;
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
+ + OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ *(uint64_t *)offset_vaddr =
+ rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
+ ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+
+ i = 0;
+
+ /* Offset control word that includes iv */
+ i = fill_sg_comp(gather_comp, i, offset_dma,
+ OFF_CTRL_LEN + iv_len);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (likely(size)) {
+ uint32_t aad_offset = aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(gather_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ fc_params->src_iov,
+ 0, &size,
+ aad_buf, aad_offset);
+ }
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter list
+ */
+ i = 0;
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ /* Add IV */
+ if (likely(iv_len)) {
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+ }
+
+		/* Output data, or output data + digest */
+ if (unlikely(flags & VALID_MAC_BUF)) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ scatter_comp,
+ i,
+ fc_params->bufs,
+ &size);
+ } else {
+ i = fill_sg_comp_from_iov(scatter_comp,
+ i,
+ fc_params->dst_iov,
+ 0,
+ &size,
+ aad_buf,
+ aad_offset);
+ }
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
+ }
+ /* mac_data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &fc_params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (likely(size)) {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ scatter_comp,
+ i,
+ fc_params->bufs,
+ &size);
+ } else {
+ i = fill_sg_comp_from_iov(scatter_comp,
+ i,
+ fc_params->dst_iov,
+ 0,
+ &size,
+ aad_buf,
+ aad_offset);
+ }
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+		/* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = size;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ }
+
+ ctx_dma = fc_params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, fctx);
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = ctx_dma;
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return;
+}
+
+static __rte_always_inline void
+cpt_dec_hmac_prep(uint32_t flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *fc_params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t iv_offset = 0, size;
+ int32_t inputlen, outputlen, enc_dlen, auth_dlen;
+ struct cpt_ctx *cpt_ctx;
+ int32_t hash_type, mac_len;
+ uint8_t iv_len = 16;
+ struct cpt_request_info *req;
+ buf_ptr_t *meta_p, *aad_buf = NULL;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len, aad_len = 0;
+ uint32_t passthrough_len = 0;
+ void *m_vaddr, *offset_vaddr;
+ uint64_t m_dma, offset_dma, ctx_dma;
+ opcode_info_t opcode;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ void *c_vaddr;
+ uint64_t c_dma;
+
+ meta_p = &fc_params->meta_buf;
+ m_vaddr = meta_p->vaddr;
+ m_dma = meta_p->dma_addr;
+
+ encr_offset = ENCR_OFFSET(d_offs);
+ auth_offset = AUTH_OFFSET(d_offs);
+ encr_data_len = ENCR_DLEN(d_lens);
+ auth_data_len = AUTH_DLEN(d_lens);
+
+ if (unlikely(flags & VALID_AAD_BUF)) {
+		/*
+		 * We don't support AAD and auth
+		 * data as separate inputs.
+		 */
+ auth_data_len = 0;
+ auth_offset = 0;
+ aad_len = fc_params->aad_buf.size;
+ aad_buf = &fc_params->aad_buf;
+ }
+
+ cpt_ctx = fc_params->ctx_buf.vaddr;
+ hash_type = cpt_ctx->hash_type;
+ mac_len = cpt_ctx->mac_len;
+
+ if (hash_type == GMAC_TYPE)
+ encr_data_len = 0;
+
+ if (unlikely(!(flags & VALID_IV_BUF))) {
+ iv_len = 0;
+ iv_offset = ENCR_IV_OFFSET(d_offs);
+ }
+
+ if (unlikely(flags & VALID_AAD_BUF)) {
+		/*
+		 * When AAD is given, data above encr_offset is passed
+		 * through. Since AAD is given as a separate pointer and not
+		 * as an offset, this is a special case: we need to fragment
+		 * the input data into passthrough + encr_data and then
+		 * insert the AAD in between.
+		 */
+ if (hash_type != GMAC_TYPE) {
+ passthrough_len = encr_offset;
+ auth_offset = passthrough_len + iv_len;
+ encr_offset = passthrough_len + aad_len + iv_len;
+ auth_data_len = aad_len + encr_data_len;
+ } else {
+ passthrough_len = 16 + aad_len;
+ auth_offset = passthrough_len + iv_len;
+ auth_data_len = aad_len;
+ }
+ } else {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+	/*
+	 * Reserve the initial space, which directly follows the app data,
+	 * for the completion code & alternate completion code, so that they
+	 * fall in the same cache line as the app data.
+	 */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* start cpt request info structure at 8 byte alignment */
+ size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
+ (uint8_t *)m_vaddr;
+
+ req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
+
+ size += sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* Decryption */
+ opcode.s.major = CPT_MAJOR_OP_FC;
+ opcode.s.minor = 1;
+
+ enc_dlen = encr_offset + encr_data_len;
+ auth_dlen = auth_offset + auth_data_len;
+
+ if (auth_dlen > enc_dlen) {
+ inputlen = auth_dlen + mac_len;
+ outputlen = auth_dlen;
+ } else {
+ inputlen = enc_dlen + mac_len;
+ outputlen = enc_dlen;
+ }
+
+ if (hash_type == GMAC_TYPE)
+ encr_offset = inputlen;
+
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
+
+	/*
+	 * On 83XX we have the limitation that the IV & offset control word
+	 * cannot be part of the instruction and must be placed in the data
+	 * buffer, so we do direct mode processing only if head room is
+	 * available.
+	 */
+ if (likely((flags & SINGLE_BUF_INPLACE) &&
+ (flags & SINGLE_BUF_HEADTAILROOM))) {
+ void *dm_vaddr = fc_params->bufs[0].vaddr;
+ uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
+		/*
+		 * This flag indicates that there are 24 bytes of head room
+		 * and 8 bytes of tail room available, so we can use
+		 * DIRECT MODE despite the limitation.
+		 */
+
+ offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
+ offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
+ req->ist.ei1 = offset_dma;
+
+ /* RPTR should just exclude offset control word */
+ req->ist.ei2 = dm_dma_addr - iv_len;
+
+ req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
+ outputlen - iv_len);
+		/* Since this is decryption, don't
+		 * touch the contents of the alternate
+		 * completion-code space, as it
+		 * contains the HMAC.
+		 */
+
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
+ OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ *(uint64_t *)offset_vaddr =
+ rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
+ ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ } else {
+ uint64_t dptr_dma, rptr_dma;
+ uint32_t g_size_bytes, s_size_bytes;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint8_t i = 0;
+
+ /* This falls under strict SG mode */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+ size = OFF_CTRL_LEN + iv_len;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ opcode.s.major |= CPT_DMA_MODE;
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
+ OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ *(uint64_t *)offset_vaddr =
+ rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
+ ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word that includes iv */
+ i = fill_sg_comp(gather_comp, i, offset_dma,
+ OFF_CTRL_LEN + iv_len);
+
+ /* Add input data */
+ if (flags & VALID_MAC_BUF) {
+ size = inputlen - iv_len - mac_len;
+ if (size) {
+ /* input data only */
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ gather_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset = aad_len ?
+ passthrough_len : 0;
+
+ i = fill_sg_comp_from_iov(gather_comp,
+ i,
+ fc_params->src_iov,
+ 0, &size,
+ aad_buf,
+ aad_offset);
+ }
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(gather_comp, i,
+ &fc_params->mac_buf);
+ }
+ } else {
+ /* input data + mac */
+ size = inputlen - iv_len;
+ if (size) {
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ gather_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset = aad_len ?
+ passthrough_len : 0;
+
+ if (unlikely(!fc_params->src_iov)) {
+ CPT_LOG_DP_ERR("Bad input args");
+ return;
+ }
+
+ i = fill_sg_comp_from_iov(
+ gather_comp, i,
+ fc_params->src_iov,
+ 0, &size,
+ aad_buf,
+ aad_offset);
+ }
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ /* Add iv */
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ /* handle single buffer here */
+ i = fill_sg_comp_from_buf_min(scatter_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset = aad_len ?
+ passthrough_len : 0;
+
+ if (unlikely(!fc_params->dst_iov)) {
+ CPT_LOG_DP_ERR("Bad input args");
+ return;
+ }
+
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ fc_params->dst_iov, 0,
+ &size, aad_buf,
+ aad_offset);
+ }
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+		/* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = size;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+ size += COMPLETION_CODE_SIZE;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ }
+
+ ctx_dma = fc_params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, fctx);
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = ctx_dma;
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return;
+}
+
+static __rte_always_inline void
+cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t size;
+ int32_t inputlen, outputlen;
+ struct cpt_ctx *cpt_ctx;
+ uint32_t mac_len = 0;
+ uint8_t snow3g, j;
+ struct cpt_request_info *req;
+ buf_ptr_t *buf_p;
+ uint32_t encr_offset = 0, auth_offset = 0;
+ uint32_t encr_data_len = 0, auth_data_len = 0;
+ int flags, iv_len = 16;
+ void *m_vaddr, *c_vaddr;
+ uint64_t m_dma, c_dma, offset_ctrl;
+ uint64_t *offset_vaddr, offset_dma;
+ uint32_t *iv_s, iv[4];
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ opcode_info_t opcode;
+
+ buf_p = &params->meta_buf;
+ m_vaddr = buf_p->vaddr;
+ m_dma = buf_p->dma_addr;
+
+ cpt_ctx = params->ctx_buf.vaddr;
+ flags = cpt_ctx->zsk_flags;
+ mac_len = cpt_ctx->mac_len;
+ snow3g = cpt_ctx->snow3g;
+
+	/*
+	 * Reserve the initial space, which directly follows the app data,
+	 * for the completion code & alternate completion code, so that they
+	 * fall in the same cache line as the app data.
+	 */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* Reserve memory for cpt request info */
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
+
+ /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
+
+ opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
+ (0 << 3) | (flags & 0x7));
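+	/* Bit 5 is set for SNOW 3G, clear for ZUC; the low 3 bits carry zsk_flags */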
+
+ if (flags == 0x1) {
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ auth_data_len = AUTH_DLEN(d_lens);
+
+ /* EIA3 or UIA2 */
+ auth_offset = AUTH_OFFSET(d_offs);
+ auth_offset = auth_offset / 8;
+
+ /* consider iv len */
+ auth_offset += iv_len;
+
+ inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
+ outputlen = mac_len;
+
+ offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
+
+ } else {
+ /* EEA3 or UEA2 */
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ encr_data_len = ENCR_DLEN(d_lens);
+
+ encr_offset = ENCR_OFFSET(d_offs);
+ encr_offset = encr_offset / 8;
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* iv offset is 0 */
+ offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ }
+
+ /* IV */
+ iv_s = (flags == 0x1) ? params->auth_iv_buf :
+ params->iv_buf;
+
+ if (snow3g) {
+		/*
+		 * DPDK provides the IV as IV3 IV2 IV1 IV0 in big endian;
+		 * the MC needs it as IV0 IV1 IV2 IV3.
+		 */
+
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[3 - j];
+ } else {
+ /* ZUC doesn't need a swap */
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[j];
+ }
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
+
+	/*
+	 * On 83XX we have the limitation that the IV & offset control word
+	 * cannot be part of the instruction and must be placed in the data
+	 * buffer, so we do direct mode processing only if head room is
+	 * available.
+	 */
+ if (likely((req_flags & SINGLE_BUF_INPLACE) &&
+ (req_flags & SINGLE_BUF_HEADTAILROOM))) {
+ void *dm_vaddr = params->bufs[0].vaddr;
+ uint64_t dm_dma_addr = params->bufs[0].dma_addr;
+		/*
+		 * This flag indicates that there are 24 bytes of head room
+		 * and 8 bytes of tail room available, so we can use
+		 * DIRECT MODE despite the limitation.
+		 */
+
+ offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
+ OFF_CTRL_LEN - iv_len);
+ offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
+
+ /* DPTR */
+ req->ist.ei1 = offset_dma;
+ /* RPTR should just exclude offset control word */
+ req->ist.ei2 = dm_dma_addr - iv_len;
+ req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ + outputlen - iv_len);
+
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ if (likely(iv_len)) {
+ uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
+ + OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+ }
+
+ *offset_vaddr = offset_ctrl;
+ } else {
+ uint32_t i, g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t *iv_d;
+
+		/* Save space for the offset control word and IV */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+
+ m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
+ m_dma += OFF_CTRL_LEN + iv_len;
+
+ opcode.s.major |= CPT_DMA_MODE;
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+
+ i = fill_sg_comp(gather_comp, i, offset_dma,
+ OFF_CTRL_LEN + iv_len);
+
+ /* iv offset is 0 */
+ *offset_vaddr = offset_ctrl;
+
+ iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+
+ /* input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov,
+ 0, &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ if (flags == 0x1) {
+ /* IV in SLIST only for EEA3 & UEA2 */
+ iv_len = 0;
+ }
+
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN, iv_len);
+ }
+
+ /* Add output data */
+ if (req_flags & VALID_MAC_BUF) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+		/* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = size;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ }
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, zs_ctx);
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return;
+}
+
+static __rte_always_inline void
+cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t size;
+ int32_t inputlen = 0, outputlen;
+ struct cpt_ctx *cpt_ctx;
+ uint8_t snow3g, iv_len = 16;
+ struct cpt_request_info *req;
+ buf_ptr_t *buf_p;
+ uint32_t encr_offset;
+ uint32_t encr_data_len;
+ int flags;
+ void *m_vaddr, *c_vaddr;
+ uint64_t m_dma, c_dma;
+ uint64_t *offset_vaddr, offset_dma;
+ uint32_t *iv_s, iv[4], j;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ opcode_info_t opcode;
+
+ buf_p = &params->meta_buf;
+ m_vaddr = buf_p->vaddr;
+ m_dma = buf_p->dma_addr;
+
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ encr_offset = ENCR_OFFSET(d_offs) / 8;
+ encr_data_len = ENCR_DLEN(d_lens);
+
+ cpt_ctx = params->ctx_buf.vaddr;
+ flags = cpt_ctx->zsk_flags;
+ snow3g = cpt_ctx->snow3g;
+	/*
+	 * Reserve the initial space, which directly follows the app data,
+	 * for the completion code & alternate completion code, so that they
+	 * fall in the same cache line as the app data.
+	 */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* Reserve memory for cpt request info */
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
+
+ /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
+
+ opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
+ (0 << 3) | (flags & 0x7));
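+	/* Bit 5 is set for SNOW 3G, clear for ZUC; the low 3 bits carry zsk_flags */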
+
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = encr_offset +
+ (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* IV */
+ iv_s = params->iv_buf;
+ if (snow3g) {
+		/*
+		 * DPDK provides the IV as IV3 IV2 IV1 IV0 in big endian;
+		 * the MC needs it as IV0 IV1 IV2 IV3.
+		 */
+
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[3 - j];
+ } else {
+ /* ZUC doesn't need a swap */
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[j];
+ }
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = encr_data_len;
+
+	/*
+	 * On 83XX we have the limitation that the IV & offset control word
+	 * cannot be part of the instruction and must be placed in the data
+	 * buffer, so we do direct mode processing only if head room is
+	 * available.
+	 */
+ if (likely((req_flags & SINGLE_BUF_INPLACE) &&
+ (req_flags & SINGLE_BUF_HEADTAILROOM))) {
+ void *dm_vaddr = params->bufs[0].vaddr;
+ uint64_t dm_dma_addr = params->bufs[0].dma_addr;
+		/*
+		 * This flag indicates that there are 24 bytes of head room
+		 * and 8 bytes of tail room available, so we can use
+		 * DIRECT MODE despite the limitation.
+		 */
+
+ offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
+ OFF_CTRL_LEN - iv_len);
+ offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
+
+ /* DPTR */
+ req->ist.ei1 = offset_dma;
+ /* RPTR should just exclude offset control word */
+ req->ist.ei2 = dm_dma_addr - iv_len;
+ req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ + outputlen - iv_len);
+
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ if (likely(iv_len)) {
+ uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
+ + OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+ }
+
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ } else {
+ uint32_t i, g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t *iv_d;
+
+		/* Save space for the offset control word and IV */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+
+ m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
+ m_dma += OFF_CTRL_LEN + iv_len;
+
+ opcode.s.major |= CPT_DMA_MODE;
+
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word */
+
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+
+ i = fill_sg_comp(gather_comp, i, offset_dma,
+ OFF_CTRL_LEN + iv_len);
+
+ iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov,
+ 0, &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ /* IV */
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+		/* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = size;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ }
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, zs_ctx);
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return;
+}
+
+static __rte_always_inline void
+cpt_kasumi_enc_prep(uint32_t req_flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t size;
+ int32_t inputlen = 0, outputlen = 0;
+ struct cpt_ctx *cpt_ctx;
+ uint32_t mac_len = 0;
+ uint8_t i = 0;
+ struct cpt_request_info *req;
+ buf_ptr_t *buf_p;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len;
+ int flags;
+ uint8_t *iv_s, *iv_d, iv_len = 8;
+ uint8_t dir = 0;
+ void *m_vaddr, *c_vaddr;
+ uint64_t m_dma, c_dma;
+ uint64_t *offset_vaddr, offset_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ opcode_info_t opcode;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+
+ buf_p = &params->meta_buf;
+ m_vaddr = buf_p->vaddr;
+ m_dma = buf_p->dma_addr;
+
+ encr_offset = ENCR_OFFSET(d_offs) / 8;
+ auth_offset = AUTH_OFFSET(d_offs) / 8;
+ encr_data_len = ENCR_DLEN(d_lens);
+ auth_data_len = AUTH_DLEN(d_lens);
+
+ cpt_ctx = params->ctx_buf.vaddr;
+ flags = cpt_ctx->zsk_flags;
+ mac_len = cpt_ctx->mac_len;
+
+ if (flags == 0x0)
+ iv_s = params->iv_buf;
+ else
+ iv_s = params->auth_iv_buf;
+
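+	/* The KASUMI direction bit is the LSB of the 9th IV byte */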
+ dir = iv_s[8] & 0x1;
+
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* Reserve memory for cpt request info */
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
+
+ /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
+ opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
+ (dir << 4) | (0 << 3) | (flags & 0x7));
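+	/*
+	 * Reading off the expression above: bit 6 selects IV from DPTR,
+	 * bit 5 ECB/CBC, bit 4 direction, bit 3 ctx from CPTR (0 here),
+	 * bits 2..0 the F8/F9 flags (mapping inferred from the comment
+	 * and the shifts).
+	 */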
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* consider iv len */
+ if (flags == 0x0) {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ /* save space for offset ctrl and iv */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+
+ m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
+ m_dma += OFF_CTRL_LEN + iv_len;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+
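+	/*
+	 * For F8 (cipher, flags == 0) the output mirrors the input
+	 * length; for F9 (auth) the output is just the MAC. The offset
+	 * control word is packed differently for the two cases below.
+	 */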
+ if (flags == 0x0) {
+ inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ } else {
+ inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
+ outputlen = mac_len;
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
+ }
+
+ i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
+
+ /* IV */
+ iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
+ memcpy(iv_d, iv_s, iv_len);
+
+ /* input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ if (flags == 0x1) {
+ /* IV in SLIST only for F8 */
+ iv_len = 0;
+ }
+
+ /* IV */
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* Add output data */
+ if (req_flags & VALID_MAC_BUF) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+	/* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = size;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, k_ctx);
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return;
+}
+
+static __rte_always_inline void
+cpt_kasumi_dec_prep(uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t size;
+ int32_t inputlen = 0, outputlen;
+ struct cpt_ctx *cpt_ctx;
+ uint8_t i = 0, iv_len = 8;
+ struct cpt_request_info *req;
+ buf_ptr_t *buf_p;
+ uint32_t encr_offset;
+ uint32_t encr_data_len;
+ int flags;
+ uint8_t dir = 0;
+ void *m_vaddr, *c_vaddr;
+ uint64_t m_dma, c_dma;
+ uint64_t *offset_vaddr, offset_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ opcode_info_t opcode;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+
+ buf_p = &params->meta_buf;
+ m_vaddr = buf_p->vaddr;
+ m_dma = buf_p->dma_addr;
+
+ encr_offset = ENCR_OFFSET(d_offs) / 8;
+ encr_data_len = ENCR_DLEN(d_lens);
+
+ cpt_ctx = params->ctx_buf.vaddr;
+ flags = cpt_ctx->zsk_flags;
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* Reserve memory for cpt request info */
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
+
+ /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
+ opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
+ (dir << 4) | (0 << 3) | (flags & 0x7));
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* save space for offset ctrl & iv */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+
+ m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
+ m_dma += OFF_CTRL_LEN + iv_len;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+
+ i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
+
+ /* IV */
+ memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
+ params->iv_buf, iv_len);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov,
+ 0, &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ /* IV */
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+	/* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = size;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, k_ctx);
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return;
+}
+
+static __rte_always_inline void *
+cpt_fc_dec_hmac_prep(uint32_t flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *fc_params,
+ void *op)
+{
+ struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
+ uint8_t fc_type;
+ void *prep_req = NULL;
+
+ fc_type = ctx->fc_type;
+
+ if (likely(fc_type == FC_GEN)) {
+ cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
+ } else if (fc_type == ZUC_SNOW3G) {
+ cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
+ } else if (fc_type == KASUMI) {
+ cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
+ }
+
+	/*
+	 * For the AUTH_ONLY case, MC only supports digest generation;
+	 * verification should be done in software by memcmp().
+	 */
+
+ return prep_req;
+}
+
+static __rte_always_inline void *__rte_hot
+cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
+ fc_params_t *fc_params, void *op)
+{
+ struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
+ uint8_t fc_type;
+ void *prep_req = NULL;
+
+ fc_type = ctx->fc_type;
+
+	/* Common API for the rest of the ops */
+ if (likely(fc_type == FC_GEN)) {
+ cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
+ } else if (fc_type == ZUC_SNOW3G) {
+ cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
+ } else if (fc_type == KASUMI) {
+ cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
+ } else if (fc_type == HASH_HMAC) {
+ cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
+ }
+
+ return prep_req;
+}
+
+static __rte_always_inline int
+cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
+ uint16_t key_len, uint16_t mac_len)
+{
+ struct cpt_ctx *cpt_ctx = ctx;
+ mc_fc_context_t *fctx = &cpt_ctx->fctx;
+
+ if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
+ uint32_t keyx[4];
+
+ if (key_len != 16)
+ return -1;
+ /* No support for AEAD yet */
+ if (cpt_ctx->enc_cipher)
+ return -1;
+ /* For ZUC/SNOW3G/Kasumi */
+ switch (type) {
+ case SNOW3G_UIA2:
+ cpt_ctx->snow3g = 1;
+ gen_key_snow3g(key, keyx);
+ memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ cpt_ctx->fc_type = ZUC_SNOW3G;
+ cpt_ctx->zsk_flags = 0x1;
+ break;
+ case ZUC_EIA3:
+ cpt_ctx->snow3g = 0;
+ memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
+ memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ cpt_ctx->fc_type = ZUC_SNOW3G;
+ cpt_ctx->zsk_flags = 0x1;
+ break;
+ case KASUMI_F9_ECB:
+ /* Kasumi ECB mode */
+ cpt_ctx->k_ecb = 1;
+ memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ cpt_ctx->fc_type = KASUMI;
+ cpt_ctx->zsk_flags = 0x1;
+ break;
+ case KASUMI_F9_CBC:
+ memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ cpt_ctx->fc_type = KASUMI;
+ cpt_ctx->zsk_flags = 0x1;
+ break;
+ default:
+ return -1;
+ }
+ cpt_ctx->mac_len = 4;
+ cpt_ctx->hash_type = type;
+ return 0;
+ }
+
+ if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
+ if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
+ cpt_ctx->fc_type = HASH_HMAC;
+ }
+
+ if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
+ return -1;
+
+ /* For GMAC auth, cipher must be NULL */
+ if (type == GMAC_TYPE)
+ fctx->enc.enc_cipher = 0;
+
+ fctx->enc.hash_type = cpt_ctx->hash_type = type;
+ fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;
+
+ if (key_len) {
+ cpt_ctx->hmac = 1;
+ memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
+ memcpy(cpt_ctx->auth_key, key, key_len);
+ cpt_ctx->auth_key_len = key_len;
+ memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
+ memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
+
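+		/*
+		 * The raw key (when <= 64 bytes) is stashed in the opad
+		 * field; auth_input_type = 1 appears to tell the microcode
+		 * to derive ipad/opad from it itself (inferred from the
+		 * field names).
+		 */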
+ if (key_len <= 64)
+ memcpy(fctx->hmac.opad, key, key_len);
+ fctx->enc.auth_input_type = 1;
+ }
+ return 0;
+}
+
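+/*
+ * Usage sketch for fill_sess_aead() below (hypothetical caller, not part
+ * of this file): an AES-GCM encrypt session would arrive with an xform
+ * along the lines of:
+ *
+ *	struct rte_crypto_sym_xform xform = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ *		.aead = {
+ *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
+ *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
+ *			.key = { .data = key, .length = 16 },
+ *			.iv = { .offset = iv_offset, .length = 12 },
+ *			.digest_length = 16,
+ *			.aad_length = aad_len,
+ *		},
+ *	};
+ *
+ * where key, iv_offset and aad_len are supplied by the application.
+ */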
+static __rte_always_inline int
+fill_sess_aead(struct rte_crypto_sym_xform *xform,
+ struct cpt_sess_misc *sess)
+{
+ struct rte_crypto_aead_xform *aead_form;
+ cipher_type_t enc_type = 0; /* NULL Cipher type */
+ auth_type_t auth_type = 0; /* NULL Auth type */
+ uint32_t cipher_key_len = 0;
+ uint8_t aes_gcm = 0;
+	void *ctx = SESS_PRIV(sess);
+
+	aead_form = &xform->aead;
+
+ if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
+ sess->cpt_op |= CPT_OP_AUTH_GENERATE;
+ } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
+ sess->cpt_op |= CPT_OP_AUTH_VERIFY;
+ } else {
+		CPT_LOG_DP_ERR("Unknown cipher operation");
+ return -1;
+ }
+ switch (aead_form->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ enc_type = AES_GCM;
+ cipher_key_len = 16;
+ aes_gcm = 1;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
+ aead_form->algo);
+ return -1;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ aead_form->algo);
+ return -1;
+ }
+ if (aead_form->key.length < cipher_key_len) {
+		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
+			       (unsigned long)aead_form->key.length);
+ return -1;
+ }
+ sess->zsk_flag = 0;
+ sess->aes_gcm = aes_gcm;
+ sess->mac_len = aead_form->digest_length;
+ sess->iv_offset = aead_form->iv.offset;
+ sess->iv_length = aead_form->iv.length;
+ sess->aad_length = aead_form->aad_length;
+
+ if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
+ aead_form->key.length, NULL)))
+ return -1;
+
+ if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
+ aead_form->digest_length)))
+ return -1;
+
+ return 0;
+}
+
+static __rte_always_inline int
+fill_sess_cipher(struct rte_crypto_sym_xform *xform,
+ struct cpt_sess_misc *sess)
+{
+ struct rte_crypto_cipher_xform *c_form;
+ cipher_type_t enc_type = 0; /* NULL Cipher type */
+ uint32_t cipher_key_len = 0;
+ uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
+
+ c_form = &xform->cipher;
+
+ if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
+ else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
+ sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
+ else {
+		CPT_LOG_DP_ERR("Unknown cipher operation");
+ return -1;
+ }
+
+ switch (c_form->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ enc_type = AES_CBC;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ enc_type = DES3_CBC;
+ cipher_key_len = 24;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ /* DES is implemented using 3DES in hardware */
+ enc_type = DES3_CBC;
+ cipher_key_len = 8;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ enc_type = AES_CTR;
+ cipher_key_len = 16;
+ aes_ctr = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ enc_type = 0;
+ is_null = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ enc_type = KASUMI_F8_ECB;
+ cipher_key_len = 16;
+ zsk_flag = K_F8;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ enc_type = SNOW3G_UEA2;
+ cipher_key_len = 16;
+ zsk_flag = ZS_EA;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ enc_type = ZUC_EEA3;
+ cipher_key_len = 16;
+ zsk_flag = ZS_EA;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ enc_type = AES_XTS;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ enc_type = DES3_ECB;
+ cipher_key_len = 24;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ enc_type = AES_ECB;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
+ c_form->algo);
+ return -1;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ c_form->algo);
+ return -1;
+ }
+
+ if (c_form->key.length < cipher_key_len) {
+ CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
+ (unsigned long) c_form->key.length);
+ return -1;
+ }
+
+ sess->zsk_flag = zsk_flag;
+ sess->aes_gcm = 0;
+ sess->aes_ctr = aes_ctr;
+ sess->iv_offset = c_form->iv.offset;
+ sess->iv_length = c_form->iv.length;
+ sess->is_null = is_null;
+
+ if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
+ c_form->key.data, c_form->key.length, NULL)))
+ return -1;
+
+ return 0;
+}
+
+static __rte_always_inline int
+fill_sess_auth(struct rte_crypto_sym_xform *xform,
+ struct cpt_sess_misc *sess)
+{
+ struct rte_crypto_auth_xform *a_form;
+ auth_type_t auth_type = 0; /* NULL Auth type */
+ uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
+
+ a_form = &xform->auth;
+
+ if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->cpt_op |= CPT_OP_AUTH_VERIFY;
+ else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->cpt_op |= CPT_OP_AUTH_GENERATE;
+ else {
+ CPT_LOG_DP_ERR("Unknown auth operation");
+ return -1;
+ }
+
+ switch (a_form->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ /* Fall through */
+ case RTE_CRYPTO_AUTH_SHA1:
+ auth_type = SHA1_TYPE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256:
+ auth_type = SHA2_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512:
+ auth_type = SHA2_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ auth_type = GMAC_TYPE;
+ aes_gcm = 1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA224:
+ auth_type = SHA2_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384:
+ auth_type = SHA2_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_MD5:
+ auth_type = MD5_TYPE;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ auth_type = KASUMI_F9_ECB;
+ /*
+ * Indicate that direction needs to be taken out
+ * from end of src
+ */
+ zsk_flag = K_F9;
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ auth_type = SNOW3G_UIA2;
+ zsk_flag = ZS_IA;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ auth_type = ZUC_EIA3;
+ zsk_flag = ZS_IA;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ auth_type = 0;
+ is_null = 1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
+ a_form->algo);
+ return -1;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
+ a_form->algo);
+ return -1;
+ }
+
+ sess->zsk_flag = zsk_flag;
+ sess->aes_gcm = aes_gcm;
+ sess->mac_len = a_form->digest_length;
+ sess->is_null = is_null;
+ if (zsk_flag) {
+ sess->auth_iv_offset = a_form->iv.offset;
+ sess->auth_iv_length = a_form->iv.length;
+ }
+ if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
+ a_form->key.data, a_form->key.length,
+ a_form->digest_length)))
+ return -1;
+
+ return 0;
+}
+
+static __rte_always_inline int
+fill_sess_gmac(struct rte_crypto_sym_xform *xform,
+ struct cpt_sess_misc *sess)
+{
+ struct rte_crypto_auth_xform *a_form;
+ cipher_type_t enc_type = 0; /* NULL Cipher type */
+ auth_type_t auth_type = 0; /* NULL Auth type */
+ void *ctx = SESS_PRIV(sess);
+
+ a_form = &xform->auth;
+
+ if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->cpt_op |= CPT_OP_ENCODE;
+ else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->cpt_op |= CPT_OP_DECODE;
+ else {
+ CPT_LOG_DP_ERR("Unknown auth operation");
+ return -1;
+ }
+
+ switch (a_form->algo) {
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ enc_type = AES_GCM;
+ auth_type = GMAC_TYPE;
+ break;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ a_form->algo);
+ return -1;
+ }
+
+ sess->zsk_flag = 0;
+ sess->aes_gcm = 0;
+ sess->is_gmac = 1;
+ sess->iv_offset = a_form->iv.offset;
+ sess->iv_length = a_form->iv.length;
+ sess->mac_len = a_form->digest_length;
+
+ if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
+ a_form->key.length, NULL)))
+ return -1;
+
+ if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
+ a_form->digest_length)))
+ return -1;
+
+ return 0;
+}
+
+static __rte_always_inline void *
+alloc_op_meta(struct rte_mbuf *m_src,
+ buf_ptr_t *buf,
+ int32_t len,
+ struct rte_mempool *cpt_meta_pool)
+{
+ uint8_t *mdata;
+
+#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
+ if (likely(m_src && (m_src->nb_segs == 1))) {
+ int32_t tailroom;
+ phys_addr_t mphys;
+
+ /* Check if tailroom is sufficient to hold meta data */
+ tailroom = rte_pktmbuf_tailroom(m_src);
+ if (likely(tailroom > len + 8)) {
+ mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
+ mphys = m_src->buf_physaddr + m_src->buf_len;
+ mdata -= len;
+ mphys -= len;
+ buf->vaddr = mdata;
+ buf->dma_addr = mphys;
+ buf->size = len;
+			/* Indicate that this is an mbuf-allocated mdata */
+ mdata = (uint8_t *)((uint64_t)mdata | 1ull);
+ return mdata;
+ }
+ }
+#else
+ RTE_SET_USED(m_src);
+#endif
+
+ if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
+ return NULL;
+
+ buf->vaddr = mdata;
+ buf->dma_addr = rte_mempool_virt2iova(mdata);
+ buf->size = len;
+
+ return mdata;
+}
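+
+/*
+ * Note: when the metadata fits in the mbuf tailroom, alloc_op_meta()
+ * tags the returned pointer by setting its low bit; free_op_meta()
+ * below checks that bit and only hands mempool-backed buffers back to
+ * the pool.
+ */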
+
+/**
+ * free_op_meta - free metabuf back to its mempool.
+ * @param mdata: pointer to the metabuf.
+ * @param cpt_meta_pool: mempool the metabuf was allocated from.
+ */
+static __rte_always_inline void
+free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
+{
+ bool nofree = ((uintptr_t)mdata & 1ull);
+
+ if (likely(nofree))
+ return;
+ rte_mempool_put(cpt_meta_pool, mdata);
+}
+
+static __rte_always_inline uint32_t
+prepare_iov_from_pkt(struct rte_mbuf *pkt,
+ iov_ptr_t *iovec, uint32_t start_offset)
+{
+ uint16_t index = 0;
+ void *seg_data = NULL;
+ phys_addr_t seg_phys;
+ int32_t seg_size = 0;
+
+ if (!pkt) {
+ iovec->buf_cnt = 0;
+ return 0;
+ }
+
+ if (!start_offset) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_size = pkt->data_len;
+ } else {
+ while (start_offset >= pkt->data_len) {
+ start_offset -= pkt->data_len;
+ pkt = pkt->next;
+ }
+
+ seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
+ seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
+ seg_size = pkt->data_len - start_offset;
+ if (!seg_size)
+ return 1;
+ }
+
+ /* first seg */
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].dma_addr = seg_phys;
+ iovec->bufs[index].size = seg_size;
+ index++;
+ pkt = pkt->next;
+
+ while (unlikely(pkt != NULL)) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_size = pkt->data_len;
+ if (!seg_size)
+ break;
+
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].dma_addr = seg_phys;
+ iovec->bufs[index].size = seg_size;
+
+ index++;
+
+ pkt = pkt->next;
+ }
+
+ iovec->buf_cnt = index;
+ return 0;
+}
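+
+/*
+ * Example: a three-segment mbuf chain with start_offset == 0 fills one
+ * iovec entry per segment and sets buf_cnt = 3; a start_offset landing
+ * mid-segment shrinks the first entry accordingly.
+ */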
+
+static __rte_always_inline uint32_t
+prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
+ fc_params_t *param,
+ uint32_t *flags)
+{
+ uint16_t index = 0;
+ void *seg_data = NULL;
+ phys_addr_t seg_phys;
+ uint32_t seg_size = 0;
+ iov_ptr_t *iovec;
+
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_size = pkt->data_len;
+
+ /* first seg */
+ if (likely(!pkt->next)) {
+ uint32_t headroom, tailroom;
+
+ *flags |= SINGLE_BUF_INPLACE;
+ headroom = rte_pktmbuf_headroom(pkt);
+ tailroom = rte_pktmbuf_tailroom(pkt);
+ if (likely((headroom >= 24) &&
+ (tailroom >= 8))) {
+			/* In 83XX this is a prerequisite for Direct mode */
+ *flags |= SINGLE_BUF_HEADTAILROOM;
+ }
+ param->bufs[0].vaddr = seg_data;
+ param->bufs[0].dma_addr = seg_phys;
+ param->bufs[0].size = seg_size;
+ return 0;
+ }
+ iovec = param->src_iov;
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].dma_addr = seg_phys;
+ iovec->bufs[index].size = seg_size;
+ index++;
+ pkt = pkt->next;
+
+ while (unlikely(pkt != NULL)) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_size = pkt->data_len;
+
+ if (!seg_size)
+ break;
+
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].dma_addr = seg_phys;
+ iovec->bufs[index].size = seg_size;
+
+ index++;
+
+ pkt = pkt->next;
+ }
+
+ iovec->buf_cnt = index;
+ return 0;
+}
+
+static __rte_always_inline int
+fill_fc_params(struct rte_crypto_op *cop,
+ struct cpt_sess_misc *sess_misc,
+ struct cpt_qp_meta_info *m_info,
+ void **mdata_ptr,
+ void **prep_req)
+{
+ uint32_t space = 0;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ void *mdata = NULL;
+ uintptr_t *op;
+ uint32_t mc_hash_off;
+ uint32_t flags = 0;
+ uint64_t d_offs, d_lens;
+ struct rte_mbuf *m_src, *m_dst;
+ uint8_t cpt_op = sess_misc->cpt_op;
+#ifdef CPT_ALWAYS_USE_SG_MODE
+ uint8_t inplace = 0;
+#else
+ uint8_t inplace = 1;
+#endif
+ fc_params_t fc_params;
+ char src[SRC_IOV_SIZE];
+ char dst[SRC_IOV_SIZE];
+ uint32_t iv_buf[4];
+ int ret;
+
+ if (likely(sess_misc->iv_length)) {
+ flags |= VALID_IV_BUF;
+ fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
+ uint8_t *, sess_misc->iv_offset);
+ if (sess_misc->aes_ctr &&
+ unlikely(sess_misc->iv_length != 16)) {
+ memcpy((uint8_t *)iv_buf,
+ rte_crypto_op_ctod_offset(cop,
+ uint8_t *, sess_misc->iv_offset), 12);
+ iv_buf[3] = rte_cpu_to_be_32(0x1);
+ fc_params.iv_buf = iv_buf;
+ }
+ }
+
+ if (sess_misc->zsk_flag) {
+ fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
+ uint8_t *,
+ sess_misc->auth_iv_offset);
+ if (sess_misc->zsk_flag != ZS_EA)
+ inplace = 0;
+ }
+ m_src = sym_op->m_src;
+ m_dst = sym_op->m_dst;
+
+ if (sess_misc->aes_gcm) {
+ uint8_t *salt;
+ uint8_t *aad_data;
+ uint16_t aad_len;
+
+ d_offs = sym_op->aead.data.offset;
+ d_lens = sym_op->aead.data.length;
+ mc_hash_off = sym_op->aead.data.offset +
+ sym_op->aead.data.length;
+
+ aad_data = sym_op->aead.aad.data;
+ aad_len = sess_misc->aad_length;
+ if (likely((aad_data + aad_len) ==
+ rte_pktmbuf_mtod_offset(m_src,
+ uint8_t *,
+ sym_op->aead.data.offset))) {
+ d_offs = (d_offs - aad_len) | (d_offs << 16);
+ d_lens = (d_lens + aad_len) | (d_lens << 32);
+ } else {
+ fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
+ fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
+ fc_params.aad_buf.size = aad_len;
+ flags |= VALID_AAD_BUF;
+ inplace = 0;
+ d_offs = d_offs << 16;
+ d_lens = d_lens << 32;
+ }
+
+ salt = fc_params.iv_buf;
+ if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
+ cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
+ sess_misc->salt = *(uint32_t *)salt;
+ }
+ fc_params.iv_buf = salt + 4;
+ if (likely(sess_misc->mac_len)) {
+ struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
+ m_src;
+
+ if (!m)
+ m = m_src;
+
+ /* hmac immediately following data is best case */
+ if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
+ mc_hash_off !=
+ (uint8_t *)sym_op->aead.digest.data)) {
+ flags |= VALID_MAC_BUF;
+ fc_params.mac_buf.size = sess_misc->mac_len;
+ fc_params.mac_buf.vaddr =
+ sym_op->aead.digest.data;
+ fc_params.mac_buf.dma_addr =
+ sym_op->aead.digest.phys_addr;
+ inplace = 0;
+ }
+ }
+ } else {
+ d_offs = sym_op->cipher.data.offset;
+ d_lens = sym_op->cipher.data.length;
+ mc_hash_off = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length;
+ d_offs = (d_offs << 16) | sym_op->auth.data.offset;
+ d_lens = (d_lens << 32) | sym_op->auth.data.length;
+
+ if (mc_hash_off < (sym_op->auth.data.offset +
+ sym_op->auth.data.length)){
+ mc_hash_off = (sym_op->auth.data.offset +
+ sym_op->auth.data.length);
+ }
+ /* for gmac, salt should be updated like in gcm */
+ if (unlikely(sess_misc->is_gmac)) {
+ uint8_t *salt;
+ salt = fc_params.iv_buf;
+ if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
+ cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
+ sess_misc->salt = *(uint32_t *)salt;
+ }
+ fc_params.iv_buf = salt + 4;
+ }
+ if (likely(sess_misc->mac_len)) {
+ struct rte_mbuf *m;
+
+ m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
+ if (!m)
+ m = m_src;
+
+ /* hmac immediately following data is best case */
+ if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
+ mc_hash_off !=
+ (uint8_t *)sym_op->auth.digest.data)) {
+ flags |= VALID_MAC_BUF;
+ fc_params.mac_buf.size =
+ sess_misc->mac_len;
+ fc_params.mac_buf.vaddr =
+ sym_op->auth.digest.data;
+ fc_params.mac_buf.dma_addr =
+ sym_op->auth.digest.phys_addr;
+ inplace = 0;
+ }
+ }
+ }
+ fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
+ fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
+
+ if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
+ inplace = 0;
+
+ if (likely(!m_dst && inplace)) {
+		/* In-place single-buffer case: no AAD buf, no separate
+		 * MAC buf and not wireless (air) crypto
+		 */
+ fc_params.dst_iov = fc_params.src_iov = (void *)src;
+
+ if (unlikely(prepare_iov_from_pkt_inplace(m_src,
+ &fc_params,
+ &flags))) {
+ CPT_LOG_DP_ERR("Prepare inplace src iov failed");
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ } else {
+ /* Out of place processing */
+ fc_params.src_iov = (void *)src;
+ fc_params.dst_iov = (void *)dst;
+
+		/* Store SG I/O in the API for reuse */
+ if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
+ CPT_LOG_DP_ERR("Prepare src iov failed");
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ if (unlikely(m_dst != NULL)) {
+ uint32_t pkt_len;
+
+			/* Try to make as much room as src has */
+ pkt_len = rte_pktmbuf_pkt_len(m_dst);
+
+ if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
+ pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
+ if (!rte_pktmbuf_append(m_dst, pkt_len)) {
+ CPT_LOG_DP_ERR("Not enough space in "
+ "m_dst %p, need %u"
+ " more",
+ m_dst, pkt_len);
+ ret = -EINVAL;
+ goto err_exit;
+ }
+ }
+
+ if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
+ CPT_LOG_DP_ERR("Prepare dst iov failed for "
+ "m_dst %p", m_dst);
+ ret = -EINVAL;
+ goto err_exit;
+ }
+ } else {
+ fc_params.dst_iov = (void *)src;
+ }
+ }
+
+ if (likely(flags & SINGLE_BUF_HEADTAILROOM))
+ mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
+ m_info->lb_mlen, m_info->pool);
+ else
+ mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
+ m_info->sg_mlen, m_info->pool);
+
+ if (unlikely(mdata == NULL)) {
+ CPT_LOG_DP_ERR("Error allocating meta buffer for request");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
+ op[0] = (uintptr_t)mdata;
+ op[1] = (uintptr_t)cop;
+ op[2] = op[3] = 0; /* Used to indicate auth verify */
+ space += 4 * sizeof(uint64_t);
+
+ fc_params.meta_buf.vaddr = (uint8_t *)op + space;
+ fc_params.meta_buf.dma_addr += space;
+ fc_params.meta_buf.size -= space;
+
+ /* Finally prepare the instruction */
+ if (cpt_op & CPT_OP_ENCODE)
+ *prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
+ &fc_params, op);
+ else
+ *prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
+ &fc_params, op);
+
+ if (unlikely(*prep_req == NULL)) {
+ CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
+
+ *mdata_ptr = mdata;
+
+ return 0;
+
+free_mdata_and_exit:
+ free_op_meta(mdata, m_info->pool);
+err_exit:
+ return ret;
+}
+
+static __rte_always_inline void
+compl_auth_verify(struct rte_crypto_op *op,
+ uint8_t *gen_mac,
+ uint64_t mac_len)
+{
+ uint8_t *mac;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (sym_op->auth.digest.data)
+ mac = sym_op->auth.digest.data;
+ else
+ mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *,
+ sym_op->auth.data.length +
+ sym_op->auth.data.offset);
+ if (!mac) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (memcmp(mac, gen_mac, mac_len))
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
+static __rte_always_inline void
+find_kasumif9_direction_and_length(uint8_t *src,
+ uint32_t counter_num_bytes,
+ uint32_t *addr_length_in_bits,
+ uint8_t *addr_direction)
+{
+ uint8_t found = 0;
+ uint32_t pos;
+ uint8_t last_byte;
+ while (!found && counter_num_bytes > 0) {
+ counter_num_bytes--;
+ if (src[counter_num_bytes] == 0x00)
+ continue;
+ pos = rte_bsf32(src[counter_num_bytes]);
+ if (pos == 7) {
+ if (likely(counter_num_bytes > 0)) {
+ last_byte = src[counter_num_bytes - 1];
+ *addr_direction = last_byte & 0x1;
+ *addr_length_in_bits = counter_num_bytes * 8
+ - 1;
+ }
+ } else {
+ last_byte = src[counter_num_bytes];
+ *addr_direction = (last_byte >> (pos + 1)) & 0x1;
+ *addr_length_in_bits = counter_num_bytes * 8
+ + (8 - (pos + 2));
+ }
+ found = 1;
+ }
+}
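+
+/*
+ * Per the KASUMI F9 convention the bit stream ends with the direction
+ * bit, a single '1' marker bit and zero padding. The backward scan
+ * above finds the last non-zero byte, locates the marker with
+ * rte_bsf32() and recovers the direction bit and the true length in
+ * bits. E.g. a final non-zero byte of 0x08 puts the marker at bit 3,
+ * so the direction is bit 4 of that byte and 8 - 5 = 3 of its bits
+ * count towards the length.
+ */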
+
+/*
+ * This handles all auth only except AES_GMAC
+ */
+static __rte_always_inline int
+fill_digest_params(struct rte_crypto_op *cop,
+ struct cpt_sess_misc *sess,
+ struct cpt_qp_meta_info *m_info,
+ void **mdata_ptr,
+ void **prep_req)
+{
+ uint32_t space = 0;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ void *mdata;
+ phys_addr_t mphys;
+ uint64_t *op;
+ uint32_t auth_range_off;
+ uint32_t flags = 0;
+ uint64_t d_offs = 0, d_lens;
+ struct rte_mbuf *m_src, *m_dst;
+ uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
+ uint16_t mac_len = sess->mac_len;
+ fc_params_t params;
+ char src[SRC_IOV_SIZE];
+ uint8_t iv_buf[16];
+ int ret;
+
+ memset(&params, 0, sizeof(fc_params_t));
+
+ m_src = sym_op->m_src;
+
+	/* For digest-only case, force mempool alloc */
+ mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
+ m_info->pool);
+ if (mdata == NULL) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ mphys = params.meta_buf.dma_addr;
+
+ op = mdata;
+ op[0] = (uintptr_t)mdata;
+ op[1] = (uintptr_t)cop;
+ op[2] = op[3] = 0; /* Used to indicate auth verify */
+ space += 4 * sizeof(uint64_t);
+
+ auth_range_off = sym_op->auth.data.offset;
+
+ flags = VALID_MAC_BUF;
+ params.src_iov = (void *)src;
+ if (unlikely(sess->zsk_flag)) {
+		/*
+		 * Since for ZUC, KASUMI and SNOW3G the offsets are in bits,
+		 * pass them through even for the auth-only case and let the
+		 * microcode handle it.
+		 */
+ d_offs = auth_range_off;
+ auth_range_off = 0;
+ params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
+ uint8_t *, sess->auth_iv_offset);
+ if (sess->zsk_flag == K_F9) {
+ uint32_t length_in_bits, num_bytes;
+ uint8_t *src, direction = 0;
+
+ memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
+ uint8_t *), 8);
+			/*
+			 * This is KASUMI F9; take the direction from the
+			 * source buffer.
+			 */
+ length_in_bits = cop->sym->auth.data.length;
+ num_bytes = (length_in_bits >> 3);
+ src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
+ find_kasumif9_direction_and_length(src,
+ num_bytes,
+ &length_in_bits,
+ &direction);
+ length_in_bits -= 64;
+ cop->sym->auth.data.offset += 64;
+ d_offs = cop->sym->auth.data.offset;
+ auth_range_off = d_offs / 8;
+ cop->sym->auth.data.length = length_in_bits;
+
+ /* Store it at end of auth iv */
+ iv_buf[8] = direction;
+ params.auth_iv_buf = iv_buf;
+ }
+ }
+
+ d_lens = sym_op->auth.data.length;
+
+ params.ctx_buf.vaddr = SESS_PRIV(sess);
+ params.ctx_buf.dma_addr = sess->ctx_dma_addr;
+
+ if (auth_op == CPT_OP_AUTH_GENERATE) {
+ if (sym_op->auth.digest.data) {
+ /*
+ * Digest to be generated
+ * in separate buffer
+ */
+ params.mac_buf.size =
+ sess->mac_len;
+ params.mac_buf.vaddr =
+ sym_op->auth.digest.data;
+ params.mac_buf.dma_addr =
+ sym_op->auth.digest.phys_addr;
+ } else {
+ uint32_t off = sym_op->auth.data.offset +
+ sym_op->auth.data.length;
+ int32_t dlen, space;
+
+ m_dst = sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src;
+ dlen = rte_pktmbuf_pkt_len(m_dst);
+
+ space = off + mac_len - dlen;
+			if (space > 0) {
+				if (!rte_pktmbuf_append(m_dst, space)) {
+					CPT_LOG_DP_ERR("Failed to extend "
+						       "mbuf by %dB", space);
+					ret = -EINVAL;
+					goto free_mdata_and_exit;
+				}
+			}
+
+ params.mac_buf.vaddr =
+ rte_pktmbuf_mtod_offset(m_dst, void *, off);
+ params.mac_buf.dma_addr =
+ rte_pktmbuf_mtophys_offset(m_dst, off);
+ params.mac_buf.size = mac_len;
+ }
+ } else {
+ /* Need space for storing generated mac */
+ params.mac_buf.vaddr = (uint8_t *)mdata + space;
+ params.mac_buf.dma_addr = mphys + space;
+ params.mac_buf.size = mac_len;
+ space += RTE_ALIGN_CEIL(mac_len, 8);
+ op[2] = (uintptr_t)params.mac_buf.vaddr;
+ op[3] = mac_len;
+ }
+
+ params.meta_buf.vaddr = (uint8_t *)mdata + space;
+ params.meta_buf.dma_addr = mphys + space;
+ params.meta_buf.size -= space;
+
+ /* Out of place processing */
+ params.src_iov = (void *)src;
+
+	/* Store SG I/O in the API for reuse */
+ if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
+ CPT_LOG_DP_ERR("Prepare src iov failed");
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
+
+ *prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
+ if (unlikely(*prep_req == NULL)) {
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
+
+ *mdata_ptr = mdata;
+
+ return 0;
+
+free_mdata_and_exit:
+ free_op_meta(mdata, m_info->pool);
+err_exit:
+ return ret;
+}
+
+#endif /* _CPT_UCODE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/cpt/cpt_ucode_asym.h b/src/spdk/dpdk/drivers/common/cpt/cpt_ucode_asym.h
new file mode 100644
index 000000000..5d1c7b5f0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/cpt_ucode_asym.h
@@ -0,0 +1,915 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _CPT_UCODE_ASYM_H_
+#define _CPT_UCODE_ASYM_H_
+
+#include <rte_common.h>
+#include <rte_crypto_asym.h>
+#include <rte_malloc.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
+#include "cpt_mcode_defines.h"
+
+static __rte_always_inline void
+cpt_modex_param_normalize(uint8_t **data, size_t *len)
+{
+ size_t i;
+
+ /* Strip leading NUL bytes */
+
+ for (i = 0; i < *len; i++) {
+ if ((*data)[i] != 0)
+ break;
+ }
+
+ *data += i;
+ *len -= i;
+}
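+
+/*
+ * Example: data = {0x00, 0x00, 0x01, 0x23} with len = 4 normalizes to
+ * data pointing at the 0x01 byte and len = 2.
+ */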
+
+static __rte_always_inline int
+cpt_fill_modex_params(struct cpt_asym_sess_misc *sess,
+ struct rte_crypto_asym_xform *xform)
+{
+ struct rte_crypto_modex_xform *ctx = &sess->mod_ctx;
+ size_t exp_len = xform->modex.exponent.length;
+ size_t mod_len = xform->modex.modulus.length;
+ uint8_t *exp = xform->modex.exponent.data;
+ uint8_t *mod = xform->modex.modulus.data;
+
+ cpt_modex_param_normalize(&mod, &mod_len);
+ cpt_modex_param_normalize(&exp, &exp_len);
+
+ if (unlikely(exp_len == 0 || mod_len == 0))
+ return -EINVAL;
+
+ if (unlikely(exp_len > mod_len)) {
+ CPT_LOG_DP_ERR("Exponent length greater than modulus length is not supported");
+ return -ENOTSUP;
+ }
+
+ /* Allocate buffer to hold modexp params */
+ ctx->modulus.data = rte_malloc(NULL, mod_len + exp_len, 0);
+ if (ctx->modulus.data == NULL) {
+ CPT_LOG_DP_ERR("Could not allocate buffer for modex params");
+ return -ENOMEM;
+ }
+
+ /* Set up modexp prime modulus and private exponent */
+
+ memcpy(ctx->modulus.data, mod, mod_len);
+ ctx->exponent.data = ctx->modulus.data + mod_len;
+ memcpy(ctx->exponent.data, exp, exp_len);
+
+ ctx->modulus.length = mod_len;
+ ctx->exponent.length = exp_len;
+
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_fill_rsa_params(struct cpt_asym_sess_misc *sess,
+ struct rte_crypto_asym_xform *xform)
+{
+ struct rte_crypto_rsa_priv_key_qt qt = xform->rsa.qt;
+ struct rte_crypto_rsa_xform *xfrm_rsa = &xform->rsa;
+ struct rte_crypto_rsa_xform *rsa = &sess->rsa_ctx;
+ size_t mod_len = xfrm_rsa->n.length;
+ size_t exp_len = xfrm_rsa->e.length;
+ uint64_t total_size;
+ size_t len = 0;
+
+ /* Make sure key length used is not more than mod_len/2 */
+ if (qt.p.data != NULL)
+ len = (((mod_len / 2) < qt.p.length) ? len : qt.p.length);
+
+ /* Total size required for RSA key params(n,e,(q,dQ,p,dP,qInv)) */
+ total_size = mod_len + exp_len + 5 * len;
+
+ /* Allocate buffer to hold all RSA keys */
+ rsa->n.data = rte_malloc(NULL, total_size, 0);
+ if (rsa->n.data == NULL) {
+ CPT_LOG_DP_ERR("Could not allocate buffer for RSA keys");
+ return -ENOMEM;
+ }
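+
+	/*
+	 * The single allocation is carved up in order as:
+	 *   n | e | q | dQ | p | dP | qInv
+	 * with each field pointer below set to its slice.
+	 */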
+
+ /* Set up RSA prime modulus and public key exponent */
+ memcpy(rsa->n.data, xfrm_rsa->n.data, mod_len);
+ rsa->e.data = rsa->n.data + mod_len;
+ memcpy(rsa->e.data, xfrm_rsa->e.data, exp_len);
+
+ /* Private key in quintuple format */
+ if (len != 0) {
+ rsa->qt.q.data = rsa->e.data + exp_len;
+ memcpy(rsa->qt.q.data, qt.q.data, qt.q.length);
+ rsa->qt.dQ.data = rsa->qt.q.data + qt.q.length;
+ memcpy(rsa->qt.dQ.data, qt.dQ.data, qt.dQ.length);
+ rsa->qt.p.data = rsa->qt.dQ.data + qt.dQ.length;
+ memcpy(rsa->qt.p.data, qt.p.data, qt.p.length);
+ rsa->qt.dP.data = rsa->qt.p.data + qt.p.length;
+ memcpy(rsa->qt.dP.data, qt.dP.data, qt.dP.length);
+ rsa->qt.qInv.data = rsa->qt.dP.data + qt.dP.length;
+ memcpy(rsa->qt.qInv.data, qt.qInv.data, qt.qInv.length);
+
+ rsa->qt.q.length = qt.q.length;
+ rsa->qt.dQ.length = qt.dQ.length;
+ rsa->qt.p.length = qt.p.length;
+ rsa->qt.dP.length = qt.dP.length;
+ rsa->qt.qInv.length = qt.qInv.length;
+ }
+ rsa->n.length = mod_len;
+ rsa->e.length = exp_len;
+
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_fill_ec_params(struct cpt_asym_sess_misc *sess,
+ struct rte_crypto_asym_xform *xform)
+{
+ struct cpt_asym_ec_ctx *ec = &sess->ec_ctx;
+
+ switch (xform->ec.curve_id) {
+ case RTE_CRYPTO_EC_GROUP_SECP192R1:
+ ec->curveid = CPT_EC_ID_P192;
+ break;
+ case RTE_CRYPTO_EC_GROUP_SECP224R1:
+ ec->curveid = CPT_EC_ID_P224;
+ break;
+ case RTE_CRYPTO_EC_GROUP_SECP256R1:
+ ec->curveid = CPT_EC_ID_P256;
+ break;
+ case RTE_CRYPTO_EC_GROUP_SECP384R1:
+ ec->curveid = CPT_EC_ID_P384;
+ break;
+ case RTE_CRYPTO_EC_GROUP_SECP521R1:
+ ec->curveid = CPT_EC_ID_P521;
+ break;
+ default:
+ /* Only NIST curves (FIPS 186-4) are supported */
+ CPT_LOG_DP_ERR("Unsupported curve");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_fill_asym_session_parameters(struct cpt_asym_sess_misc *sess,
+ struct rte_crypto_asym_xform *xform)
+{
+ int ret;
+
+ sess->xfrm_type = xform->xform_type;
+
+ switch (xform->xform_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ ret = cpt_fill_rsa_params(sess, xform);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ ret = cpt_fill_modex_params(sess, xform);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+ /* Fall through */
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+ ret = cpt_fill_ec_params(sess, xform);
+ break;
+ default:
+ CPT_LOG_DP_ERR("Unsupported transform type");
+ return -ENOTSUP;
+ }
+ return ret;
+}
+
+static __rte_always_inline void
+cpt_free_asym_session_parameters(struct cpt_asym_sess_misc *sess)
+{
+ struct rte_crypto_modex_xform *mod;
+ struct rte_crypto_rsa_xform *rsa;
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ rsa = &sess->rsa_ctx;
+ if (rsa->n.data)
+ rte_free(rsa->n.data);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ mod = &sess->mod_ctx;
+ if (mod->modulus.data)
+ rte_free(mod->modulus.data);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+ /* Fall through */
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+ break;
+ default:
+ CPT_LOG_DP_ERR("Invalid transform type");
+ break;
+ }
+}
+
+static __rte_always_inline void
+cpt_fill_req_comp_addr(struct cpt_request_info *req, buf_ptr_t addr)
+{
+ void *completion_addr = RTE_PTR_ALIGN(addr.vaddr, 16);
+
+ /* Pointer to cpt_res_s, updated by CPT */
+ req->completion_addr = (volatile uint64_t *)completion_addr;
+ req->comp_baddr = addr.dma_addr +
+ RTE_PTR_DIFF(completion_addr, addr.vaddr);
+ *(req->completion_addr) = COMPLETION_CODE_INIT;
+}
+
+static __rte_always_inline int
+cpt_modex_prep(struct asym_op_params *modex_params,
+ struct rte_crypto_modex_xform *mod)
+{
+ struct cpt_request_info *req = modex_params->req;
+ phys_addr_t mphys = modex_params->meta_buf;
+ uint32_t exp_len = mod->exponent.length;
+ uint32_t mod_len = mod->modulus.length;
+ struct rte_crypto_mod_op_param mod_op;
+ struct rte_crypto_op **op;
+ vq_cmd_word0_t vq_cmd_w0;
+ uint64_t total_key_len;
+ opcode_info_t opcode;
+ uint32_t dlen, rlen;
+ uint32_t base_len;
+ buf_ptr_t caddr;
+ uint8_t *dptr;
+
+	/* Extracting modex op from params->req->op[1]->asym->modex */
+ op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
+ mod_op = ((struct rte_crypto_op *)*op)->asym->modex;
+
+ base_len = mod_op.base.length;
+ if (unlikely(base_len > mod_len)) {
+ CPT_LOG_DP_ERR("Base length greater than modulus length is not supported");
+ (*op)->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -ENOTSUP;
+ }
+
+ total_key_len = mod_len + exp_len;
+
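+	/*
+	 * DPTR layout: modulus | exponent | base operand. The result
+	 * buffer (mod_len bytes) follows right after the input.
+	 */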
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
+ memcpy(dptr, mod->modulus.data, total_key_len);
+ dptr += total_key_len;
+ memcpy(dptr, mod_op.base.data, base_len);
+ dptr += base_len;
+ dlen = total_key_len + base_len;
+
+ /* Result buffer */
+ rlen = mod_len;
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_MODEX;
+ opcode.s.minor = CPT_MINOR_OP_MODEX;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* GP op header */
+ vq_cmd_w0.s.param1 = mod_len;
+ vq_cmd_w0.s.param2 = exp_len;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+ /* Result pointer to store result data */
+ req->rptr = dptr;
+
+ /* alternate_caddr to write completion status of the microcode */
+ req->alternate_caddr = (uint64_t *)(dptr + rlen);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + rlen + 1;
+ caddr.dma_addr = mphys + dlen + rlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+ return 0;
+}
+
+static __rte_always_inline void
+cpt_rsa_prep(struct asym_op_params *rsa_params,
+ struct rte_crypto_rsa_xform *rsa,
+ rte_crypto_param *crypto_param)
+{
+ struct cpt_request_info *req = rsa_params->req;
+ phys_addr_t mphys = rsa_params->meta_buf;
+ struct rte_crypto_rsa_op_param rsa_op;
+ uint32_t mod_len = rsa->n.length;
+ uint32_t exp_len = rsa->e.length;
+ struct rte_crypto_op **op;
+ vq_cmd_word0_t vq_cmd_w0;
+ uint64_t total_key_len;
+ opcode_info_t opcode;
+ uint32_t dlen, rlen;
+ uint32_t in_size;
+ buf_ptr_t caddr;
+ uint8_t *dptr;
+
+	/* Extracting rsa op from params->req->op[1]->asym->rsa */
+ op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
+ rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
+ total_key_len = mod_len + exp_len;
+
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
+ memcpy(dptr, rsa->n.data, total_key_len);
+ dptr += total_key_len;
+
+ in_size = crypto_param->length;
+ memcpy(dptr, crypto_param->data, in_size);
+
+ dptr += in_size;
+ dlen = total_key_len + in_size;
+
+ /* Result buffer */
+ rlen = mod_len;
+
+ if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+ /* Use mod_exp operation for no_padding type */
+ opcode.s.minor = CPT_MINOR_OP_MODEX;
+ vq_cmd_w0.s.param2 = exp_len;
+ } else {
+ if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+ opcode.s.minor = CPT_MINOR_OP_PKCS_ENC;
+			/* Public key encrypt, use BT2 */
+ vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2 |
+ ((uint16_t)(exp_len) << 1);
+ } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
+ opcode.s.minor = CPT_MINOR_OP_PKCS_DEC;
+ /* Public key decrypt, use BT1 */
+ vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
+ /* + 2 for decrypted len */
+ rlen += 2;
+ }
+ }
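+
+	/*
+	 * BT1/BT2 are PKCS#1 v1.5 block types: BT1 (0x01 padding) is used
+	 * on the signature path (sign/verify) and BT2 (random padding) on
+	 * the encryption path (encrypt/decrypt).
+	 */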
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_MODEX;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* GP op header */
+ vq_cmd_w0.s.param1 = mod_len;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+ /* Result pointer to store result data */
+ req->rptr = dptr;
+
+ /* alternate_caddr to write completion status of the microcode */
+ req->alternate_caddr = (uint64_t *)(dptr + rlen);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + rlen + 1;
+ caddr.dma_addr = mphys + dlen + rlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+}
+
+static __rte_always_inline void
+cpt_rsa_crt_prep(struct asym_op_params *rsa_params,
+ struct rte_crypto_rsa_xform *rsa,
+ rte_crypto_param *crypto_param)
+{
+ struct cpt_request_info *req = rsa_params->req;
+ phys_addr_t mphys = rsa_params->meta_buf;
+ uint32_t qInv_len = rsa->qt.qInv.length;
+ struct rte_crypto_rsa_op_param rsa_op;
+ uint32_t dP_len = rsa->qt.dP.length;
+ uint32_t dQ_len = rsa->qt.dQ.length;
+ uint32_t p_len = rsa->qt.p.length;
+ uint32_t q_len = rsa->qt.q.length;
+ uint32_t mod_len = rsa->n.length;
+ struct rte_crypto_op **op;
+ vq_cmd_word0_t vq_cmd_w0;
+ uint64_t total_key_len;
+ opcode_info_t opcode;
+ uint32_t dlen, rlen;
+ uint32_t in_size;
+ buf_ptr_t caddr;
+ uint8_t *dptr;
+
+	/* Extracting rsa op from params->req->op[1]->asym->rsa */
+ op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
+ rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
+ total_key_len = p_len + q_len + dP_len + dQ_len + qInv_len;
+
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
+ memcpy(dptr, rsa->qt.q.data, total_key_len);
+ dptr += total_key_len;
+
+ in_size = crypto_param->length;
+ memcpy(dptr, crypto_param->data, in_size);
+
+ dptr += in_size;
+ dlen = total_key_len + in_size;
+
+ /* Result buffer */
+ rlen = mod_len;
+
+ if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+		/* Use mod_exp operation for no_padding type */
+ opcode.s.minor = CPT_MINOR_OP_MODEX_CRT;
+ } else {
+ if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
+ opcode.s.minor = CPT_MINOR_OP_PKCS_ENC_CRT;
+ /* Private encrypt, use BT1 */
+ vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
+ } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
+ opcode.s.minor = CPT_MINOR_OP_PKCS_DEC_CRT;
+ /* Private decrypt, use BT2 */
+ vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2;
+ /* + 2 for decrypted len */
+ rlen += 2;
+ }
+ }
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_MODEX;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* GP op header */
+ vq_cmd_w0.s.param1 = mod_len;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+ /* Result pointer to store result data */
+ req->rptr = dptr;
+
+ /* alternate_caddr to write completion status of the microcode */
+ req->alternate_caddr = (uint64_t *)(dptr + rlen);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + rlen + 1;
+ caddr.dma_addr = mphys + dlen + rlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+}
+
+static __rte_always_inline int __rte_hot
+cpt_enqueue_rsa_op(struct rte_crypto_op *op,
+ struct asym_op_params *params,
+ struct cpt_asym_sess_misc *sess)
+{
+ struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;
+
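+	/*
+	 * Public-key operations (verify/encrypt) take the plain modexp
+	 * path; private-key operations (sign/decrypt) take the CRT
+	 * quintuple path.
+	 */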
+ switch (rsa->op_type) {
+ case RTE_CRYPTO_ASYM_OP_VERIFY:
+ cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->sign);
+ break;
+ case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+ cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->message);
+ break;
+ case RTE_CRYPTO_ASYM_OP_SIGN:
+ cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->message);
+ break;
+ case RTE_CRYPTO_ASYM_OP_DECRYPT:
+ cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->cipher);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct cpt_ec_group ec_grp[CPT_EC_ID_PMAX] = {
+ {
+ .prime = {
+ .data = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .length = 24,
+ },
+ .order = {
+ .data = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x99, 0xDE, 0xF8, 0x36,
+ 0x14, 0x6B, 0xC9, 0xB1, 0xB4, 0xD2, 0x28, 0x31
+ },
+ .length = 24
+ },
+ },
+ {
+ .prime = {
+ .data = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01
+ },
+ .length = 28
+ },
+ .order = {
+ .data = {
+				0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x16, 0xA2,
+				0xE0, 0xB8, 0xF0, 0x3E, 0x13, 0xDD, 0x29, 0x45,
+				0x5C, 0x5C, 0x2A, 0x3D
+ },
+ .length = 28
+ },
+ },
+ {
+ .prime = {
+ .data = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .length = 32
+ },
+ .order = {
+ .data = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84,
+ 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x51
+ },
+ .length = 32
+ },
+ },
+ {
+ .prime = {
+ .data = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .length = 48
+ },
+ .order = {
+ .data = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xC7, 0x63, 0x4D, 0x81, 0xF4, 0x37, 0x2D, 0xDF,
+ 0x58, 0x1A, 0x0D, 0xB2, 0x48, 0xB0, 0xA7, 0x7A,
+ 0xEC, 0xEC, 0x19, 0x6A, 0xCC, 0xC5, 0x29, 0x73
+ },
+ .length = 48
+ }
+ },
+ {
+ .prime = {
+ .data = {
+ 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF
+ },
+ .length = 66
+ },
+ .order = {
+ .data = {
+ 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFA, 0x51, 0x86, 0x87, 0x83, 0xBF, 0x2F,
+ 0x96, 0x6B, 0x7F, 0xCC, 0x01, 0x48, 0xF7, 0x09,
+ 0xA5, 0xD0, 0x3B, 0xB5, 0xC9, 0xB8, 0x89, 0x9C,
+ 0x47, 0xAE, 0xBB, 0x6F, 0xB7, 0x1E, 0x91, 0x38,
+ 0x64, 0x09
+ },
+ .length = 66
+ }
+ }
+};
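+
+/*
+ * ec_grp[] is indexed by the CPT_EC_ID_* values assigned in
+ * cpt_fill_ec_params() and carries the NIST P-192/224/256/384/521
+ * primes and group orders in big-endian byte order.
+ */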
+
+static __rte_always_inline void
+cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+ struct asym_op_params *ecdsa_params,
+ uint64_t fpm_table_iova,
+ uint8_t curveid)
+{
+ struct cpt_request_info *req = ecdsa_params->req;
+ uint16_t message_len = ecdsa->message.length;
+ phys_addr_t mphys = ecdsa_params->meta_buf;
+ uint16_t pkey_len = ecdsa->pkey.length;
+ uint16_t p_align, k_align, m_align;
+ uint16_t k_len = ecdsa->k.length;
+ uint16_t order_len, prime_len;
+ uint16_t o_offset, pk_offset;
+ vq_cmd_word0_t vq_cmd_w0;
+ opcode_info_t opcode;
+ uint16_t rlen, dlen;
+ buf_ptr_t caddr;
+ uint8_t *dptr;
+
+ prime_len = ec_grp[curveid].prime.length;
+ order_len = ec_grp[curveid].order.length;
+
+ /* Truncate input length to curve prime length */
+ if (message_len > prime_len)
+ message_len = prime_len;
+ m_align = ROUNDUP8(message_len);
+
+ p_align = ROUNDUP8(prime_len);
+ k_align = ROUNDUP8(k_len);
+
+ /* Set write offset for order and private key */
+ o_offset = prime_len - order_len;
+ pk_offset = prime_len - pkey_len;
+
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
+
+	/*
+	 * Set dlen = sizeof(fpm address) + ROUNDUP8(scalar len) +
+	 * ROUNDUP8(input len) + 3 * ROUNDUP8(prime len).
+	 * Note that the private key and order cannot exceed the prime
+	 * length, hence the 3 * p_align term.
+	 */
+ dlen = sizeof(fpm_table_iova) + k_align + m_align + p_align * 3;
+
+ memset(dptr, 0, dlen);
+
+ *(uint64_t *)dptr = fpm_table_iova;
+ dptr += sizeof(fpm_table_iova);
+
+ memcpy(dptr, ecdsa->k.data, k_len);
+ dptr += k_align;
+
+ memcpy(dptr, ec_grp[curveid].prime.data, prime_len);
+ dptr += p_align;
+
+ memcpy(dptr + o_offset, ec_grp[curveid].order.data, order_len);
+ dptr += p_align;
+
+ memcpy(dptr + pk_offset, ecdsa->pkey.data, pkey_len);
+ dptr += p_align;
+
+ memcpy(dptr, ecdsa->message.data, message_len);
+ dptr += m_align;
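+
+	/*
+	 * DPTR layout at this point:
+	 *   fpm_table_iova | k | prime | order | pkey | message
+	 * with order and pkey right-justified in prime-length slots.
+	 */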
+
+	/* 2 * prime length (for sign r and s) */
+ rlen = 2 * p_align;
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_ECDSA;
+ opcode.s.minor = CPT_MINOR_OP_ECDSA_SIGN;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* GP op header */
+ vq_cmd_w0.s.param1 = curveid | (message_len << 8);
+ vq_cmd_w0.s.param2 = k_len;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+ /* Result pointer to store result data */
+ req->rptr = dptr;
+
+ /* alternate_caddr to write completion status of the microcode */
+ req->alternate_caddr = (uint64_t *)(dptr + rlen);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + rlen + 1;
+ caddr.dma_addr = mphys + dlen + rlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+}
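+
+/*
+ * For reference (illustrative, derived from the memcpy sequence above),
+ * cpt_ecdsa_sign_prep() lays out the input buffer at dptr as:
+ *
+ *   [ fpm_table_iova (8B) | k (k_align) | prime (p_align) |
+ *     order (p_align) | pkey (p_align) | message (m_align) ]
+ *
+ * The order and private key are right-justified to the prime length.
+ * The result buffer at rptr receives the sign components r and s, each
+ * p_align bytes (rlen = 2 * p_align).
+ */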
+
+static __rte_always_inline void
+cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+ struct asym_op_params *ecdsa_params,
+ uint64_t fpm_table_iova,
+ uint8_t curveid)
+{
+ struct cpt_request_info *req = ecdsa_params->req;
+ uint32_t message_len = ecdsa->message.length;
+ phys_addr_t mphys = ecdsa_params->meta_buf;
+ uint16_t o_offset, r_offset, s_offset;
+ uint16_t qx_len = ecdsa->q.x.length;
+ uint16_t qy_len = ecdsa->q.y.length;
+ uint16_t r_len = ecdsa->r.length;
+ uint16_t s_len = ecdsa->s.length;
+ uint16_t order_len, prime_len;
+ uint16_t qx_offset, qy_offset;
+ uint16_t p_align, m_align;
+ vq_cmd_word0_t vq_cmd_w0;
+ opcode_info_t opcode;
+ buf_ptr_t caddr;
+ uint16_t dlen;
+ uint8_t *dptr;
+
+ prime_len = ec_grp[curveid].prime.length;
+ order_len = ec_grp[curveid].order.length;
+
+ /* Truncate input length to curve prime length */
+ if (message_len > prime_len)
+ message_len = prime_len;
+
+ m_align = ROUNDUP8(message_len);
+ p_align = ROUNDUP8(prime_len);
+
+ /* Set write offset for sign, order and public key coordinates */
+ o_offset = prime_len - order_len;
+ qx_offset = prime_len - qx_len;
+ qy_offset = prime_len - qy_len;
+ r_offset = prime_len - r_len;
+ s_offset = prime_len - s_len;
+
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
+
+	/*
+	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
+	 * sign len (r and s), public key len (x and y coordinates),
+	 * prime len and order len).
+	 * Note that the sign, public key and order cannot exceed the
+	 * prime length, i.e. 6 * p_align in total.
+	 */
+ dlen = sizeof(fpm_table_iova) + m_align + (6 * p_align);
+
+ memset(dptr, 0, dlen);
+
+ *(uint64_t *)dptr = fpm_table_iova;
+ dptr += sizeof(fpm_table_iova);
+
+ memcpy(dptr + r_offset, ecdsa->r.data, r_len);
+ dptr += p_align;
+
+ memcpy(dptr + s_offset, ecdsa->s.data, s_len);
+ dptr += p_align;
+
+ memcpy(dptr, ecdsa->message.data, message_len);
+ dptr += m_align;
+
+ memcpy(dptr + o_offset, ec_grp[curveid].order.data, order_len);
+ dptr += p_align;
+
+ memcpy(dptr, ec_grp[curveid].prime.data, prime_len);
+ dptr += p_align;
+
+ memcpy(dptr + qx_offset, ecdsa->q.x.data, qx_len);
+ dptr += p_align;
+
+ memcpy(dptr + qy_offset, ecdsa->q.y.data, qy_len);
+ dptr += p_align;
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_ECDSA;
+ opcode.s.minor = CPT_MINOR_OP_ECDSA_VERIFY;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* GP op header */
+ vq_cmd_w0.s.param1 = curveid | (message_len << 8);
+ vq_cmd_w0.s.param2 = 0;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+ /* Result pointer to store result data */
+ req->rptr = dptr;
+
+ /* alternate_caddr to write completion status of the microcode */
+ req->alternate_caddr = (uint64_t *)dptr;
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + 1;
+ caddr.dma_addr = mphys + dlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+}
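+
+/*
+ * For reference (illustrative, derived from the memcpy sequence above),
+ * cpt_ecdsa_verify_prep() lays out the input buffer at dptr as:
+ *
+ *   [ fpm_table_iova (8B) | r | s | message (m_align) | order |
+ *     prime | qx | qy ]
+ *
+ * Each field other than the message occupies p_align bytes; r, s, order,
+ * qx and qy are right-justified to the prime length.
+ */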
+
+static __rte_always_inline int __rte_hot
+cpt_enqueue_ecdsa_op(struct rte_crypto_op *op,
+ struct asym_op_params *params,
+ struct cpt_asym_sess_misc *sess,
+ uint64_t *fpm_iova)
+{
+ struct rte_crypto_ecdsa_op_param *ecdsa = &op->asym->ecdsa;
+ uint8_t curveid = sess->ec_ctx.curveid;
+
+ if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_SIGN)
+ cpt_ecdsa_sign_prep(ecdsa, params, fpm_iova[curveid], curveid);
+ else if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+ cpt_ecdsa_verify_prep(ecdsa, params, fpm_iova[curveid],
+ curveid);
+ else {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_ecpm_prep(struct rte_crypto_ecpm_op_param *ecpm,
+ struct asym_op_params *asym_params,
+ uint8_t curveid)
+{
+ struct cpt_request_info *req = asym_params->req;
+ phys_addr_t mphys = asym_params->meta_buf;
+ uint16_t x1_len = ecpm->p.x.length;
+ uint16_t y1_len = ecpm->p.y.length;
+ uint16_t scalar_align, p_align;
+ uint16_t dlen, rlen, prime_len;
+ uint16_t x1_offset, y1_offset;
+ vq_cmd_word0_t vq_cmd_w0;
+ opcode_info_t opcode;
+ buf_ptr_t caddr;
+ uint8_t *dptr;
+
+ prime_len = ec_grp[curveid].prime.length;
+
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
+
+ p_align = ROUNDUP8(prime_len);
+ scalar_align = ROUNDUP8(ecpm->scalar.length);
+
+	/*
+	 * Set dlen = sum(ROUNDUP8 of the input point (x and y coordinates),
+	 * prime and scalar lengths).
+	 * Note that each point coordinate is at most the prime length of
+	 * the curve, hence 3 * p_align + scalar_align.
+	 */
+ dlen = 3 * p_align + scalar_align;
+
+ x1_offset = prime_len - x1_len;
+ y1_offset = prime_len - y1_len;
+
+ memset(dptr, 0, dlen);
+
+ /* Copy input point, scalar, prime */
+ memcpy(dptr + x1_offset, ecpm->p.x.data, x1_len);
+ dptr += p_align;
+ memcpy(dptr + y1_offset, ecpm->p.y.data, y1_len);
+ dptr += p_align;
+ memcpy(dptr, ecpm->scalar.data, ecpm->scalar.length);
+ dptr += scalar_align;
+ memcpy(dptr, ec_grp[curveid].prime.data, ec_grp[curveid].prime.length);
+ dptr += p_align;
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_ECC;
+ opcode.s.minor = CPT_MINOR_OP_ECC_UMP;
+
+ /* GP op header */
+ vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.param1 = curveid;
+ vq_cmd_w0.s.param2 = ecpm->scalar.length;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+	/* The result buffer stores the output point; each coordinate
+	 * is prime-length bytes, so set rlen to twice the aligned
+	 * prime length.
+	 */
+ rlen = p_align << 1;
+ req->rptr = dptr;
+
+ /* alternate_caddr to write completion status by the microcode */
+ req->alternate_caddr = (uint64_t *)(dptr + rlen);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + rlen + 1;
+ caddr.dma_addr = mphys + dlen + rlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+ return 0;
+}
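+
+/*
+ * For reference (illustrative), cpt_ecpm_prep() lays out the operands at
+ * dptr as:
+ *
+ *   [ x1 (p_align) | y1 (p_align) | scalar (scalar_align) |
+ *     prime (p_align) ]
+ *
+ * The input coordinates are right-justified to the prime length; the
+ * output point is returned at rptr as two p_align-sized coordinates.
+ */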
+#endif /* _CPT_UCODE_ASYM_H_ */
diff --git a/src/spdk/dpdk/drivers/common/cpt/meson.build b/src/spdk/dpdk/drivers/common/cpt/meson.build
new file mode 100644
index 000000000..beecf0da3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+sources = files('cpt_fpm_tables.c',
+ 'cpt_pmd_ops_helper.c')
+
+deps = ['kvargs', 'pci', 'cryptodev']
+includes += include_directories('../../crypto/octeontx')
diff --git a/src/spdk/dpdk/drivers/common/cpt/rte_common_cpt_version.map b/src/spdk/dpdk/drivers/common/cpt/rte_common_cpt_version.map
new file mode 100644
index 000000000..8c65cde6c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/cpt/rte_common_cpt_version.map
@@ -0,0 +1,18 @@
+DPDK_20.0 {
+ global:
+
+ cpt_pmd_ops_helper_asym_get_mlen;
+ cpt_pmd_ops_helper_get_mlen_direct_mode;
+ cpt_pmd_ops_helper_get_mlen_sg_mode;
+
+ local: *;
+};
+
+EXPERIMENTAL {
+ global:
+
+ cpt_fpm_clear;
+ cpt_fpm_init;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/common/dpaax/Makefile b/src/spdk/dpdk/drivers/common/dpaax/Makefile
new file mode 100644
index 000000000..2f4b924fd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_dpaax.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -Wno-cast-qual
+
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax/caamflib
+
+# versioning export map
+EXPORT_MAP := rte_common_dpaax_version.map
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-y += dpaax_iova_table.c dpaa_of.c caamflib.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib.c b/src/spdk/dpdk/drivers/common/dpaax/caamflib.c
new file mode 100644
index 000000000..55e20281c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib.c
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2020 NXP
+ *
+ */
+
+#include <rta.h>
+
+/*
+ * SEC HW block revision.
+ *
+ * This must not be confused with the SEC version:
+ * - the SEC HW block revision format is "v"
+ * - the SEC revision format is "x.y"
+ */
+enum rta_sec_era rta_sec_era;
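+
+/*
+ * Illustrative usage (an assumed sketch, not part of this file): a SEC
+ * driver is expected to set this once at probe time, based on the
+ * detected hardware era, before building any descriptor, e.g.
+ *
+ *   rta_sec_era = RTA_SEC_ERA_8;
+ *
+ * with the enum values coming from the RTA headers.
+ */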
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/compat.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/compat.h
new file mode 100644
index 000000000..36ee4b533
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/compat.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#include <stdint.h>
+#include <errno.h>
+
+#ifdef __GLIBC__
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#ifndef __BYTE_ORDER__
+#error "Undefined endianness"
+#endif
+
+#else
+#error Environment not supported!
+#endif
+
+#ifndef __always_inline
+#define __always_inline __rte_always_inline
+#endif
+
+#ifndef __always_unused
+#define __always_unused __rte_unused
+#endif
+
+#ifndef __maybe_unused
+#define __maybe_unused __rte_unused
+#endif
+
+#if defined(__GLIBC__) && !defined(pr_debug)
+#if !defined(SUPPRESS_PRINTS) && defined(RTA_DEBUG)
+#define pr_debug(fmt, ...) \
+ RTE_LOG(DEBUG, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_debug(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_debug */
+
+#if defined(__GLIBC__) && !defined(pr_err)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_err(fmt, ...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_err(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_err */
+
+#if defined(__GLIBC__) && !defined(pr_warn)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_warn(fmt, ...) \
+ RTE_LOG(WARNING, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_warn(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_warn */
+
+/**
+ * ARRAY_SIZE - returns the number of elements in an array
+ * @x: array
+ */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef ALIGN
+#define ALIGN(x, a) (((x) + ((__typeof__(x))(a) - 1)) & \
+ ~((__typeof__(x))(a) - 1))
+#endif
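+/* Example: ALIGN(13, 8) == 16; note that 'a' must be a power of two */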
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#ifndef upper_32_bits
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * The double 16-bit shift avoids undefined behaviour (and compiler
+ * warnings) when @n is only 32 bits wide.
+ */
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+#endif
+
+#ifndef lower_32_bits
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((uint32_t)(n))
+#endif
+
+/* Use Linux naming convention */
+#ifdef __GLIBC__
+ #define swab16(x) rte_bswap16(x)
+ #define swab32(x) rte_bswap32(x)
+ #define swab64(x) rte_bswap64(x)
+ /* Define cpu_to_be32 macro if not defined in the build environment */
+ #if !defined(cpu_to_be32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_be32(x) (x)
+ #else
+ #define cpu_to_be32(x) swab32(x)
+ #endif
+ #endif
+ /* Define cpu_to_le32 macro if not defined in the build environment */
+ #if !defined(cpu_to_le32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_le32(x) swab32(x)
+ #else
+ #define cpu_to_le32(x) (x)
+ #endif
+ #endif
+#endif
+
+#endif /* __RTA_COMPAT_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc.h
new file mode 100644
index 000000000..e4139aaa9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc.h
@@ -0,0 +1,2110 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016, 2019 NXP
+ *
+ */
+
+/*
+ * SEC descriptor composition header.
+ * Definitions to support SEC descriptor instruction generation
+ */
+
+#ifndef __RTA_DESC_H__
+#define __RTA_DESC_H__
+
+/* compat.h is not delivered in the kernel */
+#ifndef __KERNEL__
+#include "compat.h"
+#endif
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Max size of any SEC descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE 64
+
+#define CAAM_CMD_SZ sizeof(uint32_t)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE 16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT 27
+#define CMD_MASK (0x1f << CMD_SHIFT)
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_MOVEDW (0x06 << CMD_SHIFT)
+#define CMD_MOVEB (0x07 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION ((uint32_t)(0x10 << CMD_SHIFT))
+#define CMD_SIGNATURE ((uint32_t)(0x12 << CMD_SHIFT))
+#define CMD_JUMP ((uint32_t)(0x14 << CMD_SHIFT))
+#define CMD_MATH ((uint32_t)(0x15 << CMD_SHIFT))
+#define CMD_DESC_HDR ((uint32_t)(0x16 << CMD_SHIFT))
+#define CMD_SHARED_DESC_HDR ((uint32_t)(0x17 << CMD_SHIFT))
+#define CMD_MATHI ((uint32_t)(0x1d << CMD_SHIFT))
+#define CMD_SEQ_IN_PTR ((uint32_t)(0x1e << CMD_SHIFT))
+#define CMD_SEQ_OUT_PTR ((uint32_t)(0x1f << CMD_SHIFT))
+
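+/*
+ * A 32-bit descriptor command word is built by OR-ing one CMD_* opcode
+ * with the command-specific fields defined below. Illustrative sketch
+ * (the field choices are an example, not a prescribed encoding):
+ *
+ *   uint32_t w = CMD_KEY | KEY_DEST_CLASS1 | KEY_IMM |
+ *                (keylen & KEY_LENGTH_MASK);
+ */
+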
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+
+/* ICV Check bits for Algo Operation command */
+#define ICV_CHECK_DISABLE 0
+#define ICV_CHECK_ENABLE 1
+
+/* Encap Mode check bits for Algo Operation command */
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Extended Job Descriptor Header
+ */
+#define HDR_EXT BIT(24)
+
+/*
+ * Read input frame as soon as possible (SHR HDR)
+ */
+#define HDR_RIF BIT(25)
+
+/*
+ * Require SEQ LIODN to be the Same (JOB HDR)
+ */
+#define HDR_RSLS BIT(25)
+
+/*
+ * Do Not Run - marks a descriptor not executable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR BIT(24)
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE BIT(23)
+#define HDR_ZRO BIT(15)
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
+#define HDR_START_IDX_MASK_ERA10 (0x7f << HDR_START_IDX_SHIFT)
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK 0x3f
+/* If shared descriptor header, 7-bit length (era 10 onwards) */
+#define HDR_DESCLEN_SHR_MASK_ERA10 0x7f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK 0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED BIT(14)
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED BIT(13)
+
+/* Clear Input FiFO (if SharedDesc) */
+#define HDR_CLEAR_IFIFO BIT(13)
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX BIT(12)
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED BIT(12)
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE BIT(11)
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR BIT(11)
+
+/* DECO Select Valid */
+#define HDR_EXT_DSEL_VALID BIT(7)
+
+/* Fake trusted descriptor */
+#define HDR_EXT_FTD BIT(8)
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
+
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS1 (1 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS2 (2 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF BIT(24)
+#define KEY_VLF BIT(24)
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM BIT(23)
+
+/*
+ * Already in Input Data FIFO - the Input Data Sequence is not read, since it is
+ * already in the Input Data FIFO.
+ */
+#define KEY_AIDF BIT(23)
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if this descriptor is trusted
+ */
+#define KEY_ENC BIT(22)
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB BIT(21)
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT BIT(20)
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK BIT(15)
+
+/*
+ * Plaintext Store
+ */
+#define KEY_PTS BIT(14)
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split key
+ */
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK 0x000003ff
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF BIT(24)
+#define LDST_VLF BIT(24)
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM BIT(23)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQCTRL (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQDAR (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_STAT (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_DCHKSM (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PID (0x04 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_GTR (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STR (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZL (0x70 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZM (0x71 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_L (0x72 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_M (0x73 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZL (0x74 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZM (0x75 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IFNSR (0x76 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_OFNSR (0x77 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_ALTSOURCE (0x78 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO BIT(2)
+#define LDOFF_DISABLE_AUTO_NFIFO BIT(3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT BIT(7)
+#define LDLEN_RST_CHA_OFIFO_PTR BIT(6)
+#define LDLEN_RST_OFIFO BIT(5)
+#define LDLEN_SET_OFIFO_OFF_VALID BIT(4)
+#define LDLEN_SET_OFIFO_OFF_RSVD BIT(3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
+/* CCB Clear Written Register bits */
+#define CLRW_CLR_C1MODE BIT(0)
+#define CLRW_CLR_C1DATAS BIT(2)
+#define CLRW_CLR_C1ICV BIT(3)
+#define CLRW_CLR_C1CTX BIT(5)
+#define CLRW_CLR_C1KEY BIT(6)
+#define CLRW_CLR_PK_A BIT(12)
+#define CLRW_CLR_PK_B BIT(13)
+#define CLRW_CLR_PK_N BIT(14)
+#define CLRW_CLR_PK_E BIT(15)
+#define CLRW_CLR_C2MODE BIT(16)
+#define CLRW_CLR_C2KEYS BIT(17)
+#define CLRW_CLR_C2DATAS BIT(18)
+#define CLRW_CLR_C2CTX BIT(21)
+#define CLRW_CLR_C2KEY BIT(22)
+#define CLRW_RESET_CLS2_DONE BIT(26) /* era 4 */
+#define CLRW_RESET_CLS1_DONE BIT(27) /* era 4 */
+#define CLRW_RESET_CLS2_CHA BIT(28) /* era 4 */
+#define CLRW_RESET_CLS1_CHA BIT(29) /* era 4 */
+#define CLRW_RESET_OFIFO BIT(30) /* era 3 */
+#define CLRW_RESET_IFIFO_DFIFO BIT(31) /* era 3 */
+
+/* CHA Control Register bits */
+#define CCTRL_RESET_CHA_ALL BIT(0)
+#define CCTRL_RESET_CHA_AESA BIT(1)
+#define CCTRL_RESET_CHA_DESA BIT(2)
+#define CCTRL_RESET_CHA_AFHA BIT(3)
+#define CCTRL_RESET_CHA_KFHA BIT(4)
+#define CCTRL_RESET_CHA_SF8A BIT(5)
+#define CCTRL_RESET_CHA_PKHA BIT(6)
+#define CCTRL_RESET_CHA_MDHA BIT(7)
+#define CCTRL_RESET_CHA_CRCA BIT(8)
+#define CCTRL_RESET_CHA_RNG BIT(9)
+#define CCTRL_RESET_CHA_SF9A BIT(10)
+#define CCTRL_RESET_CHA_ZUCE BIT(11)
+#define CCTRL_RESET_CHA_ZUCA BIT(12)
+#define CCTRL_UNLOAD_PK_A0 BIT(16)
+#define CCTRL_UNLOAD_PK_A1 BIT(17)
+#define CCTRL_UNLOAD_PK_A2 BIT(18)
+#define CCTRL_UNLOAD_PK_A3 BIT(19)
+#define CCTRL_UNLOAD_PK_B0 BIT(20)
+#define CCTRL_UNLOAD_PK_B1 BIT(21)
+#define CCTRL_UNLOAD_PK_B2 BIT(22)
+#define CCTRL_UNLOAD_PK_B3 BIT(23)
+#define CCTRL_UNLOAD_PK_N BIT(24)
+#define CCTRL_UNLOAD_PK_A BIT(26)
+#define CCTRL_UNLOAD_PK_B BIT(27)
+#define CCTRL_UNLOAD_SBOX BIT(28)
+
+/* IRQ Control Register (CxCIRQ) bits */
+#define CIRQ_ADI BIT(1)
+#define CIRQ_DDI BIT(2)
+#define CIRQ_RCDI BIT(3)
+#define CIRQ_KDI BIT(4)
+#define CIRQ_S8DI BIT(5)
+#define CIRQ_PDI BIT(6)
+#define CIRQ_MDI BIT(7)
+#define CIRQ_CDI BIT(8)
+#define CIRQ_RNDI BIT(9)
+#define CIRQ_S9DI BIT(10)
+#define CIRQ_ZEDI BIT(11) /* valid for Era 5 or higher */
+#define CIRQ_ZADI BIT(12) /* valid for Era 5 or higher */
+#define CIRQ_AEI BIT(17)
+#define CIRQ_DEI BIT(18)
+#define CIRQ_RCEI BIT(19)
+#define CIRQ_KEI BIT(20)
+#define CIRQ_S8EI BIT(21)
+#define CIRQ_PEI BIT(22)
+#define CIRQ_MEI BIT(23)
+#define CIRQ_CEI BIT(24)
+#define CIRQ_RNEI BIT(25)
+#define CIRQ_S9EI BIT(26)
+#define CIRQ_ZEEI BIT(27) /* valid for Era 5 or higher */
+#define CIRQ_ZAEI BIT(28) /* valid for Era 5 or higher */
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_BOTH (0x03 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, refers to a SG table. Within
+ * SEQ_FIFO_LOAD, is variable input sequence
+ */
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF BIT(24)
+#define FIFOLDST_VLF BIT(24)
+
+/*
+ * Immediate - Data follows command in descriptor
+ * AIDF - Already in Input Data FIFO
+ */
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_AIDF_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM BIT(23)
+#define FIFOLD_AIDF BIT(23)
+
+#define FIFOST_IMM_SHIFT 23
+#define FIFOST_IMM_MASK (1 << FIFOST_IMM_SHIFT)
+#define FIFOST_IMM BIT(23)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT BIT(23)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT BIT(22)
+
+/* Input data type. */
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO (0x0f << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA2 (0x31 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12_PRF (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_DIFFIEHELLMAN (0x17 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENCRYPT (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADECRYPT (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC_NEW (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_DCRC (0x31 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_PDU (0x32 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_SDU (0x33 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_USER (0x42 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL (0x43 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL_MIXED (0x44 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_USER_RN (0x45 << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_NULL 0x0B00
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+#define OP_PCL_IPSEC_AES_NULL_WITH_GMAC 0x1500
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_AES_CMAC_96 0x0008
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+
+/*
+ * For SSL/TLS/DTLS - OP_PCL_TLS
+ * For more details see IANA TLS Cipher Suite registry:
+ * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
+ * Note: the OP_PCL_PVT_TLS_* values below are in the range IANA
+ * reserves for private/internal use
+ */
+#define OP_PCL_TLS_RSA_EXPORT_WITH_RC4_40_MD5 0x0003
+#define OP_PCL_TLS_RSA_WITH_RC4_128_MD5 0x0004
+#define OP_PCL_TLS_RSA_WITH_RC4_128_SHA 0x0005
+#define OP_PCL_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS_RSA_WITH_DES_CBC_SHA 0x0009
+#define OP_PCL_TLS_RSA_WITH_3DES_EDE_CBC_SHA 0x000a
+#define OP_PCL_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA 0x000b
+#define OP_PCL_TLS_DH_DSS_WITH_DES_CBC_SHA 0x000c
+#define OP_PCL_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA 0x000d
+#define OP_PCL_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA 0x000e
+#define OP_PCL_TLS_DH_RSA_WITH_DES_CBC_SHA 0x000f
+#define OP_PCL_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA 0x0010
+#define OP_PCL_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA 0x0011
+#define OP_PCL_TLS_DHE_DSS_WITH_DES_CBC_SHA 0x0012
+#define OP_PCL_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA 0x0013
+#define OP_PCL_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA 0x0014
+#define OP_PCL_TLS_DHE_RSA_WITH_DES_CBC_SHA 0x0015
+#define OP_PCL_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA 0x0016
+#define OP_PCL_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 0x0017
+#define OP_PCL_TLS_DH_anon_WITH_RC4_128_MD5 0x0018
+#define OP_PCL_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA 0x0019
+#define OP_PCL_TLS_DH_anon_WITH_DES_CBC_SHA 0x001a
+#define OP_PCL_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA 0x001b
+#define OP_PCL_TLS_KRB5_WITH_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS_KRB5_WITH_RC4_128_SHA 0x0020
+#define OP_PCL_TLS_KRB5_WITH_DES_CBC_MD5 0x0022
+#define OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 0x0023
+#define OP_PCL_TLS_KRB5_WITH_RC4_128_MD5 0x0024
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA 0x0026
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_SHA 0x0028
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 0x0029
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 0x002b
+#define OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA 0x0030
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA 0x0031
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA 0x0032
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA 0x0033
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA 0x0034
+#define OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA 0x0036
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA 0x0037
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA 0x0038
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA 0x0039
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA 0x003a
+#define OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256 0x003c
+#define OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA256 0x003d
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 0x003e
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 0x003f
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 0x0040
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 0x0067
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 0x0068
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 0x0069
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 0x006a
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 0x006b
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA256 0x006c
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA256 0x006d
+#define OP_PCL_TLS_PSK_WITH_RC4_128_SHA 0x008a
+#define OP_PCL_TLS_PSK_WITH_3DES_EDE_CBC_SHA 0x008b
+#define OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA 0x008c
+#define OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA 0x008d
+#define OP_PCL_TLS_DHE_PSK_WITH_RC4_128_SHA 0x008e
+#define OP_PCL_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA 0x008f
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA 0x0090
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA 0x0091
+#define OP_PCL_TLS_RSA_PSK_WITH_RC4_128_SHA 0x0092
+#define OP_PCL_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA 0x0093
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA 0x0094
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA 0x0095
+#define OP_PCL_TLS_RSA_WITH_AES_128_GCM_SHA256 0x009c
+#define OP_PCL_TLS_RSA_WITH_AES_256_GCM_SHA384 0x009d
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 0x009e
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 0x009f
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 0x00a0
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 0x00a1
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 0x00a2
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 0x00a3
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 0x00a4
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 0x00a5
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_GCM_SHA256 0x00a6
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_GCM_SHA384 0x00a7
+#define OP_PCL_TLS_PSK_WITH_AES_128_GCM_SHA256 0x00a8
+#define OP_PCL_TLS_PSK_WITH_AES_256_GCM_SHA384 0x00a9
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 0x00aa
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 0x00ab
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 0x00ac
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 0x00ad
+#define OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA256 0x00ae
+#define OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA384 0x00af
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 0x00b2
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 0x00b3
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 0x00b6
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 0x00b7
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_RC4_128_SHA 0xc002
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA 0xc003
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA 0xc004
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA 0xc005
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA 0xc007
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA 0xc008
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA 0xc009
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA 0xc00a
+#define OP_PCL_TLS_ECDH_RSA_WITH_RC4_128_SHA 0xc00c
+#define OP_PCL_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA 0xc00d
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA 0xc00e
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA 0xc00f
+#define OP_PCL_TLS_ECDHE_RSA_WITH_RC4_128_SHA 0xc011
+#define OP_PCL_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA 0xc012
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA 0xc013
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA 0xc014
+#define OP_PCL_TLS_ECDH_anon_WITH_RC4_128_SHA 0xc016
+#define OP_PCL_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA 0xc017
+#define OP_PCL_TLS_ECDH_anon_WITH_AES_128_CBC_SHA 0xc018
+#define OP_PCL_TLS_ECDH_anon_WITH_AES_256_CBC_SHA 0xc019
+#define OP_PCL_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA 0xc01a
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA 0xc01b
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA 0xc01c
+#define OP_PCL_TLS_SRP_SHA_WITH_AES_128_CBC_SHA 0xc01d
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA 0xc01e
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA 0xc01f
+#define OP_PCL_TLS_SRP_SHA_WITH_AES_256_CBC_SHA 0xc020
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA 0xc021
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA 0xc022
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 0xc023
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 0xc024
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 0xc025
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 0xc026
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 0xc027
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 0xc028
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 0xc029
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 0xc02a
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 0xc02b
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 0xc02c
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 0xc02d
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 0xc02e
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 0xc02f
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 0xc030
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 0xc031
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 0xc032
+#define OP_PCL_TLS_ECDHE_PSK_WITH_RC4_128_SHA 0xc033
+#define OP_PCL_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA 0xc034
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA 0xc035
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA 0xc036
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 0xc037
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 0xc038
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA160 0xff90
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA384 0xff93
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA224 0xff94
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA512 0xff95
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA256 0xff96
+#define OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FE 0xfffe
+#define OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FF 0xffff
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI 0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC 0x0001
+
+/* 3G DCRC protinfos */
+#define OP_PCL_3G_DCRC_CRC7 0x0710
+#define OP_PCL_3G_DCRC_CRC11 0x0B10
+
+/* 3G RLC protinfos */
+#define OP_PCL_3G_RLC_NULL 0x0000
+#define OP_PCL_3G_RLC_KASUMI 0x0001
+#define OP_PCL_3G_RLC_SNOW 0x0002
+
+/* LTE protinfos */
+#define OP_PCL_LTE_NULL 0x0000
+#define OP_PCL_LTE_SNOW 0x0001
+#define OP_PCL_LTE_AES 0x0002
+#define OP_PCL_LTE_ZUC 0x0003
+
+/* LTE mixed protinfos */
+#define OP_PCL_LTE_MIXED_AUTH_SHIFT 0
+#define OP_PCL_LTE_MIXED_AUTH_MASK (3 << OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SHIFT 8
+#define OP_PCL_LTE_MIXED_ENC_MASK (3 << OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_DSA_MSG BIT(10)
+#define OP_PCL_PKPROT_HASH_SHIFT 7
+#define OP_PCL_PKPROT_HASH_MASK (7 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_MD5 (0 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA1 (1 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA224 (2 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA256 (3 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA384 (4 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA512 (5 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_EKT_Z BIT(6)
+#define OP_PCL_PKPROT_DECRYPT_Z BIT(5)
+#define OP_PCL_PKPROT_EKT_PRI BIT(4)
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT_PRI BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* Blob protinfos */
+#define OP_PCL_BLOB_TKEK_SHIFT 9
+#define OP_PCL_BLOB_TKEK BIT(9)
+#define OP_PCL_BLOB_EKT_SHIFT 8
+#define OP_PCL_BLOB_EKT BIT(8)
+#define OP_PCL_BLOB_REG_SHIFT 4
+#define OP_PCL_BLOB_REG_MASK (0xF << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_MEMORY (0x0 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY1 (0x1 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY2 (0x3 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_AFHA_SBOX (0x5 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_SPLIT (0x7 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_PKE (0x9 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_SEC_MEM_SHIFT 3
+#define OP_PCL_BLOB_SEC_MEM BIT(3)
+#define OP_PCL_BLOB_BLACK BIT(2)
+#define OP_PCL_BLOB_FORMAT_SHIFT 0
+#define OP_PCL_BLOB_FORMAT_MASK 0x3
+#define OP_PCL_BLOB_FORMAT_NORMAL 0
+#define OP_PCL_BLOB_FORMAT_MASTER_VER 2
+#define OP_PCL_BLOB_FORMAT_TEST 3
+
+/* IKE / IKEv2 protinfos */
+#define OP_PCL_IKE_HMAC_MD5 0x0100
+#define OP_PCL_IKE_HMAC_SHA1 0x0200
+#define OP_PCL_IKE_HMAC_AES128_CBC 0x0400
+#define OP_PCL_IKE_HMAC_SHA256 0x0500
+#define OP_PCL_IKE_HMAC_SHA384 0x0600
+#define OP_PCL_IKE_HMAC_SHA512 0x0700
+#define OP_PCL_IKE_HMAC_AES128_CMAC 0x0800
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* RSA Protinfo */
+#define OP_PCL_RSAPROT_OP_MASK 3
+#define OP_PCL_RSAPROT_OP_ENC_F_IN 0
+#define OP_PCL_RSAPROT_OP_ENC_F_OUT 1
+#define OP_PCL_RSAPROT_OP_DEC_ND 0
+#define OP_PCL_RSAPROT_OP_DEC_PQD 1
+#define OP_PCL_RSAPROT_OP_DEC_PQDPDQC 2
+#define OP_PCL_RSAPROT_FFF_SHIFT 4
+#define OP_PCL_RSAPROT_FFF_MASK (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_RED (0 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_ENC (1 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_ENC (5 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_EKT (3 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_EKT (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_PPP_SHIFT 8
+#define OP_PCL_RSAPROT_PPP_MASK (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_RED (0 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_ENC (1 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_ENC (5 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_EKT (3 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_EKT (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_FMT_PKCSV15 BIT(12)
+
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT 24
+#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1 (0x2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (0x4 << OP_ALG_TYPE_SHIFT)
+
+#define OP_ALG_ALGSEL_SHIFT 16
+#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCE (0xB0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCA (0xC0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT 4
+#define OP_ALG_AAI_MASK (0x3ff << OP_ALG_AAI_SHIFT)
+
+/* block cipher AAI set */
+#define OP_ALG_AESA_MODE_MASK (0xF0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_CMAC (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC_LTE (0xd0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC (0xe0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_C2K (0x200 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_RNG_MODE_MASK (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_SHIFT OP_ALG_AAI_SHIFT
+#define OP_ALG_AAI_RNG4_SH_MASK (0x03 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set */
+#define OP_ALG_CRC_POLY_MASK (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_IVZ (0x80 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW/ZUC AAI set */
+#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT 2
+#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT 1
+#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF 0
+#define OP_ALG_ICV_ON BIT(1)
+
+#define OP_ALG_DIR_SHIFT 0
+#define OP_ALG_DIR_MASK 1
+#define OP_ALG_DECRYPT 0
+#define OP_ALG_ENCRYPT BIT(0)
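+
+/*
+ * The fields above compose an algorithm OPERATION command word by ORing
+ * one selector from each group. As an illustrative sketch (mirroring the
+ * ALG_OPERATION() calls in the descriptor constructors), SNOW f8
+ * encryption in a single init/final pass with ICV generation off is:
+ *
+ *	OP_ALG_ALGSEL_SNOW_F8 | OP_ALG_AAI_F8 | OP_ALG_AS_INITFINAL |
+ *	OP_ALG_ICV_OFF | OP_ALG_ENCRYPT
+ */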
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory function and register select bits */
+#define OP_ALG_PKMODE_A_RAM BIT(19)
+#define OP_ALG_PKMODE_B_RAM BIT(18)
+#define OP_ALG_PKMODE_E_RAM BIT(17)
+#define OP_ALG_PKMODE_N_RAM BIT(16)
+#define OP_ALG_PKMODE_CLEARMEM BIT(0)
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_CLEARMEM_ALL (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AB (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_A (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_B (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_EN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_E (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_N (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_N_RAM)
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY BIT(19)
+#define OP_ALG_PKMODE_MOD_OUT_MONTY BIT(18)
+#define OP_ALG_PKMODE_MOD_F2M BIT(17)
+#define OP_ALG_PKMODE_MOD_R2_IN BIT(16)
+#define OP_ALG_PKMODE_PRJECTV BIT(11)
+#define OP_ALG_PKMODE_TIME_EQ BIT(10)
+
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+
+/*
+ * PKHA mode modular-arithmetic integer functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_MULT_IM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_MULT_IM_OM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_EXPO_TEQ (0x006 | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_EXPO_IM (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO_IM_TEQ (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+#define OP_ALG_PKMODE_MOD_SML_EXP 0x016
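+
+/*
+ * For example (illustrative), a modular multiplication whose result is
+ * written to PKHA register A instead of the default B is encoded as:
+ *
+ *	OP_ALG_PKMODE_MOD_MULT | OP_ALG_PKMODE_OUT_A
+ */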
+
+/*
+ * PKHA mode modular-arithmetic F2m functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_F2M_ADD (0x002 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL (0x005 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL_IM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_F2M_MUL_IM_OM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_F2M_EXP (0x006 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_EXP_TEQ (0x006 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_F2M_AMODN (0x007 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_INV (0x008 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_R2 (0x00c | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_GCD (0x00e | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_SML_EXP (0x016 | OP_ALG_PKMODE_MOD_F2M)
+
+/*
+ * PKHA mode ECC Integer arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_MOD_ADD 0x009
+#define OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_DBL 0x00a
+#define OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL 0x00b
+#define OP_ALG_PKMODE_ECC_MOD_MUL_TEQ (0x00b | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2 (0x00b | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/*
+ * PKHA mode ECC F2m arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_F2M_ADD (0x009 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_DBL (0x00a | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL (0x00b | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2 \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+/* PKHA mode copy-memory functions - amount based on N SIZE */
+#define OP_ALG_PKMODE_COPY_NSZ 0x10
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_A_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_B_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_B_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_N_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_N_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_N_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/* PKHA mode copy-memory functions - amount based on SRC SIZE */
+#define OP_ALG_PKMODE_COPY_SSZ 0x11
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_A_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_B_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_B_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_N_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_N_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_N_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS BIT(26)
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL BIT(25)
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQIN_PRE BIT(23)
+
+/* Use extended length following pointer */
+#define SQIN_EXT BIT(22)
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO BIT(21)
+
+/* Replace job descriptor */
+#define SQIN_RJD BIT(20)
+
+/* Sequence Out Pointer - start a new input sequence using output sequence */
+#define SQIN_SOP BIT(19)
+
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE BIT(23)
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO BIT(21)
+
+/*
+ * Ignore length field, add current output frame length back to SOL register.
+ * Reset tracking length of bytes written to output frame.
+ * Must be used together with SQOUT_RTO.
+ */
+#define SQOUT_RST BIT(20)
+
+/* Allow "write safe" transactions for this Output Sequence */
+#define SQOUT_EWS BIT(19)
+
+/* Use extended length following pointer */
+#define SQOUT_EXT BIT(22)
+
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP BIT(24)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_NO_NFIFO (0x0a << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+#define MOVE_DEST_ALTSOURCE (0x0f << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK (0x3 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
+
+#define MOVELEN_SIZE_SHIFT 6
+#define MOVELEN_SIZE_MASK (0x3 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_WORD (0x01 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_BYTE (0x02 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_DWORD (0x03 << MOVELEN_SIZE_SHIFT)
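+
+/*
+ * A MOVE command word ORs together a source, a destination, an offset
+ * and a length. An illustrative sketch (offset and length values are
+ * arbitrary) copying 32 bytes from MATH0 into the descriptor buffer at
+ * offset 4:
+ *
+ *	MOVE_SRC_MATH0 | MOVE_DEST_DESCBUF |
+ *	(4 << MOVE_OFFSET_SHIFT) | (32 << MOVE_LEN_SHIFT)
+ */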
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB BIT(26)
+
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU BIT(25)
+
+/* STL for MATH, SSEL for MATHI */
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL BIT(24)
+
+#define MATH_SSEL_SHIFT 24
+#define MATH_SSEL_MASK (1 << MATH_SSEL_SHIFT)
+#define MATH_SSEL BIT(24)
+
+#define MATH_SWP_SHIFT 0
+#define MATH_SWP_MASK (1 << MATH_SWP_SHIFT)
+#define MATH_SWP BIT(0)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT) /* ZBYT is for MATH */
+#define MATH_FUN_FBYT (0x0a << MATH_FUN_SHIFT) /* FBYT is for MATHI */
+#define MATH_FUN_BSWAP (0x0b << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ONE (0x0f << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT 12
+#define MATHI_SRC1_SHIFT 16
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQINLEN (0x08 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQOUTLEN (0x09 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_JOBSOURCE (0x0d << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT 8
+#define MATHI_DEST_SHIFT 12
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+
+/* MATHI Immediate value */
+#define MATHI_IMM_SHIFT 4
+#define MATHI_IMM_MASK (0xff << MATHI_IMM_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
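+
+/*
+ * A MATH command word combines a function, two sources, a destination
+ * and an operand length. As an illustrative sketch, the recurring
+ * "VSEQINSZ = SEQINSZ - MATH2" operation on 4-byte operands encodes as:
+ *
+ *	MATH_FUN_SUB | MATH_SRC0_SEQINLEN | MATH_SRC1_REG2 |
+ *	MATH_DEST_VARSEQINLEN | MATH_LEN_4BYTE
+ */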
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE 0
+#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL BIT(24)
+
+#define JUMP_TYPE_SHIFT 20
+#define JUMP_TYPE_MASK (0x0f << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_INC (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_GOSUB (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_DEC (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x04 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_RETURN (0x06 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x08 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x0c << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK ((0xff << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_PK_0 BIT(15)
+#define JUMP_COND_PK_GCD_1 BIT(14)
+#define JUMP_COND_PK_PRIME BIT(13)
+#define JUMP_COND_MATH_N BIT(11)
+#define JUMP_COND_MATH_Z BIT(10)
+#define JUMP_COND_MATH_C BIT(9)
+#define JUMP_COND_MATH_NV BIT(8)
+
+#define JUMP_COND_JQP (BIT(15) | JUMP_JSL)
+#define JUMP_COND_SHRD (BIT(14) | JUMP_JSL)
+#define JUMP_COND_SELF (BIT(13) | JUMP_JSL)
+#define JUMP_COND_CALM (BIT(12) | JUMP_JSL)
+#define JUMP_COND_NIP (BIT(11) | JUMP_JSL)
+#define JUMP_COND_NIFP (BIT(10) | JUMP_JSL)
+#define JUMP_COND_NOP (BIT(9) | JUMP_JSL)
+#define JUMP_COND_NCP (BIT(8) | JUMP_JSL)
+
+/* Source / destination selectors */
+#define JUMP_SRC_DST_SHIFT 12
+#define JUMP_SRC_DST_MASK (0x0f << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH0 (0x00 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH1 (0x01 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH2 (0x02 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH3 (0x03 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_DPOVRD (0x07 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQINLEN (0x08 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQOUTLEN (0x09 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQINLEN (0x0a << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQOUTLEN (0x0b << JUMP_SRC_DST_SHIFT)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
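+
+/*
+ * A JUMP command word ORs a class, a type, a test and the conditions
+ * being tested, plus an offset. An illustrative sketch of a local jump
+ * taken while the shared descriptor is still loading (SHRD condition),
+ * with a hypothetical offset value:
+ *
+ *	JUMP_TYPE_LOCAL | JUMP_TEST_ALL | JUMP_COND_SHRD |
+ *	(offset << JUMP_OFFSET_SHIFT)
+ */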
+
+/*
+ * NFIFO ENTRY Data Constructs
+ */
+#define NFIFOENTRY_DEST_SHIFT 30
+#define NFIFOENTRY_DEST_MASK ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2 ((uint32_t)(2 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_BOTH ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+
+#define NFIFOENTRY_LC2_SHIFT 29
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 BIT(29)
+
+#define NFIFOENTRY_LC1_SHIFT 28
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 BIT(28)
+
+#define NFIFOENTRY_FC2_SHIFT 27
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 BIT(27)
+
+#define NFIFOENTRY_FC1_SHIFT 26
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 BIT(26)
+
+#define NFIFOENTRY_STYPE_SHIFT 24
+#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_ALTSOURCE ((0 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_OFIFO_SYNC ((1 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_SNOOP_ALT ((3 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+
+#define NFIFOENTRY_DTYPE_SHIFT 20
+#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_BND_SHIFT 19
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND BIT(19)
+
+#define NFIFOENTRY_PTYPE_SHIFT 16
+#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC BIT(15)
+
+#define NFIFOENTRY_PR_SHIFT 15
+#define NFIFOENTRY_PR_MASK (1 << NFIFOENTRY_PR_SHIFT)
+#define NFIFOENTRY_PR BIT(15)
+
+#define NFIFOENTRY_AST_SHIFT 14
+#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST BIT(14)
+
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM BIT(11)
+
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS BIT(10)
+
+#define NFIFOENTRY_DLEN_SHIFT 0
+#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT 0
+#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
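+
+/*
+ * An NFIFO entry is likewise composed by ORing the fields above, e.g.
+ * (illustrative) zero-padding a class 1 message by 16 bytes:
+ *
+ *	NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_STYPE_PAD |
+ *	NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_ZEROS |
+ *	(16 << NFIFOENTRY_PLEN_SHIFT)
+ */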
+
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE BIT(31)
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN BIT(30)
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC BIT(29)
+
+#endif /* __RTA_DESC_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/algo.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/algo.h
new file mode 100644
index 000000000..345bb5b3f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/algo.h
@@ -0,0 +1,792 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ *
+ */
+
+#ifndef __DESC_ALGO_H__
+#define __DESC_ALGO_H__
+
+#include "rta.h"
+#include "common.h"
+
+/**
+ * DOC: Algorithms - Shared Descriptor Constructors
+ *
+ * Shared descriptors for algorithms (i.e. not for protocols).
+ */
+
+/**
+ * cnstr_shdsc_zuce - ZUC encryption (128-EEA3) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: Cipher direction (DIR_ENC/DIR_DEC)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_zuce(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SEQLOAD(p, CONTEXT1, 0, 16, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
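+
+/*
+ * A minimal usage sketch for the constructors in this file; the alginfo
+ * field layout shown here is an assumption, and the buffer size, key
+ * and key length are illustrative:
+ *
+ *	uint32_t desc[64];
+ *	struct alginfo cipher = { 0 };
+ *
+ *	cipher.key = (uintptr_t)key_buf;
+ *	cipher.keylen = 16;
+ *	len = cnstr_shdsc_zuce(desc, false, false, &cipher, DIR_ENC);
+ *	if (len < 0)
+ *		... handle the error ...
+ */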
+
+/**
+ * cnstr_shdsc_zuca - ZUC authentication (128-EIA3) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @chk_icv: check or generate ICV value (ICV_CHECK_ENABLE/ICV_CHECK_DISABLE)
+ * @authlen: size of digest
+ *
+ * The IV prepended before the authenticated payload must be 8 bytes,
+ * consisting of COUNT||BEARER||DIR. COUNT is 32 bits, BEARER is 5 bits
+ * and DIR is 1 bit, totalling 38 bits.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_zuca(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t chk_icv,
+ uint32_t authlen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int dir = chk_icv ? DIR_DEC : DIR_ENC;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ SEQLOAD(p, CONTEXT2, 0, 8, 0);
+
+ if (chk_icv == ICV_CHECK_ENABLE)
+ MATHB(p, SEQINSZ, SUB, authlen, VSEQINSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, chk_icv, dir);
+
+ SEQFIFOLOAD(p, MSG2, 0, VLF | CLASS2 | LAST2);
+
+ if (chk_icv == ICV_CHECK_ENABLE)
+ SEQFIFOLOAD(p, ICV2, authlen, LAST2);
+ else
+ /* Save lower half of MAC out into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT2, 0, authlen, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+
+/**
+ * cnstr_shdsc_snow_f8 - SNOW/f8 (UEA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: Cipher direction (DIR_ENC/DIR_DEC)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SEQLOAD(p, CONTEXT1, 0, 16, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * conv_to_zuc_eia_iv - ZUCA IV conversion from 16 bytes to 8 bytes
+ * for 3G.
+ * @iv: 16 bytes of original IV data.
+ *
+ * From the original IV, we extract 32 bits of COUNT, 5 bits of bearer
+ * and 1 bit of direction.
+ * Refer to the CAAM reference manual for the ZUCA IV format. These
+ * values are concatenated as COUNT||BEARER||DIR to form a 38-bit
+ * block, which is copied left-justified into the 8-byte array used as
+ * the converted IV.
+ *
+ * Return: 8 bytes of IV data as understood by SEC HW
+ */
+
+static inline uint8_t *conv_to_zuc_eia_iv(uint8_t *iv)
+{
+ uint8_t dir = (iv[14] & 0x80) ? 4 : 0;
+
+ iv[12] = iv[4] | dir;
+ iv[13] = 0;
+ iv[14] = 0;
+ iv[15] = 0;
+
+ iv[8] = iv[0];
+ iv[9] = iv[1];
+ iv[10] = iv[2];
+ iv[11] = iv[3];
+
+ return (iv + 8);
+}
+
+/**
+ * conv_to_snow_f9_iv - SNOW/f9 (UIA2) IV conversion from 16 bytes to
+ * 12 bytes for 3G.
+ * @iv: 16 bytes of original IV data
+ *
+ * Return: 12 bytes of IV data as understood by SEC HW
+ */
+
+static inline uint8_t *conv_to_snow_f9_iv(uint8_t *iv)
+{
+ uint8_t temp = (iv[8] == iv[0]) ? 0 : 4;
+
+ iv[12] = iv[4];
+ iv[13] = iv[5];
+ iv[14] = iv[6];
+ iv[15] = iv[7];
+
+ iv[8] = temp;
+ iv[9] = 0x00;
+ iv[10] = 0x00;
+ iv[11] = 0x00;
+
+ iv[4] = iv[0];
+ iv[5] = iv[1];
+ iv[6] = iv[2];
+ iv[7] = iv[3];
+
+ return (iv + 4);
+}
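+
+/*
+ * Both IV converters rewrite the caller's 16-byte buffer in place and
+ * return a pointer into it, so a typical (illustrative) call site is:
+ *
+ *	uint8_t *hw_iv = conv_to_snow_f9_iv(iv);
+ *
+ *	memcpy(iv_dst, hw_iv, 12);
+ */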
+
+/**
+ * cnstr_shdsc_snow_f9 - SNOW/f9 (UIA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @chk_icv: check or generate ICV value
+ * @authlen: size of digest
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t chk_icv,
+ uint32_t authlen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int dir = chk_icv ? DIR_DEC : DIR_ENC;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ SEQLOAD(p, CONTEXT2, 0, 12, 0);
+
+ if (chk_icv == ICV_CHECK_ENABLE)
+ MATHB(p, SEQINSZ, SUB, authlen, VSEQINSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, chk_icv, dir);
+
+ SEQFIFOLOAD(p, MSG2, 0, VLF | CLASS2 | LAST2);
+
+ if (chk_icv == ICV_CHECK_ENABLE)
+ SEQFIFOLOAD(p, ICV2, authlen, LAST2);
+ else
+ /* Save lower half of MAC out into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT2, 0, authlen, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_blkcipher - block cipher transformation
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
+ * @ivlen: IV length; "ivlen" bytes from the input frame are read as the IV
+ * @dir: DIR_ENC/DIR_DEC
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t iv_off = 0, counter;
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
+ LABEL(keyjmp);
+ LABEL(skipdk);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipdk);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, share, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (need_dk) {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+
+ pskipdk = JUMP(p, skipdk, LOCAL_JUMP, ALL_TRUE, 0);
+ }
+ SET_LABEL(p, keyjmp);
+
+ if (need_dk) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipdk);
+ } else {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ iv_off = 16;
+
+ /* IV is present first before the actual message */
+ SEQLOAD(p, CONTEXT1, iv_off, ivlen, 0);
+
+ /* If IV len is less than 16 bytes, set 'counter' as 1 */
+ if (cipherdata->algmode == OP_ALG_AAI_CTR && ivlen < 16) {
+ counter = 1;
+ if (!swap)
+ counter = swab32(1);
+
+ LOAD(p, counter, CONTEXT1, (iv_off + ivlen), 16 - ivlen, IMMED);
+ }
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+
+ /* Insert sequence load/store with VLF */
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ if (need_dk)
+ PATCH_JUMP(p, pskipdk, skipdk);
+
+ return PROGRAM_FINALIZE(p);
+}
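+
+/*
+ * An illustrative AES-CBC encryption sketch for this constructor; the
+ * alginfo field layout is an assumption and the sizes are arbitrary:
+ *
+ *	struct alginfo cipher = { 0 };
+ *
+ *	cipher.algtype = OP_ALG_ALGSEL_AES;
+ *	cipher.algmode = OP_ALG_AAI_CBC;
+ *	cipher.key = (uintptr_t)key_buf;
+ *	cipher.keylen = 16;
+ *	len = cnstr_shdsc_blkcipher(desc, false, false, SHR_ALWAYS,
+ *				    &cipher, 16, DIR_ENC);
+ */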
+
+/**
+ * cnstr_shdsc_hmac - HMAC as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
+ * @authdata: pointer to authentication transform definitions;
+ * message digest algorithm: OP_ALG_ALGSEL_MD5/SHA1/SHA224/SHA256/SHA384/SHA512.
+ * @do_icv: 0 if ICV checking is not desired, any other value if ICV checking
+ * is needed for all the packets processed by this shared descriptor
+ * @trunc_len: Length of the truncated ICV to be written in the output buffer, 0
+ * if no truncation is needed
+ *
+ * Note: There's no support for keys longer than the block size of the
+ * underlying hash function, according to the selected algorithm.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_hmac(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
+ struct alginfo *authdata, uint8_t do_icv,
+ uint8_t trunc_len)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint8_t storelen, opicv, dir;
+ LABEL(keyjmp);
+ LABEL(jmpprecomp);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pjmpprecomp);
+
+ /* Compute fixed-size store based on alg selection */
+ switch (authdata->algtype) {
+ case OP_ALG_ALGSEL_MD5:
+ storelen = 16;
+ break;
+ case OP_ALG_ALGSEL_SHA1:
+ storelen = 20;
+ break;
+ case OP_ALG_ALGSEL_SHA224:
+ storelen = 28;
+ break;
+ case OP_ALG_ALGSEL_SHA256:
+ storelen = 32;
+ break;
+ case OP_ALG_ALGSEL_SHA384:
+ storelen = 48;
+ break;
+ case OP_ALG_ALGSEL_SHA512:
+ storelen = 64;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ trunc_len = trunc_len && (trunc_len < storelen) ? trunc_len : storelen;
+
+ opicv = do_icv ? ICV_CHECK_ENABLE : ICV_CHECK_DISABLE;
+ dir = do_icv ? DIR_DEC : DIR_ENC;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, share, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ pjmpprecomp = JUMP(p, jmpprecomp, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ SET_LABEL(p, jmpprecomp);
+
+ /* compute sequences */
+ if (opicv == ICV_CHECK_ENABLE)
+ MATHB(p, SEQINSZ, SUB, trunc_len, VSEQINSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+
+ /* Do load (variable length) */
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+
+ if (opicv == ICV_CHECK_ENABLE)
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pjmpprecomp, jmpprecomp);
+
+ return PROGRAM_FINALIZE(p);
+}
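+
+/*
+ * An illustrative HMAC-SHA256 sketch (ICV generation, untruncated
+ * digest); the alginfo field layout is an assumption:
+ *
+ *	struct alginfo auth = { 0 };
+ *
+ *	auth.algtype = OP_ALG_ALGSEL_SHA256;
+ *	auth.key = (uintptr_t)key_buf;
+ *	auth.keylen = 32;
+ *	len = cnstr_shdsc_hmac(desc, false, false, SHR_ALWAYS, &auth,
+ *			       0, 0);
+ */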
+
+/**
+ * cnstr_shdsc_kasumi_f8 - KASUMI F8 (Confidentiality) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SEQLOAD(p, CONTEXT1, 0, 8, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_kasumi_f9 - KASUMI F9 (Integrity) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @chk_icv: check or generate ICV value
+ * @authlen: size of digest
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t chk_icv,
+ uint32_t authlen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int dir = chk_icv ? DIR_DEC : DIR_ENC;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ SEQLOAD(p, CONTEXT2, 0, 12, 0);
+
+ if (chk_icv == ICV_CHECK_ENABLE)
+ MATHB(p, SEQINSZ, SUB, authlen, VSEQINSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, chk_icv, dir);
+
+ SEQFIFOLOAD(p, MSG2, 0, VLF | CLASS2 | LAST2);
+
+ if (chk_icv == ICV_CHECK_ENABLE)
+ SEQFIFOLOAD(p, ICV2, authlen, LAST2);
+ else
+ /* Save lower half of MAC out into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT2, 0, authlen, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
+ OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
+ OP_ALG_AS_FINALIZE, 0, DIR_ENC);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with
+ * OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_encap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump2);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump2);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, share, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ MATHB(p, SEQINSZ, SUB, ivlen, VSEQOUTSZ, 4, IMMED2);
+ pzeroassocjump2 = JUMP(p, zeroassocjump2, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ /* cryptlen = seqinlen - assoclen */
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+ SET_LABEL(p, zeroassocjump1);
+
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* write encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | LAST1);
+
+ /* jump the zero-payload commands */
+ JUMP(p, 4, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* zero-payload commands */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | LAST1);
+
+ JUMP(p, 2, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* There is no input data */
+ SET_LABEL(p, zeroassocjump2);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1 | LAST1);
+
+ /* write ICV */
+ SEQSTORE(p, CONTEXT1, 0, icvsize, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump2, zeroassocjump2);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
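+
+/*
+ * Usage sketch (illustrative addition, not part of the original flib):
+ * building an AES-128-GCM encap descriptor with a 12-byte IV and a full
+ * 16-byte ICV. Buffer size, key length and sharing type are assumptions.
+ */
+static inline int
+example_gcm_encap_shdesc(uint32_t shdesc[64], const uint8_t *key)
+{
+	struct alginfo cipherdata = {
+		.algtype = OP_ALG_ALGSEL_AES,
+		.algmode = OP_ALG_AAI_GCM,
+		.key = (uint64_t)(uintptr_t)key,
+		.keylen = 16,
+		.key_enc_flags = 0,
+		.key_type = RTA_DATA_IMM,
+	};
+
+	return cnstr_shdsc_gcm_encap(shdesc, false, false, SHR_SERIAL,
+				     &cipherdata, 12, 16);
+}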
+
+/**
+ * cnstr_shdsc_gcm_decap - AES-GCM decap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with
+ * OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, share, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+
+ SET_LABEL(p, zeroassocjump1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQOUTSZ, 4, 0);
+
+ /* store encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | FLUSH1);
+
+ /* zero-payload command */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read ICV */
+ SEQFIFOLOAD(p, ICV1, icvsize, CLASS1 | LAST1);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
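+
+/*
+ * Usage sketch (illustrative addition, not part of the original flib): decap
+ * mirrors encap with the same transform data; only the ICV is now read and
+ * checked instead of written. The IV/ICV sizes below are the usual GCM
+ * values, chosen here as an assumption.
+ */
+static inline int
+example_gcm_decap_shdesc(uint32_t shdesc[64], struct alginfo *cipherdata)
+{
+	return cnstr_shdsc_gcm_decap(shdesc, false, false, SHR_SERIAL,
+				     cipherdata, 12, 16);
+}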
+
+#endif /* __DESC_ALGO_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/common.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/common.h
new file mode 100644
index 000000000..816baacfd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/common.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __DESC_COMMON_H__
+#define __DESC_COMMON_H__
+
+#include "rta.h"
+
+/**
+ * DOC: Shared Descriptor Constructors - shared structures
+ *
+ * Data structures shared between algorithm and protocol implementations.
+ */
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_enc_flags: key encryption flags; see encrypt_flags parameter of KEY
+ * command for valid values.
+ * @key_type: enum rta_data_type
+ * @algmode: algorithm mode selector; for valid values, see documentation of the
+ * functions where it is used.
+ */
+struct alginfo {
+ uint32_t algtype;
+ uint32_t keylen;
+ uint64_t key;
+ uint32_t key_enc_flags;
+ enum rta_data_type key_type;
+ uint16_t algmode;
+};
+
+#define INLINE_KEY(alginfo) inline_flags(alginfo->key_type)
+
+/**
+ * rta_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int
+rta_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len,
+ unsigned int *data_len,
+ uint32_t *inl_mask,
+ unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
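+
+/*
+ * Usage sketch (illustrative addition, not part of the original flib):
+ * querying whether an authentication key (item 0) and a cipher key (item 1)
+ * can be inlined. The job descriptor overhead passed as @jd_len is an
+ * assumed value; real callers derive it from the job descriptors they build.
+ */
+static inline uint32_t
+example_inline_query(unsigned int sd_base_len, unsigned int authkeylen,
+		     unsigned int cipherkeylen)
+{
+	unsigned int data_len[2] = { authkeylen, cipherkeylen };
+	uint32_t inl_mask = 0;
+
+	if (rta_inline_query(sd_base_len,
+			     3 * CAAM_CMD_SZ + 2 * CAAM_PTR_SZ,
+			     data_len, &inl_mask, 2) < 0)
+		return 0;	/* nothing fits: reference both keys by pointer */
+
+	/* bit 0: auth key may be inlined; bit 1: cipher key may be inlined */
+	return inl_mask;
+}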
+
+/**
+ * struct protcmd - Container for Protocol Operation Command fields
+ * @optype: command type
+ * @protid: protocol Identifier
+ * @protinfo: protocol Information
+ */
+struct protcmd {
+ uint32_t optype;
+ uint32_t protid;
+ uint16_t protinfo;
+};
+
+#endif /* __DESC_COMMON_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h
new file mode 100644
index 000000000..cf6fa4252
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h
@@ -0,0 +1,1613 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ *
+ */
+
+#ifndef __DESC_IPSEC_H__
+#define __DESC_IPSEC_H__
+
+#include "rta.h"
+#include "common.h"
+
+/**
+ * DOC: IPsec Shared Descriptor Constructors
+ *
+ * Shared descriptors for IPsec protocol.
+ */
+
+/* General IPSec ESP encap / decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ESN - Extended sequence included
+ */
+#define PDBOPTS_ESP_ESN 0x10
+
+/**
+ * PDBOPTS_ESP_IPVSN - Process IPv6 header
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPVSN 0x02
+
+/**
+ * PDBOPTS_ESP_TUNNEL - Tunnel mode next-header byte
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_TUNNEL 0x01
+
+/* IPSec ESP Encap PDB options */
+
+/**
+ * PDBOPTS_ESP_UPDATE_CSUM - Update IP header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80
+
+/**
+ * PDBOPTS_ESP_DIFFSERV - Copy TOS/TC from inner iphdr
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_DIFFSERV 0x40
+
+/**
+ * PDBOPTS_ESP_IVSRC - IV comes from internal random gen
+ */
+#define PDBOPTS_ESP_IVSRC 0x20
+
+/**
+ * PDBOPTS_ESP_IPHDRSRC - IP header comes from PDB
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPHDRSRC 0x08
+
+/**
+ * PDBOPTS_ESP_INCIPHDR - Prepend IP header to output frame
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_INCIPHDR 0x04
+
+/**
+ * PDBOPTS_ESP_OIHI_MASK - Mask for Outer IP Header Included
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_MASK 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_INL - Prepend IP header to output frame from PDB (where
+ * it is inlined).
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_INL 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_REF - Prepend IP header to output frame from PDB
+ * (referenced by pointer).
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_REF 0x08
+
+/**
+ * PDBOPTS_ESP_OIHI_IF - Prepend IP header to output frame from input frame
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_IF 0x04
+
+/**
+ * PDBOPTS_ESP_NAT - Enable RFC 3948 UDP-encapsulated-ESP
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NAT 0x02
+
+/**
+ * PDBOPTS_ESP_NUC - Enable NAT UDP Checksum
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NUC 0x01
+
+/* IPSec ESP Decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ARS_MASK - antireplay window mask
+ */
+#define PDBOPTS_ESP_ARS_MASK 0xc0
+
+/**
+ * PDBOPTS_ESP_ARSNONE - No antireplay window
+ */
+#define PDBOPTS_ESP_ARSNONE 0x00
+
+/**
+ * PDBOPTS_ESP_ARS64 - 64-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS64 0xc0
+
+/**
+ * PDBOPTS_ESP_ARS128 - 128-entry antireplay window
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ARS128 0x80
+
+/**
+ * PDBOPTS_ESP_ARS32 - 32-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS32 0x40
+
+/**
+ * PDBOPTS_ESP_VERIFY_CSUM - Validate IP header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20
+
+/**
+ * PDBOPTS_ESP_TECN - Implement RFC 6040 ECN tunneling from outer header to
+ * inner header.
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_TECN 0x20
+
+/**
+ * PDBOPTS_ESP_OUTFMT - Output only decapsulation
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_OUTFMT 0x08
+
+/**
+ * PDBOPTS_ESP_AOFL - Adjust output frame length
+ *
+ * Valid only for IPsec legacy mode and for SEC >= 5.3.
+ */
+#define PDBOPTS_ESP_AOFL 0x04
+
+/**
+ * PDBOPTS_ESP_ETU - EtherType Update
+ *
+ * Add corresponding ethertype (0x0800 for IPv4, 0x86dd for IPv6) in the output
+ * frame.
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ETU 0x01
+
+#define PDBHMO_ESP_DECAP_SHIFT 28
+#define PDBHMO_ESP_ENCAP_SHIFT 28
+#define PDBNH_ESP_ENCAP_SHIFT 16
+#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT)
+#define PDBHDRLEN_ESP_DECAP_SHIFT 16
+#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+#define PDB_NH_OFFSET_SHIFT 8
+#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT)
+
+/**
+ * PDBHMO_ESP_DECAP_DTTL - IPsec ESP decrement TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_DECAP_DTTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ENCAP_DTTL - IPsec ESP decrement TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_ENCAP_DTTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DIFFSERV - (Decap) DiffServ Copy - Copy the IPv4 TOS or IPv6
+ * Traffic Class byte from the outer IP header to the
+ * inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_SNR - (Encap) - Sequence Number Rollover control
+ *
+ * Configures behaviour in case of SN / ESN rollover:
+ * error if SNR = 1, rollover allowed if SNR = 0.
+ * Valid only for IPsec new mode.
+ */
+#define PDBHMO_ESP_SNR (0x01 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFBIT - (Encap) Copy DF bit - if an IPv4 tunnel mode outer IP
+ * header is coming from the PDB, copy the DF bit from the
+ * inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFV - (Decap) - DF bit value
+ *
+ * If ODF = 1, DF bit in output frame is replaced by DFV.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_DFV (0x04 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ODF - (Decap) Override DF bit in IPv4 header of decapsulated
+ * output frame.
+ *
+ * If ODF = 1, DF is replaced with the value of DFV bit.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_ODF (0x08 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * struct ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
+struct ipsec_encap_cbc {
+ uint8_t iv[16];
+};
+
+/**
+ * struct ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ctr {
+ uint32_t ctr_nonce;
+ uint32_t ctr_initial;
+ uint8_t iv[8];
+};
+
+/**
+ * struct ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 4-byte salt field (only the lower 24 bits are used)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 4-byte salt
+ * @rsvd: reserved, do not use
+ * @iv: initialization vector
+ */
+struct ipsec_encap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * reserved - 4b
+ * next header (legacy) / reserved (new) - 8b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ * reserved - 16b
+ * Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content (only for IPsec legacy mode)
+ */
+struct ipsec_encap_pdb {
+ uint32_t options;
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ union {
+ struct ipsec_encap_cbc cbc;
+ struct ipsec_encap_ctr ctr;
+ struct ipsec_encap_ccm ccm;
+ struct ipsec_encap_gcm gcm;
+ };
+ uint32_t spi;
+ uint32_t ip_hdr_len;
+ uint8_t ip_hdr[0];
+};
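+
+/*
+ * Usage sketch (illustrative addition, not part of the original flib):
+ * filling the PDB above for an AES-CBC tunnel-mode SA. SPI, initial
+ * sequence number and IV values are placeholders chosen for illustration.
+ */
+static inline void
+example_fill_encap_pdb(struct ipsec_encap_pdb *pdb, const uint8_t iv[16],
+		       uint32_t spi)
+{
+	unsigned int i;
+
+	/* Tunnel mode; PDBOPTS_ESP_IVSRC unset, so the IV comes from the PDB */
+	pdb->options = PDBOPTS_ESP_TUNNEL;
+	pdb->seq_num_ext_hi = 0;
+	pdb->seq_num = 1;
+	pdb->spi = spi;
+	pdb->ip_hdr_len = 0;
+	for (i = 0; i < sizeof(pdb->cbc.iv); i++)
+		pdb->cbc.iv[i] = iv[i];
+}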
+
+static inline unsigned int
+__rta_copy_ipsec_encap_pdb(struct program *program,
+ struct ipsec_encap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, pdb->options);
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ rta_copy_data(program, pdb->cbc.iv, sizeof(pdb->cbc.iv));
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, (uint8_t *)&pdb->ctr.ctr_nonce, 4);
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ rta_copy_data(program, pdb->ctr.iv, sizeof(pdb->ctr.iv));
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ __rta_out64(program, true, pdb->ccm.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ __rta_out64(program, true, pdb->gcm.iv);
+ break;
+ }
+
+ __rta_out32(program, pdb->spi);
+ __rta_out32(program, pdb->ip_hdr_len);
+
+ return start_pc;
+}
+
+/**
+ * struct ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_cbc {
+ uint32_t rsvd[2];
+};
+
+/**
+ * struct ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte nonce
+ * @ctr_initial: initial count constant
+ */
+struct ipsec_decap_ctr {
+ uint32_t ctr_nonce;
+ uint32_t ctr_initial;
+};
+
+/**
+ * struct ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 4-byte salt field (only the lower 24 bits are used)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ */
+struct ipsec_decap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+};
+
+/**
+ * struct ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
+ * @salt: 4-byte salt
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+};
+
+/**
+ * struct ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * IP header length - 12b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags);
+ * format must be Big Endian, irrespective of platform
+ */
+struct ipsec_decap_pdb {
+ uint32_t options;
+ union {
+ struct ipsec_decap_cbc cbc;
+ struct ipsec_decap_ctr ctr;
+ struct ipsec_decap_ccm ccm;
+ struct ipsec_decap_gcm gcm;
+ };
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ uint32_t anti_replay[4];
+};
+
+static inline unsigned int
+__rta_copy_ipsec_decap_pdb(struct program *program,
+ struct ipsec_decap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int i, ars;
+
+ __rta_out32(program, pdb->options);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ __rta_out32(program, pdb->cbc.rsvd[0]);
+ __rta_out32(program, pdb->cbc.rsvd[1]);
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, (uint8_t *)&pdb->ctr.ctr_nonce, 4);
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ break;
+ }
+
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (pdb->options & PDBOPTS_ESP_ARS_MASK) {
+ case PDBOPTS_ESP_ARS128:
+ ars = 4;
+ break;
+ case PDBOPTS_ESP_ARS64:
+ ars = 2;
+ break;
+ case PDBOPTS_ESP_ARS32:
+ ars = 1;
+ break;
+ case PDBOPTS_ESP_ARSNONE:
+ default:
+ ars = 0;
+ break;
+ }
+
+ for (i = 0; i < ars; i++)
+ __rta_out_be32(program, pdb->anti_replay[i]);
+
+ return start_pc;
+}
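+
+/*
+ * Usage sketch (illustrative addition, not part of the original flib):
+ * selecting a 64-entry anti-replay window plus ESN in the decap PDB options
+ * word. With ARS64, the copy helper above emits two big-endian
+ * anti-replay words.
+ */
+static inline void
+example_set_decap_ars64_esn(struct ipsec_decap_pdb *pdb)
+{
+	pdb->options &= ~PDBOPTS_ESP_ARS_MASK;
+	pdb->options |= PDBOPTS_ESP_ARS64 | PDBOPTS_ESP_ESN;
+}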
+
+/**
+ * enum ipsec_icv_size - Type selectors for icv size in IPsec protocol
+ * @IPSEC_ICV_MD5_SIZE: full-length MD5 ICV
+ * @IPSEC_ICV_MD5_TRUNC_SIZE: truncated MD5 ICV
+ */
+enum ipsec_icv_size {
+ IPSEC_ICV_MD5_SIZE = 16,
+ IPSEC_ICV_MD5_TRUNC_SIZE = 12
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ * IPSEC_N_* defines are for IPsec new mode.
+ */
+
+/**
+ * IPSEC_DPOVRD_USE - DPOVRD will override values specified in the PDB
+ */
+#define IPSEC_DPOVRD_USE BIT(31)
+
+/**
+ * IPSEC_DPOVRD_ECN_SHIFT - Explicit Congestion Notification
+ *
+ * If the MSB of this 4-bit field is set, the 2 LSBs replace the ECN bits in
+ * the IP header.
+ */
+#define IPSEC_DPOVRD_ECN_SHIFT 24
+
+/**
+ * IPSEC_DPOVRD_ECN_MASK - See IPSEC_DPOVRD_ECN_SHIFT
+ */
+#define IPSEC_DPOVRD_ECN_MASK (0xf << IPSEC_DPOVRD_ECN_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_IP_HDR_LEN_SHIFT - The length (in bytes) of the portion of the
+ * IP header that is not encrypted
+ */
+#define IPSEC_DPOVRD_IP_HDR_LEN_SHIFT 16
+
+/**
+ * IPSEC_DPOVRD_IP_HDR_LEN_MASK - See IPSEC_DPOVRD_IP_HDR_LEN_SHIFT
+ */
+#define IPSEC_DPOVRD_IP_HDR_LEN_MASK (0xff << IPSEC_DPOVRD_IP_HDR_LEN_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_NH_OFFSET_SHIFT - The location of the next header field within
+ * the IP header of the transport mode packet
+ *
+ * Encap:
+ * ESP_Trailer_NH <-- IP_Hdr[DPOVRD[NH_OFFSET]]
+ * IP_Hdr[DPOVRD[NH_OFFSET]] <-- DPOVRD[NH]
+ * Decap:
+ * IP_Hdr[DPOVRD[NH_OFFSET]] <-- ESP_Trailer_NH
+ */
+#define IPSEC_DPOVRD_NH_OFFSET_SHIFT 8
+
+/**
+ * IPSEC_DPOVRD_NH_OFFSET_MASK - See IPSEC_DPOVRD_NH_OFFSET_SHIFT
+ */
+#define IPSEC_DPOVRD_NH_OFFSET_MASK (0xff << IPSEC_DPOVRD_NH_OFFSET_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_NH_MASK - See IPSEC_DPOVRD_NH_OFFSET_SHIFT
+ * Valid only for encapsulation.
+ */
+#define IPSEC_DPOVRD_NH_MASK 0xff
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT - Outer IP header Material length (encap)
+ * Valid only if L2_COPY is not set.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT 16
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIM_LEN_MASK - See IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIM_LEN_MASK \
+ (0xfff << IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT - L2 header length
+ * Valid only if L2_COPY is set.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT 16
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK - See IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK \
+ (0xff << IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIMIF - Outer IP header Material in Input Frame
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIMIF BIT(15)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_COPY - L2 header present in input frame
+ *
+ * Note: For Era <= 8, this bit is reserved (not used) by HW.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_COPY BIT(14)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT - Actual Outer IP Header Offset (encap)
+ */
+#define IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT 8
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_AOIPHO_MASK - See IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_AOIPHO_MASK \
+ (0x3c << IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_NH_MASK - Next Header
+ *
+ * Used in the Next Header field of the encapsulated payload.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_NH_MASK 0xff
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT - Actual Outer IP Header Offset (decap)
+ */
+#define IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT 12
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_AOIPHO_MASK - See IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT
+ */
+#define IPSEC_N_DECAP_DPOVRD_AOIPHO_MASK \
+ (0xff << IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT)
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_OIM_LEN_MASK - Outer IP header Material length (decap)
+ */
+#define IPSEC_N_DECAP_DPOVRD_OIM_LEN_MASK 0xfff
+
+static inline void __gen_auth_key(struct program *program,
+ struct alginfo *authdata)
+{
+ uint32_t dkp_protid;
+
+ switch (authdata->algtype & OP_PCL_IPSEC_AUTH_MASK) {
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ dkp_protid = OP_PCLID_DKP_MD5;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ dkp_protid = OP_PCLID_DKP_SHA1;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ dkp_protid = OP_PCLID_DKP_SHA256;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ dkp_protid = OP_PCLID_DKP_SHA384;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ dkp_protid = OP_PCLID_DKP_SHA512;
+ break;
+ default:
+ KEY(program, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ return;
+ }
+
+ if (authdata->key_type == RTA_DATA_PTR)
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_PTR,
+ OP_PCL_DKP_DST_PTR, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+ else
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_IMM,
+ OP_PCL_DKP_DST_IMM, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, share, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
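+
+/*
+ * Usage sketch (illustrative addition, not part of the original flib): a
+ * complete call to the constructor above for AES-CBC + HMAC-SHA1-96. It
+ * assumes SEC Era 6+, so a plain HMAC key is passed and DKP derives the
+ * split key in hardware; key lengths and the 64-word buffer are assumptions.
+ */
+static inline int
+example_ipsec_encap_shdesc(uint32_t shdesc[64], struct ipsec_encap_pdb *pdb,
+			   const uint8_t *ckey, const uint8_t *akey)
+{
+	struct alginfo cipherdata = {
+		.algtype = OP_PCL_IPSEC_AES_CBC,
+		.key = (uint64_t)(uintptr_t)ckey,
+		.keylen = 16,
+		.key_type = RTA_DATA_IMM,
+	};
+	struct alginfo authdata = {
+		.algtype = OP_PCL_IPSEC_HMAC_SHA1_96,
+		.key = (uint64_t)(uintptr_t)akey,
+		.keylen = 20,
+		.key_type = RTA_DATA_IMM,
+	};
+
+	return cnstr_shdsc_ipsec_encap(shdesc, false, false, SHR_SERIAL,
+				       pdb, &cipherdata, &authdata);
+}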
+
+/**
+ * cnstr_shdsc_ipsec_decap - IPSec ESP decapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, share, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP encapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the encapsulation output packet.
+ * The descriptor performs DES-CBC/3DES-CBC & HMAC-MD5-96 and then rereads
+ * the input packet to do the AES-XCBC-MAC-96 calculation and to overwrite
+ * the MD5 ICV.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96), even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(hdr);
+ LABEL(shd_ptr);
+ LABEL(keyjump);
+ LABEL(outptr);
+ LABEL(swapped_seqin_fields);
+ LABEL(swapped_seqin_ptr);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_outlen);
+ REFERENCE(move_seqout_ptr);
+ REFERENCE(swapped_seqin_ptr_jump);
+ REFERENCE(write_swapped_seqin_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the key options below in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ IMMED);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+ /* Swap SEQINPTR to SEQOUTPTR. */
+ move_seqout_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, AND, ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR), MATH1,
+ 8, IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xa00000e5, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqin_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqin_ptr_jump = JUMP(p, swapped_seqin_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ SEQOUTPTR(p, 0, 65535, RTO);
+ move_outlen = MOVE(p, DESCBUF, 0, MATH0, 4, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH0, SUB,
+ (uint64_t)(pdb->ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE),
+ VSEQINSZ, 4, IMMED2);
+ MATHB(p, MATH0, SUB, IPSEC_ICV_MD5_TRUNC_SIZE, VSEQOUTSZ, 4, IMMED2);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ SEQFIFOLOAD(p, SKIP, pdb->ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1 | LAST1);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT1, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the Shared Descriptor Pointer */
+ SET_LABEL(p, shd_ptr);
+ shd_ptr += 1;
+ /* Label the Output Pointer */
+ SET_LABEL(p, outptr);
+ outptr += 3;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqin_fields);
+ swapped_seqin_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqin_ptr);
+ swapped_seqin_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, swapped_seqin_ptr_jump, swapped_seqin_ptr);
+ PATCH_MOVE(p, move_outlen, outptr);
+ PATCH_MOVE(p, move_seqout_ptr, shd_ptr);
+ PATCH_MOVE(p, write_swapped_seqin_ptr, swapped_seqin_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_decap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP decapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the decapsulation input packet.
+ * The descriptor computes the AES-XCBC-MAC-96 to check if the received ICV
+ * is correct, rereads the input packet to compute the MD5 ICV, overwrites
+ * the XCBC ICV, and then sends the modified input packet to the
+ * DES-CBC/3DES-CBC & HMAC-MD5-96 IPsec.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96), even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ip_hdr_len = (pdb->options & PDBHDRLEN_MASK) >>
+ PDBHDRLEN_ESP_DECAP_SHIFT;
+
+ LABEL(hdr);
+ LABEL(jump_cmd);
+ LABEL(keyjump);
+ LABEL(outlen);
+ LABEL(seqin_ptr);
+ LABEL(seqout_ptr);
+ LABEL(swapped_seqout_fields);
+ LABEL(swapped_seqout_ptr);
+ REFERENCE(seqout_ptr_jump);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_jump);
+ REFERENCE(move_jump_back);
+ REFERENCE(move_seqin_ptr);
+ REFERENCE(swapped_seqout_ptr_jump);
+ REFERENCE(write_swapped_seqout_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the key options below in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), MATH0, 4,
+ IMMED2);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_MD5, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1);
+ SEQFIFOLOAD(p, ICV1, IPSEC_ICV_MD5_TRUNC_SIZE, FLUSH1 | LAST1);
+ /* Swap SEQOUTPTR to SEQINPTR. */
+ move_seqin_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 8,
+ IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xA00000e1, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqout_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqout_ptr_jump = JUMP(p, swapped_seqout_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+/*
+ * TODO: To be changed when proper support is added in RTA (can't load
+ * a command that is also written by RTA).
+ * Change when proper RTA support is added.
+ */
+ SET_LABEL(p, jump_cmd);
+ WORD(p, 0xA00000f3);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH0, ADD, ip_hdr_len, VSEQOUTSZ, 4, IMMED2);
+ move_jump = MOVE(p, DESCBUF, 0, OFIFO, 0, 8, WAITCOMP | IMMED);
+ move_jump_back = MOVE(p, OFIFO, 0, DESCBUF, 0, 8, IMMED);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT2, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+ seqout_ptr_jump = JUMP(p, seqout_ptr, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_CLR_C2MODE |
+ CLRW_CLR_C2DATAS | CLRW_CLR_C2CTX | CLRW_RESET_CLS1_CHA, CLRW, 0,
+ 4, 0);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, ADD,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), SEQINSZ, 4,
+ IMMED2);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the SEQ OUT PTR */
+ SET_LABEL(p, seqout_ptr);
+ seqout_ptr += 2;
+ /* Label the Output Length */
+ SET_LABEL(p, outlen);
+ outlen += 4;
+ /* Label the SEQ IN PTR */
+ SET_LABEL(p, seqin_ptr);
+ seqin_ptr += 5;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqout_fields);
+ swapped_seqout_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqout_ptr);
+ swapped_seqout_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, seqout_ptr_jump, seqout_ptr);
+ PATCH_JUMP(p, swapped_seqout_ptr_jump, swapped_seqout_ptr);
+ PATCH_MOVE(p, move_jump, jump_cmd);
+ PATCH_MOVE(p, move_jump_back, seqin_ptr);
+ PATCH_MOVE(p, move_seqin_ptr, outlen);
+ PATCH_MOVE(p, write_swapped_seqout_ptr, swapped_seqout_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_NEW_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or keys can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_ENC_BASE_DESC_LEN (12 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor
+ * length for the case of
+ * NULL encryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or key can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (11 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
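+
+/*
+ * Usage sketch (illustrative addition, not part of the original flib):
+ * pairing the define above with rta_inline_query() before building the
+ * descriptor. The assumed job descriptor length (7 commands) is an
+ * illustration only.
+ */
+static inline bool
+example_new_encap_keys_fit(unsigned int authkeylen, unsigned int cipherkeylen)
+{
+	unsigned int data_len[2] = { authkeylen, cipherkeylen };
+	uint32_t inl_mask = 0;
+
+	if (rta_inline_query(IPSEC_NEW_ENC_BASE_DESC_LEN, 7 * CAAM_CMD_SZ,
+			     data_len, &inl_mask, 2) < 0)
+		return false;
+
+	return (inl_mask & 3) == 3;	/* both keys may be inlined */
+}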
+
+/**
+ * cnstr_shdsc_ipsec_new_encap - IPSec new mode ESP encapsulation
+ * protocol-level shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the encapsulation PDB.
+ * @opt_ip_hdr: pointer to Optional IP Header
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_INL, opt_ip_hdr points to the buffer to
+ * be inlined in the PDB. Number of bytes (buffer size) copied is provided
+ * in pdb->ip_hdr_len.
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_REF, opt_ip_hdr points to the address of
+ * the Optional IP Header. The address will be inlined in the PDB verbatim.
+ * -for other values of OIHI options field, opt_ip_hdr is not used.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Note: L2 header copy functionality is implemented assuming that bits 14
+ * (currently reserved) and 16-23 (part of Outer IP Header Material Length)
+ * in DPOVRD register are not used (which is usually the case when L3 header
+ * is provided in PDB).
+ * When DPOVRD[14] is set, frame starts with an L2 header; in this case, the
+ * L2 header length is found at DPOVRD[23:16]. SEC uses this length to copy
+ * the header and then it deletes DPOVRD[23:16] (so there is no side effect
+ * when later running IPsec protocol).
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
+ bool swap,
+ enum rta_share_type share,
+ struct ipsec_encap_pdb *pdb,
+ uint8_t *opt_ip_hdr,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+ LABEL(l2copy);
+ REFERENCE(pl2copy);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode encap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, share, hdr, 0);
+
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+
+ switch (pdb->options & PDBOPTS_ESP_OIHI_MASK) {
+ case PDBOPTS_ESP_OIHI_PDB_INL:
+ COPY_DATA(p, opt_ip_hdr, pdb->ip_hdr_len);
+ break;
+ case PDBOPTS_ESP_OIHI_PDB_REF:
+ if (ps)
+ COPY_DATA(p, opt_ip_hdr, 8);
+ else
+ COPY_DATA(p, opt_ip_hdr, 4);
+ break;
+ default:
+ break;
+ }
+ SET_LABEL(p, hdr);
+
+ MATHB(p, DPOVRD, AND, IPSEC_N_ENCAP_DPOVRD_L2_COPY, NONE, 4, IMMED2);
+ pl2copy = JUMP(p, l2copy, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+ MATHI(p, DPOVRD, RSHIFT, IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT, VSEQOUTSZ,
+ 1, 0);
+ MATHB(p, DPOVRD, AND, ~IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK, DPOVRD, 4,
+ IMMED2);
+ /* TODO: CLASS2 corresponds to AUX=2'b10; add more intuitive defines */
+ SEQFIFOSTORE(p, METADATA, 0, 0, CLASS2 | VLF);
+ SET_LABEL(p, l2copy);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pl2copy, l2copy);
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
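+
+/*
+ * Usage sketch (illustrative addition, not part of the original flib):
+ * requesting the inline OIHI variant, in which the outer IP header travels
+ * inside the PDB. The caller is assumed to have already recorded the header
+ * size in pdb->ip_hdr_len.
+ */
+static inline int
+example_new_encap_inline_hdr(uint32_t *descbuf, struct ipsec_encap_pdb *pdb,
+			     uint8_t *outer_ip_hdr,
+			     struct alginfo *cipherdata,
+			     struct alginfo *authdata)
+{
+	pdb->options &= ~PDBOPTS_ESP_OIHI_MASK;
+	pdb->options |= PDBOPTS_ESP_OIHI_PDB_INL;
+	return cnstr_shdsc_ipsec_new_encap(descbuf, false, false, SHR_SERIAL,
+					   pdb, outer_ip_hdr,
+					   cipherdata, authdata);
+}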
+
+/**
+ * IPSEC_NEW_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_DEC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor
+ * length for the case of
+ * NULL decryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_DEC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * cnstr_shdsc_ipsec_new_decap - IPSec new mode ESP decapsulation protocol-level
+ * shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
+ bool swap,
+ enum rta_share_type share,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode decap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, share, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_AUTH_VAR_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ * for the case of variable-length authentication
+ * only data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_BASE_DESC_LEN (27 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor
+ * length for variable-length authentication only
+ * data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN \
+ (IPSEC_AUTH_VAR_BASE_DESC_LEN + CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_BASE_DESC_LEN (19 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_AES_DEC_BASE_DESC_LEN (IPSEC_AUTH_BASE_DESC_LEN + \
+ CAAM_CMD_SZ)
+
+/**
+ * cnstr_shdsc_authenc - authenc-like descriptor
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
+ * @authdata: pointer to authentication transform definitions.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512}
+ * Note: The key for authentication is expected to be given as plaintext.
+ * Note: Keys longer than the block size of the selected algorithm's
+ * underlying hash function are not supported.
+ *
+ * @ivlen: length of the IV to be read from the input frame, before any data
+ * to be processed
+ *
+ * @trunc_len: the length of the ICV to be written to the output frame. If 0,
+ * the full digest length of the selected algorithm is used.
+ * @dir: Protocol direction, encapsulation or decapsulation (DIR_ENC/DIR_DEC)
+ *
+ * Note: Here's how the input frame needs to be formatted so that the processing
+ * will be done correctly:
+ * For encapsulation:
+ * Input:
+ * +----+----------------+------------------------------+----------------+
+ * | IV | Auth-only head | Padded data to be auth & enc | Auth-only tail |
+ * +----+----------------+------------------------------+----------------+
+ * Output:
+ * +--------------------------------+-----+
+ * | Authenticated & encrypted data | ICV |
+ * +--------------------------------+-----+
+ *
+ * For decapsulation:
+ * Input:
+ * +----+----------------+-----------------+----------------+-----+
+ * | IV | Auth-only head | Auth & Enc data | Auth-only tail | ICV |
+ * +----+----------------+-----------------+----------------+-----+
+ * Output:
+ * +--------------------------------+
+ * | Decrypted & authenticated data |
+ * +--------------------------------+
+ *
+ * Note: This descriptor can use per-packet commands, encoded as below in the
+ * DPOVRD register:
+ * 31     28              16             0
+ * +------+---------------+--------------+
+ * | 0x8  | auth_tail_len | auth_hdr_len |
+ * +------+---------------+--------------+
+ *
+ * This mechanism is available only for SoCs having SEC ERA >= 3. In other
+ * words, this will not work for P4080TO2.
+ *
+ * Note: The descriptor does not add any kind of padding to the input data,
+ * so the upper layer needs to ensure that the data is padded properly,
+ * according to the selected cipher. Failure to do so will result in
+ * the descriptor failing with a data-size error.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ uint16_t ivlen,
+ uint8_t trunc_len, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
+ int data_type;
+
+ LABEL(keyjmp);
+ LABEL(skipkeys);
+ LABEL(proc_icv);
+ LABEL(no_auth_tail);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipkeys);
+ REFERENCE(p_proc_icv);
+ REFERENCE(p_no_auth_tail);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ /*
+ * Since we currently assume that key length is equal to hash digest
+ * size, it's ok to truncate keylen value.
+ */
+ trunc_len = trunc_len && (trunc_len < authdata->keylen) ?
+ trunc_len : (uint8_t)authdata->keylen;
+
+ SHR_HDR(p, share, 1, SC);
+
+ /* Collect the (auth_tail || auth_hdr) len from DPOVRD */
+ MATHB(p, DPOVRD, ADD, 0x80000000, MATH2, 4, IMMED2);
+
+ /* Get auth_hdr len in MATH0 */
+ MATHB(p, MATH2, AND, 0xFFFF, MATH0, 4, IMMED2);
+
+ /* Get auth_tail len in MATH2 */
+ MATHB(p, MATH2, AND, 0xFFF0000, MATH2, 4, IMMED2);
+ MATHI(p, MATH2, RSHIFT, 16, MATH2, 4, IMMED2);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (need_dk)
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
+
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (need_dk) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipkeys);
+ } else {
+ SET_LABEL(p, skipkeys);
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ /* Read IV */
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ SEQLOAD(p, CONTEXT1, 16, ivlen, 0);
+ else
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+
+ /*
+ * authenticate auth_hdr data
+ */
+ MATHB(p, MATH0, ADD, ZERO, VSEQINSZ, 4, 0);
+ SEQFIFOLOAD(p, MSG2, 0, VLF);
+
+ /*
+ * Prepare the length of the data to be both encrypted/decrypted
+ * and authenticated/checked
+ */
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ if (dir == DIR_DEC) {
+ MATHB(p, VSEQINSZ, SUB, trunc_len, VSEQINSZ, 4, IMMED2);
+ data_type = MSGINSNOOP;
+ } else {
+ data_type = MSGOUTSNOOP;
+ }
+
+ MATHB(p, VSEQINSZ, ADD, ZERO, VSEQOUTSZ, 4, 0);
+
+ /* Prepare for writing the output frame */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* Check if there is no auth-tail */
+ MATHB(p, MATH2, ADD, ZERO, MATH2, 4, 0);
+ p_no_auth_tail = JUMP(p, no_auth_tail, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ /*
+ * Read input plain/cipher text, encrypt/decrypt & auth & write
+ * to output
+ */
+ SEQFIFOLOAD(p, data_type, 0, VLF | LAST1 | FLUSH1);
+
+ /* Authenticate auth tail */
+ MATHB(p, MATH2, ADD, ZERO, VSEQINSZ, 4, 0);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+
+ /* Jump to process icv */
+ p_proc_icv = JUMP(p, proc_icv, LOCAL_JUMP, ALL_FALSE, MATH_Z);
+
+ SET_LABEL(p, no_auth_tail);
+
+ SEQFIFOLOAD(p, data_type, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ SET_LABEL(p, proc_icv);
+
+ if (dir == DIR_ENC)
+ /* Finally, write the ICV */
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+ else
+ /* Read the ICV to check */
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pskipkeys, skipkeys);
+ PATCH_JUMP(p, p_no_auth_tail, no_auth_tail);
+ PATCH_JUMP(p, p_proc_icv, proc_icv);
+ return PROGRAM_FINALIZE(p);
+}
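+
+/*
+ * Example: a minimal usage sketch for cnstr_shdsc_authenc(). Illustrative
+ * only, not part of the API: the AES-CBC/HMAC-SHA1 selection, the key
+ * sizes and the buffer ownership are assumptions made for the example.
+ */
+#if 0 /* illustrative sketch, not compiled */
+static int example_build_authenc_shdesc(uint32_t *descbuf,
+					uint8_t *cipher_key,
+					uint8_t *auth_key)
+{
+	struct alginfo cipherdata = {
+		.algtype = OP_ALG_ALGSEL_AES,
+		.algmode = OP_ALG_AAI_CBC,
+		.key = (uint64_t)(size_t)cipher_key,
+		.keylen = 16,
+		.key_enc_flags = 0,
+	};
+	struct alginfo authdata = {
+		.algtype = OP_ALG_ALGSEL_SHA1,
+		.key = (uint64_t)(size_t)auth_key,
+		.keylen = 20,
+		.key_enc_flags = 0,
+	};
+
+	/*
+	 * A per-packet DPOVRD word (SEC ERA >= 3) would be built per the
+	 * layout documented above, e.g.:
+	 *   dpovrd = 0x80000000 | (auth_tail_len << 16) | auth_hdr_len;
+	 */
+
+	/* 16-byte IV read from the frame, 12-byte truncated ICV, encap */
+	return cnstr_shdsc_authenc(descbuf, true, true, SHR_SERIAL,
+				   &cipherdata, &authdata, 16, 12, DIR_ENC);
+}
+#endif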
+
+#endif /* __DESC_IPSEC_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h
new file mode 100644
index 000000000..b5e2d24e4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -0,0 +1,3753 @@
+/* SPDX-License-Identifier: BSD-3-Clause or GPL-2.0+
+ * Copyright 2008-2013 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
+ */
+
+#ifndef __DESC_PDCP_H__
+#define __DESC_PDCP_H__
+
+#include "rta.h"
+#include "common.h"
+
+/**
+ * DOC: PDCP Shared Descriptor Constructors
+ *
+ * Shared descriptors for PDCP protocol.
+ */
+
+/**
+ * PDCP_NULL_MAX_FRAME_LEN - The maximum frame length that is supported by
+ * PDCP NULL protocol.
+ */
+#define PDCP_NULL_MAX_FRAME_LEN 0x00002FFF
+
+/**
+ * PDCP_MAC_I_LEN - The length of the MAC-I for PDCP protocol operation
+ */
+#define PDCP_MAC_I_LEN 0x00000004
+
+/**
+ * PDCP_MAX_FRAME_LEN_STATUS - The status returned in FD status/command field in
+ * case the input frame is larger than
+ * PDCP_NULL_MAX_FRAME_LEN.
+ */
+#define PDCP_MAX_FRAME_LEN_STATUS 0xF1
+
+/**
+ * PDCP_C_PLANE_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the PDCP
+ * Control Plane header. For PDCP Control Plane, the SN
+ * is constant (5 bits) as opposed to PDCP Data Plane
+ * (7/12/15 bits).
+ */
+#define PDCP_C_PLANE_SN_MASK 0x1F000000
+#define PDCP_C_PLANE_SN_MASK_BE 0x0000001F
+
+/**
+ * PDCP_12BIT_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the
+ * PDCP User Plane header.
+ */
+#define PDCP_12BIT_SN_MASK 0xFF0F0000
+#define PDCP_12BIT_SN_MASK_BE 0x00000FFF
+
+/**
+ * PDCP_U_PLANE_15BIT_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the
+ * PDCP User Plane header. For PDCP Control Plane,
+ * the SN is constant (5 bits) as opposed to PDCP
+ * Data Plane (7/12/15 bits).
+ */
+#define PDCP_U_PLANE_15BIT_SN_MASK 0xFF7F0000
+#define PDCP_U_PLANE_15BIT_SN_MASK_BE 0x00007FFF
+
+/**
+ * PDCP_U_PLANE_18BIT_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the
+ * PDCP User Plane header.
+ */
+#define PDCP_U_PLANE_18BIT_SN_MASK 0xFFFF0300
+#define PDCP_U_PLANE_18BIT_SN_MASK_BE 0x0003FFFF
+
+/**
+ * PDCP_BEARER_MASK - This mask is used for masking out the bearer for PDCP
+ * processing with SNOW f9 in LTE.
+ *
+ * The value on which this mask is applied is formatted as below:
+ * Count-C (32 bit) | Bearer (5 bit) | Direction (1 bit) | 0 (26 bits)
+ *
+ * Applying this mask is done for creating the upper 64 bits of the IV needed
+ * for SNOW f9.
+ *
+ * The lower 32 bits of the mask are used for masking the direction for AES
+ * CMAC IV.
+ */
+#define PDCP_BEARER_MASK 0x00000004FFFFFFFFull
+#define PDCP_BEARER_MASK_BE 0xFFFFFFFF04000000ull
+
+/**
+ * PDCP_DIR_MASK - This mask is used for masking out the direction for PDCP
+ * processing with SNOW f9 in LTE.
+ *
+ * The value on which this mask is applied is formatted as below:
+ * Bearer (5 bit) | Direction (1 bit) | 0 (26 bits)
+ *
+ * Applying this mask is done for creating the lower 32 bits of the IV needed
+ * for SNOW f9.
+ *
+ * The upper 32 bits of the mask are used for masking the direction for AES
+ * CMAC IV.
+ */
+#define PDCP_DIR_MASK 0x00000000000000F8ull
+#define PDCP_DIR_MASK_BE 0xF800000000000000ull
+
+/**
+ * PDCP_NULL_INT_MAC_I_VAL - The value of the PDCP PDU MAC-I in case NULL
+ * integrity is used.
+ */
+#define PDCP_NULL_INT_MAC_I_VAL 0x00000000
+
+/**
+ * PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS - The status used to report ICV check
+ * failed in case of NULL integrity
+ * Control Plane processing.
+ */
+#define PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS 0x0A
+/**
+ * PDCP_DPOVRD_HFN_OV_EN - Value to be used in the FD status/cmd field to
+ * indicate the HFN override mechanism is active for the
+ * frame.
+ */
+#define PDCP_DPOVRD_HFN_OV_EN 0x80000000
+
+/**
+ * PDCP_P4080REV2_HFN_OV_BUFLEN - The length in bytes of the supplementary space
+ * that must be provided by the user at the
+ * beginning of the input frame buffer for
+ * P4080 REV 2.
+ *
+ * The format of the frame buffer is the following:
+ *
+ * |<---PDCP_P4080REV2_HFN_OV_BUFLEN-->|
+ * //===================================||=============||==============\\
+ * || PDCP_DPOVRD_HFN_OV_EN | HFN value || PDCP Header || PDCP Payload ||
+ * \\===================================||=============||==============//
+ *
+ * If HFN override mechanism is not desired, then the MSB of the first 4 bytes
+ * must be set to 0b.
+ */
+#define PDCP_P4080REV2_HFN_OV_BUFLEN 4
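+
+/*
+ * Example: preparing the supplementary word described above. A minimal
+ * host-side sketch, assuming the caller owns a frame buffer with
+ * PDCP_P4080REV2_HFN_OV_BUFLEN spare bytes at its start; the helper name
+ * is hypothetical.
+ */
+#if 0 /* illustrative sketch, not compiled */
+static inline void
+example_set_p4080rev2_hfn_ov(uint8_t *frame_buf, bool enable, uint32_t hfn)
+{
+	uint32_t word = enable ? (PDCP_DPOVRD_HFN_OV_EN | hfn) : 0;
+
+	/* If override is not desired, the MSB of the first 4 bytes is 0b */
+	memcpy(frame_buf, &word, PDCP_P4080REV2_HFN_OV_BUFLEN);
+}
+#endif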
+
+/**
+ * enum cipher_type_pdcp - Type selectors for cipher types in PDCP protocol OP
+ * instructions.
+ * @PDCP_CIPHER_TYPE_NULL: NULL
+ * @PDCP_CIPHER_TYPE_SNOW: SNOW F8
+ * @PDCP_CIPHER_TYPE_AES: AES
+ * @PDCP_CIPHER_TYPE_ZUC: ZUCE
+ * @PDCP_CIPHER_TYPE_INVALID: invalid option
+ */
+enum cipher_type_pdcp {
+ PDCP_CIPHER_TYPE_NULL,
+ PDCP_CIPHER_TYPE_SNOW,
+ PDCP_CIPHER_TYPE_AES,
+ PDCP_CIPHER_TYPE_ZUC,
+ PDCP_CIPHER_TYPE_INVALID
+};
+
+/**
+ * enum auth_type_pdcp - Type selectors for integrity types in PDCP protocol OP
+ * instructions.
+ * @PDCP_AUTH_TYPE_NULL: NULL
+ * @PDCP_AUTH_TYPE_SNOW: SNOW F9
+ * @PDCP_AUTH_TYPE_AES: AES CMAC
+ * @PDCP_AUTH_TYPE_ZUC: ZUCA
+ * @PDCP_AUTH_TYPE_INVALID: invalid option
+ */
+enum auth_type_pdcp {
+ PDCP_AUTH_TYPE_NULL,
+ PDCP_AUTH_TYPE_SNOW,
+ PDCP_AUTH_TYPE_AES,
+ PDCP_AUTH_TYPE_ZUC,
+ PDCP_AUTH_TYPE_INVALID
+};
+
+/**
+ * enum pdcp_dir - Type selectors for direction for PDCP protocol
+ * @PDCP_DIR_UPLINK: uplink direction
+ * @PDCP_DIR_DOWNLINK: downlink direction
+ * @PDCP_DIR_INVALID: invalid option
+ */
+enum pdcp_dir {
+ PDCP_DIR_UPLINK = 0,
+ PDCP_DIR_DOWNLINK = 1,
+ PDCP_DIR_INVALID
+};
+
+/**
+ * enum pdcp_plane - PDCP domain selectors
+ * @PDCP_CONTROL_PLANE: Control Plane
+ * @PDCP_DATA_PLANE: Data Plane
+ * @PDCP_SHORT_MAC: Short MAC
+ */
+enum pdcp_plane {
+ PDCP_CONTROL_PLANE,
+ PDCP_DATA_PLANE,
+ PDCP_SHORT_MAC
+};
+
+/**
+ * enum pdcp_sn_size - Sequence Number Size selectors for PDCP protocol
+ * @PDCP_SN_SIZE_5: 5bit sequence number
+ * @PDCP_SN_SIZE_7: 7bit sequence number
+ * @PDCP_SN_SIZE_12: 12bit sequence number
+ * @PDCP_SN_SIZE_15: 15bit sequence number
+ * @PDCP_SN_SIZE_18: 18bit sequence number
+ */
+enum pdcp_sn_size {
+ PDCP_SN_SIZE_5 = 5,
+ PDCP_SN_SIZE_7 = 7,
+ PDCP_SN_SIZE_12 = 12,
+ PDCP_SN_SIZE_15 = 15,
+ PDCP_SN_SIZE_18 = 18
+};
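+
+/*
+ * Example: the non-protocol descriptor constructors below repeatedly map
+ * an SN size to a header offset, a header length to load and an SN mask.
+ * A minimal sketch of that mapping with a hypothetical helper name
+ * (little-endian masks shown; the _BE variants apply when byte-swapping
+ * is requested):
+ */
+#if 0 /* illustrative sketch, not compiled */
+static inline int
+example_pdcp_sn_params(enum pdcp_sn_size sn_size, uint32_t *offset,
+		       uint32_t *length, uint32_t *sn_mask)
+{
+	switch (sn_size) {
+	case PDCP_SN_SIZE_5:
+		*offset = 7; *length = 1; *sn_mask = PDCP_C_PLANE_SN_MASK;
+		return 0;
+	case PDCP_SN_SIZE_12:
+		*offset = 6; *length = 2; *sn_mask = PDCP_12BIT_SN_MASK;
+		return 0;
+	case PDCP_SN_SIZE_18:
+		*offset = 5; *length = 3;
+		*sn_mask = PDCP_U_PLANE_18BIT_SN_MASK;
+		return 0;
+	default:
+		return -ENOTSUP;
+	}
+}
+#endif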
+
+/*
+ * PDCP Control Plane Protocol Data Blocks
+ */
+#define PDCP_C_PLANE_PDB_HFN_SHIFT 5
+#define PDCP_C_PLANE_PDB_BEARER_SHIFT 27
+#define PDCP_C_PLANE_PDB_DIR_SHIFT 26
+#define PDCP_C_PLANE_PDB_HFN_THR_SHIFT 5
+
+#define PDCP_U_PLANE_PDB_OPT_SHORT_SN 0x2
+#define PDCP_U_PLANE_PDB_OPT_15B_SN 0x4
+#define PDCP_U_PLANE_PDB_OPT_18B_SN 0x6
+#define PDCP_U_PLANE_PDB_SHORT_SN_HFN_SHIFT 7
+#define PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT 12
+#define PDCP_U_PLANE_PDB_15BIT_SN_HFN_SHIFT 15
+#define PDCP_U_PLANE_PDB_18BIT_SN_HFN_SHIFT 18
+#define PDCP_U_PLANE_PDB_BEARER_SHIFT 27
+#define PDCP_U_PLANE_PDB_DIR_SHIFT 26
+#define PDCP_U_PLANE_PDB_SHORT_SN_HFN_THR_SHIFT 7
+#define PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT 12
+#define PDCP_U_PLANE_PDB_15BIT_SN_HFN_THR_SHIFT 15
+#define PDCP_U_PLANE_PDB_18BIT_SN_HFN_THR_SHIFT 18
+
+struct pdcp_pdb {
+ union {
+ uint32_t opt;
+ uint32_t rsvd;
+ } opt_res;
+	uint32_t hfn_res;	/* HyperFrame number (27, 25 or 21 bits),
+ * left aligned & right-padded with zeros.
+ */
+ uint32_t bearer_dir_res;/* Bearer(5 bits), packet direction (1 bit),
+ * left aligned & right-padded with zeros.
+ */
+ uint32_t hfn_thr_res; /* HyperFrame number threshold (27, 25 or 21
+ * bits), left aligned & right-padded with
+ * zeros.
+ */
+};
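+
+/*
+ * Example: filling a full Control Plane PDB with the shift constants
+ * above. A minimal sketch with a hypothetical helper name, assuming the
+ * caller zeroes the structure and stores host-endian values (any
+ * byte-swapping is left to the descriptor):
+ */
+#if 0 /* illustrative sketch, not compiled */
+static inline void
+example_fill_cplane_pdb(struct pdcp_pdb *pdb, uint32_t hfn, uint8_t bearer,
+			uint8_t dir, uint32_t hfn_thr)
+{
+	memset(pdb, 0, sizeof(*pdb));
+	/* HFN and its threshold sit left-aligned above the 5-bit SN */
+	pdb->hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+	pdb->hfn_thr_res = hfn_thr << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+	pdb->bearer_dir_res =
+		((uint32_t)bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+		((uint32_t)dir << PDCP_C_PLANE_PDB_DIR_SHIFT);
+}
+#endif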
+
+/*
+ * PDCP internal PDB types
+ */
+enum pdb_type_e {
+ PDCP_PDB_TYPE_NO_PDB,
+ PDCP_PDB_TYPE_FULL_PDB,
+ PDCP_PDB_TYPE_REDUCED_PDB,
+ PDCP_PDB_TYPE_INVALID
+};
+
+/*
+ * Function for appending the portion of a PDCP Control Plane shared descriptor
+ * which performs NULL encryption and integrity (i.e. copies the input frame
+ * to the output frame, appending 32 bits of zeros at the end (MAC-I for
+ * NULL integrity)).
+ */
+static inline int
+pdcp_insert_cplane_null_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata __maybe_unused,
+ struct alginfo *authdata __maybe_unused,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size __maybe_unused,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
+ } else {
+ MATHB(p, VSEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4,
+ IMMED2);
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ MATHB(p, VSEQOUTSZ, SUB, ONE, MATH0, 4, 0);
+ }
+
+ MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by OR-ing the length from M0 with
+ * the command word in the M0 register, and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, VSEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
+ IMMED2);
+ JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+ else
+ MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
+ }
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ /* Placeholder for MOVE command with length from M0 register */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, MATH1, XOR, MATH1, MATH0, 8, 0);
+ MOVE(p, MATH0, 0, OFIFO, 0, 4, IMMED);
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return 0;
+}
+
+static inline int
+insert_copy_frame_op(struct program *p,
+ struct alginfo *cipherdata __maybe_unused,
+ unsigned int dir __maybe_unused)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQOUTSZ, 4, 0);
+ MATHB(p, VSEQOUTSZ, SUB, ONE, VSEQOUTSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
+ MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by OR-ing the length from M0 with
+ * the command word in the M0 register, and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, SEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
+ IFB | IMMED2);
+ JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M0 register */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_int_only_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata __maybe_unused,
+ struct alginfo *authdata, unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+
+ /* 12 bit SN is only supported for protocol offload case */
+ if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_12) {
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER_RN,
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+
+ /* Non-proto is supported only for 5bit cplane and 18bit uplane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ switch (authdata->algtype) {
+ case PDCP_AUTH_TYPE_SNOW:
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, length, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+
+ if (swap == false) {
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+
+ MATHB(p, MATH2, AND, PDCP_BEARER_MASK, MATH2, 8,
+ IMMED2);
+ MOVEB(p, DESCBUF, 0x0C, MATH3, 0, 4, WAITCOMP | IMMED);
+ MATHB(p, MATH3, AND, PDCP_DIR_MASK, MATH3, 8, IMMED2);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 0x0C, WAITCOMP | IMMED);
+ } else {
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH2, AND, PDCP_BEARER_MASK_BE, MATH2, 8,
+ IMMED2);
+
+ MOVE(p, DESCBUF, 0x0C, MATH3, 0, 4, WAITCOMP | IMMED);
+ MATHB(p, MATH3, AND, PDCP_DIR_MASK_BE, MATH3, 8,
+ IMMED2);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 0x0C, WAITCOMP | IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ } else {
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
+ 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
+ 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4,
+ 0);
+ }
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by OR-ing the length from M1 with
+ * the command word in the M1 register, and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
+ 8, WAITCOMP | IMMED);
+ }
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ /*
+ * Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV2, 4, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_AES:
+ /* Insert Auth Key */
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, length, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+
+ if (swap == false) {
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, IFIFOAB1, 0, 8, IMMED);
+ } else {
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 8, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ } else {
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
+ 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
+ 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4,
+ 0);
+ }
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by OR-ing the length from M1 with
+ * the command word in the M1 register, and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
+ 8, WAITCOMP | IMMED);
+ }
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ } else {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /*
+ * Placeholder for MOVE command with length from
+ * M1 register
+ */
+ MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV1, 4, LAST1 | FLUSH1);
+ else
+ SEQSTORE(p, CONTEXT1, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ SEQINPTR(p, 0, length, RTO);
+ if (swap == false) {
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+
+ } else {
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+ }
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV2, 4, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ default:
+ pr_err("%s: Invalid integrity algorithm selected: %d\n",
+ "pdcp_insert_cplane_int_only_op", authdata->algtype);
+ return -EINVAL;
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_enc_only_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata __maybe_unused,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18 &&
+ !(rta_sec_era == RTA_SEC_ERA_8 &&
+ authdata->algtype == 0))
+ || (rta_sec_era == RTA_SEC_ERA_10)) {
+ if (sn_size == PDCP_SN_SIZE_5)
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ (uint16_t)cipherdata->algtype << 8);
+ else
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER_RN,
+ (uint16_t)cipherdata->algtype << 8);
+ return 0;
+ }
+ /* Non-proto is supported only for 5bit cplane and 12bit/18bit uplane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_12:
+ offset = 6;
+ length = 2;
+ sn_mask = (swap == false) ? PDCP_12BIT_SN_MASK :
+ PDCP_12BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+ SEQSTORE(p, MATH0, offset, length, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_SNOW:
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ case PDCP_CIPHER_TYPE_AES:
+ MOVEB(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
+ MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+ "pdcp_insert_cplane_enc_only_op", cipherdata->algtype);
+ return -EINVAL;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ FIFOLOAD(p, MSG1, PDCP_NULL_INT_MAC_I_VAL, 4,
+ LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, OFIFO, 0, MATH1, 4, PDCP_MAC_I_LEN, WAITCOMP | IMMED);
+ MATHB(p, MATH1, XOR, PDCP_NULL_INT_MAC_I_VAL, NONE, 4, IMMED2);
+ JUMP(p, PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS,
+ HALT_STATUS, ALL_FALSE, MATH_Z);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_uplane_snow_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+ int pclid;
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ pclid = OP_PCLID_LTE_PDCP_CTRL_MIXED;
+ else
+ pclid = OP_PCLID_LTE_PDCP_USER_RN;
+
+ PROTOCOL(p, dir, pclid,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+ /* Non-proto is supported only for 5bit cplane and 18bit uplane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, length, VSEQINSZ, 4, IMMED2);
+
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVEB(p, MATH0, offset, IFIFOAB2, 0, length, IMMED);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+
+ SEQSTORE(p, MATH0, offset, length, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVEB(p, MATH1, 0, CONTEXT1, 0, 8, IMMED);
+ MOVEB(p, MATH1, 0, CONTEXT2, 0, 4, WAITCOMP | IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_BEARER_MASK),
+ MATH2, 4, IMMED2);
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_DIR_MASK),
+ MATH3, 4, IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE),
+ MATH2, 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE),
+ MATH3, 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+
+ MOVEB(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ } else {
+ MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4, IMMED2);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ else
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
+
+ if (rta_sec_era <= RTA_SEC_ERA_2) {
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
+ } else {
+ MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_uplane_zuc_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ SET_LABEL(p, keyjump);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+ int pclid;
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ pclid = OP_PCLID_LTE_PDCP_CTRL_MIXED;
+ else
+ pclid = OP_PCLID_LTE_PDCP_USER_RN;
+
+ PROTOCOL(p, dir, pclid,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+ /* Non-proto is supported only for 5bit cplane and 18bit uplane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVEB(p, MATH0, offset, IFIFOAB2, 0, length, IMMED);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, offset, length, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVEB(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVEB(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_uplane_aes_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+
+ if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER_RN,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+
+ /* Non-proto is supported only for 18bit uplane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+
+ default:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, offset, length, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVEB(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+ MOVEB(p, MATH0, offset, IFIFOAB1, 0, length, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVEB(p, MATH2, 0, CONTEXT1, 16, 8, IMMED);
+ SEQINPTR(p, 0, PDCP_NULL_MAX_FRAME_LEN, RTO);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SEQFIFOLOAD(p, SKIP, length, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVEB(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ MOVEB(p, MATH2, 0, CONTEXT1, 16, 8, IMMED);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ SEQINPTR(p, 0, 0, SOP);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVEB(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_acc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_hfn_ovrd __maybe_unused)
+{
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL,
+ (uint16_t)cipherdata->algtype);
+ else
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER_RN,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+}
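+
+/*
+ * Example: the protocol-offload paths above pack the cipher selector into
+ * the high byte and the integrity selector into the low byte of the
+ * 16-bit PROTOCOL operand. A minimal sketch with a hypothetical helper
+ * name:
+ */
+#if 0 /* illustrative sketch, not compiled */
+static inline uint16_t
+example_pdcp_proto_info(enum cipher_type_pdcp cipher,
+			enum auth_type_pdcp auth)
+{
+	return (uint16_t)(((uint16_t)cipher << 8) | (uint16_t)auth);
+}
+#endif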
+
+static inline int
+pdcp_insert_cplane_snow_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+
+ LABEL(back_to_sd_offset);
+ LABEL(end_desc);
+ LABEL(local_offset);
+ LABEL(jump_to_beginning);
+ LABEL(fifo_load_mac_i_offset);
+ REFERENCE(seqin_ptr_read);
+ REFERENCE(seqin_ptr_write);
+ REFERENCE(seq_out_read);
+ REFERENCE(jump_back_to_sd_cmd);
+ REFERENCE(move_mac_i_to_desc_buf);
+
+ if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+ (rta_sec_era == RTA_SEC_ERA_10)) {
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ else
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER_RN,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+ /* Non-proto is supported only for 5bit cplane and 18bit uplane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 4, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, offset, length, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, length, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVEB(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+ 4, IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+ 4, IMMED2);
+ /*
+ * Note: Although the calculations below might seem a
+ * little off, the logic is the following:
+ *
+ * - SEQ IN PTR RTO below needs the full length of the
+ * frame; in case of P4080_REV_2_HFN_OV_WORKAROUND,
+ * this means the length of the frame to be processed
+ * + 4 bytes (the HFN override flag and value).
+ * The length of the frame to be processed minus 1
+ * byte is in the VSIL register (because
+ * VSIL = SIL + 3, due to the 1-byte header being
+ * already written by the SEQ STORE above). So for
+ * calculating the length to use in RTO, I add one
+ * to the VSIL value in order to obtain the total
+ * frame length. This helps in case of P4080 which
+ * can have the value 0 as an operand in a MATH
+ * command only as SRC1. When the HFN override
+ * workaround is not enabled, the length of the
+ * frame is given by the SIL register; the
+ * calculation is similar to the one in the SEC 4.2
+ * and SEC 5.3 cases.
+ */
+ if (era_2_sw_hfn_ovrd)
+ MATHB(p, VSEQOUTSZ, ADD, ONE, MATH1, 4,
+ 0);
+ else
+ MATHB(p, SEQINSZ, ADD, MATH3, MATH1, 4,
+ 0);
+ }
+ /*
+ * Placeholder for filling the length in
+ * SEQIN PTR RTO below
+ */
+ seqin_ptr_read = MOVE(p, DESCBUF, 0, MATH1, 0, 6, IMMED);
+ seqin_ptr_write = MOVE(p, MATH1, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+ else
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, local_offset);
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ SEQINPTR(p, 0, 0, RTO);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+ SEQFIFOLOAD(p, SKIP, 5, 0);
+ MATHB(p, SEQINSZ, ADD, ONE, SEQINSZ, 4, 0);
+ }
+
+ MATHB(p, SEQINSZ, SUB, length, VSEQINSZ, 4, IMMED2);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0))
+ SEQFIFOLOAD(p, SKIP, length, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVEB(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ PATCH_MOVE(p, seqin_ptr_read, local_offset);
+ PATCH_MOVE(p, seqin_ptr_write, local_offset);
+ } else {
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ if (rta_sec_era >= RTA_SEC_ERA_5)
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+
+ MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+/*
+ * TODO: To be changed when proper support is added in RTA (currently a
+ * command that is also written by RTA can neither be loaded nor patched).
+ */
+ if (p->ps)
+ WORD(p, 0x168B0004);
+ else
+ WORD(p, 0x16880404);
+
+ jump_back_to_sd_cmd = JUMP(p, 0, LOCAL_JUMP, ALL_TRUE, 0);
+ /*
+ * Placeholder for command reading the SEQ OUT command in
+ * JD. Done for rereading the decrypted data and performing
+ * the integrity check.
+ */
+/*
+ * TODO: RTA currently doesn't support patching the length of a MOVE command.
+ * Thus, it is inserted as a raw word, as per PS setting.
+ */
+ if (p->ps)
+ seq_out_read = MOVE(p, DESCBUF, 0, MATH1, 0, 20,
+ WAITCOMP | IMMED);
+ else
+ seq_out_read = MOVE(p, DESCBUF, 0, MATH1, 0, 16,
+ WAITCOMP | IMMED);
+
+ MATHB(p, MATH1, XOR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 4,
+ IMMED2);
+ /* Placeholder for overwriting the SEQ IN with SEQ OUT */
+/*
+ * TODO: RTA currently doesn't support patching the length of a MOVE command.
+ * Thus, it is inserted as a raw word, as per PS setting.
+ */
+ if (p->ps)
+ MOVE(p, MATH1, 0, DESCBUF, 0, 24, IMMED);
+ else
+ MOVE(p, MATH1, 0, DESCBUF, 0, 20, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_4)
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+ else
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 8, IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ move_mac_i_to_desc_buf = MOVE(p, OFIFO, 0, DESCBUF, 0,
+ 4, WAITCOMP | IMMED);
+ else
+ MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+ else
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ /*
+ * Placeholder for jump in SD for executing the new SEQ IN PTR
+ * command (which is actually the old SEQ OUT PTR command
+ * copied over from JD).
+ */
+ SET_LABEL(p, jump_to_beginning);
+ JUMP(p, 1 - jump_to_beginning, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, back_to_sd_offset);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ /* Read the # of bytes written in the output buffer + 1 (HDR) */
+ MATHI(p, VSEQOUTSZ, ADD, length, VSEQINSZ, 4, IMMED2);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 8, IMMED);
+ else
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd)
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era >= RTA_SEC_ERA_4) {
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ } else {
+ SET_LABEL(p, fifo_load_mac_i_offset);
+ FIFOLOAD(p, ICV1, fifo_load_mac_i_offset, 4,
+ LAST1 | FLUSH1 | IMMED);
+ }
+
+ SET_LABEL(p, end_desc);
+
+ if (!p->ps) {
+ PATCH_MOVE(p, seq_out_read, end_desc + 1);
+ PATCH_JUMP(p, jump_back_to_sd_cmd,
+ back_to_sd_offset + jump_back_to_sd_cmd - 5);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ PATCH_MOVE(p, move_mac_i_to_desc_buf,
+ fifo_load_mac_i_offset + 1);
+ } else {
+ PATCH_MOVE(p, seq_out_read, end_desc + 2);
+ PATCH_JUMP(p, jump_back_to_sd_cmd,
+ back_to_sd_offset + jump_back_to_sd_cmd - 5);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ PATCH_MOVE(p, move_mac_i_to_desc_buf,
+ fifo_load_mac_i_offset + 1);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_aes_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+ (rta_sec_era == RTA_SEC_ERA_10)) {
+ int pclid;
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ pclid = OP_PCLID_LTE_PDCP_CTRL_MIXED;
+ else
+ pclid = OP_PCLID_LTE_PDCP_USER_RN;
+
+ PROTOCOL(p, dir, pclid,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+ /* Non-proto is supported only for 5bit cplane and 18bit uplane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, length, VSEQINSZ, 4, IMMED2);
+
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVEB(p, MATH0, offset, IFIFOAB2, 0, length, IMMED);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+
+ SEQSTORE(p, MATH0, offset, length, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVEB(p, MATH1, 0, CONTEXT1, 16, 8, IMMED);
+ MOVEB(p, MATH1, 0, CONTEXT2, 0, 4, IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_BEARER_MASK), MATH2, 4,
+ IMMED2);
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_DIR_MASK), MATH3, 4,
+ IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE), MATH3,
+ 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ MOVEB(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4, IMMED2);
+
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ else
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
+
+ if (rta_sec_era <= RTA_SEC_ERA_2) {
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
+ } else {
+ MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_snow_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ SET_LABEL(p, keyjump);
+
+ if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+ (rta_sec_era == RTA_SEC_ERA_10)) {
+ int pclid;
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ pclid = OP_PCLID_LTE_PDCP_CTRL_MIXED;
+ else
+ pclid = OP_PCLID_LTE_PDCP_USER_RN;
+
+ PROTOCOL(p, dir, pclid,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+	/* Non-proto is supported only for 5-bit c-plane and 18-bit u-plane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVEB(p, MATH0, offset, IFIFOAB2, 0, length, IMMED);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, offset, length, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_aes_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+ (rta_sec_era == RTA_SEC_ERA_10)) {
+ int pclid;
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ pclid = OP_PCLID_LTE_PDCP_CTRL_MIXED;
+ else
+ pclid = OP_PCLID_LTE_PDCP_USER_RN;
+
+ PROTOCOL(p, dir, pclid,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+	/* Non-proto is supported only for 5-bit c-plane and 18-bit u-plane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ SET_LABEL(p, keyjump);
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVEB(p, MATH0, offset, IFIFOAB2, 0, length, IMMED);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT1, 16, 8, IMMED);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, offset, length, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_zuc_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+ (rta_sec_era == RTA_SEC_ERA_10)) {
+ int pclid;
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ pclid = OP_PCLID_LTE_PDCP_CTRL_MIXED;
+ else
+ pclid = OP_PCLID_LTE_PDCP_USER_RN;
+
+ PROTOCOL(p, dir, pclid,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+	/* Non-proto is supported only for 5-bit c-plane and 18-bit u-plane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+ SET_LABEL(p, keyjump);
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVEB(p, MATH0, offset, IFIFOAB2, 0, length, IMMED);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVEB(p, MATH1, 0, CONTEXT1, 0, 8, IMMED);
+ MOVEB(p, MATH1, 0, CONTEXT2, 0, 4, IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_BEARER_MASK), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_DIR_MASK), MATH3,
+ 4, IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE), MATH3,
+ 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ MOVEB(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHB(p, VSEQOUTSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ }
+
+ SEQSTORE(p, MATH0, offset, length, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+			/*
+			 * From SEC ERA 6 onwards there is a problem with the
+			 * OFIFO pointer, so it needs to be reset here before
+			 * moving the ICV to M0.
+			 */
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ /* Put ICV to M0 before sending it to C2 for comparison. */
+ MOVEB(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVEB(p, MATH0, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_zuc_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ uint32_t offset = 0, length = 0, sn_mask = 0;
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+ (rta_sec_era == RTA_SEC_ERA_10)) {
+ int pclid;
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ if (sn_size == PDCP_SN_SIZE_5)
+ pclid = OP_PCLID_LTE_PDCP_CTRL_MIXED;
+ else
+ pclid = OP_PCLID_LTE_PDCP_USER_RN;
+
+ PROTOCOL(p, dir, pclid,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+	/* Non-proto is supported only for 5-bit c-plane and 18-bit u-plane */
+ switch (sn_size) {
+ case PDCP_SN_SIZE_5:
+ offset = 7;
+ length = 1;
+ sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
+ PDCP_C_PLANE_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_18:
+ offset = 5;
+ length = 3;
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ break;
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ case PDCP_SN_SIZE_15:
+ pr_err("Invalid sn_size for %s\n", __func__);
+ return -ENOTSUP;
+ }
+
+ SEQLOAD(p, MATH0, offset, length, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 4, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, offset, length, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVEB(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+ MOVEB(p, MATH0, offset, IFIFOAB1, 0, length, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ SEQINPTR(p, 0, PDCP_NULL_MAX_FRAME_LEN, RTO);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SEQFIFOLOAD(p, SKIP, length, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVEB(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ SEQINPTR(p, 0, 0, SOP);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVEB(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_uplane_no_int_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ unsigned int dir,
+ enum pdcp_sn_size sn_size)
+{
+ int op;
+ uint32_t sn_mask;
+
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_15) ||
+ (rta_sec_era >= RTA_SEC_ERA_10)) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ return 0;
+ }
+
+ if (sn_size == PDCP_SN_SIZE_15) {
+ SEQLOAD(p, MATH0, 6, 2, 0);
+ sn_mask = (swap == false) ? PDCP_U_PLANE_15BIT_SN_MASK :
+ PDCP_U_PLANE_15BIT_SN_MASK_BE;
+ } else { /* SN Size == PDCP_SN_SIZE_18 */
+ SEQLOAD(p, MATH0, 5, 3, 0);
+ sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
+ PDCP_U_PLANE_18BIT_SN_MASK_BE;
+ }
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, sn_mask, MATH1, 8, IFB | IMMED2);
+
+ if (sn_size == PDCP_SN_SIZE_15)
+ SEQSTORE(p, MATH0, 6, 2, 0);
+ else /* SN Size == PDCP_SN_SIZE_18 */
+ SEQSTORE(p, MATH0, 5, 3, 0);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ op = dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC;
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_SNOW:
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ case PDCP_CIPHER_TYPE_AES:
+ MOVEB(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
+ MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ default:
+		pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+		       "pdcp_insert_uplane_no_int_op", cipherdata->algtype);
+ return -EINVAL;
+ }
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ return 0;
+}
+
+/*
+ * Function for inserting the snippet of code responsible for implementing
+ * the HFN override mechanism, via either DPOVRD or the input frame.
+ */
+static inline int
+insert_hfn_ov_op(struct program *p,
+ uint32_t shift,
+ enum pdb_type_e pdb_type,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
+ uint16_t hfn_pdb_offset;
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && !era_2_sw_hfn_ovrd)
+ return 0;
+
+ switch (pdb_type) {
+ case PDCP_PDB_TYPE_NO_PDB:
+		/*
+		 * If there is no PDB, the HFN override mechanism does not
+		 * make any sense; in this case simply return success
+		 * without emitting any code.
+		 */
+ return 0;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ hfn_pdb_offset = 4;
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+ hfn_pdb_offset = 8;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
+ } else {
+ SEQLOAD(p, MATH0, 4, 4, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, imm, NONE, 8, IFB | IMMED2);
+ SEQSTORE(p, MATH0, 4, 4, 0);
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
+ else
+ MATHB(p, MATH0, LSHIFT, shift, MATH0, 4, IMMED2);
+
+ MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
+ MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8)
+ /*
+ * For ERA8, DPOVRD could be handled by the PROTOCOL command
+ * itself. For now, this is not done. Thus, clear DPOVRD here
+ * to alleviate any side-effects.
+ */
+ MATHB(p, DPOVRD, AND, ZERO, DPOVRD, 4, STL);
+
+ SET_LABEL(p, keyjump);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
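+/*
+ * A minimal sketch (hypothetical helper, not part of the original API)
+ * showing how a caller arms the override consumed by insert_hfn_ov_op():
+ * bit 31 of DPOVRD enables the mechanism, and the descriptor left-shifts
+ * the remaining bits by the SN size to rebuild the HFN word in the PDB.
+ */
+static inline uint32_t
+example_hfn_ov_dpovrd(uint32_t new_hfn)
+{
+	/* new_hfn is assumed to fit in the 31 low bits of DPOVRD */
+	return PDCP_DPOVRD_HFN_OV_EN | new_hfn;
+}
+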
+/*
+ * PDCP Control PDB creation function
+ */
+static inline enum pdb_type_e
+cnstr_pdcp_c_plane_pdb(struct program *p,
+ uint32_t hfn,
+ enum pdcp_sn_size sn_size,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct pdcp_pdb pdb;
+ enum pdb_type_e
+ pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ PDCP_PDB_TYPE_NO_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* AES CTR */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* ZUC-E */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ };
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+		/* To support 12-bit sequence numbers, the u-plane option is
+		 * used in the PDB; SEC supports 5-bit sequence numbers only
+		 * with the c-plane option.
+		 */
+ if (sn_size == PDCP_SN_SIZE_12) {
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
+
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
+
+ } else {
+ /* This means 5-bit c-plane.
+ * Here we use c-plane opt in pdb
+ */
+
+ /* This is a HW issue. Bit 2 should be set to zero,
+ * but it does not work this way. Override here.
+ */
+ pdb.opt_res.rsvd = 0x00000002;
+
+ /* Copy relevant information from user to PDB */
+ pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+ }
+
+		/* Copy the PDB into the descriptor */
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ return PDCP_PDB_TYPE_FULL_PDB;
+ }
+
+ switch (pdb_mask[cipherdata->algtype][authdata->algtype]) {
+ case PDCP_PDB_TYPE_NO_PDB:
+ break;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ __rta_out32(p, (hfn << PDCP_C_PLANE_PDB_HFN_SHIFT));
+ __rta_out32(p,
+ (uint32_t)((bearer <<
+ PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction <<
+ PDCP_C_PLANE_PDB_DIR_SHIFT)));
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ /* This is a HW issue. Bit 2 should be set to zero,
+ * but it does not work this way. Override here.
+ */
+ pdb.opt_res.rsvd = 0x00000002;
+
+ /* Copy relevant information from user to PDB */
+ pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+
+		/* Copy the PDB into the descriptor */
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ break;
+
+ default:
+ return PDCP_PDB_TYPE_INVALID;
+ }
+
+ return pdb_mask[cipherdata->algtype][authdata->algtype];
+}
+
+/*
+ * PDCP UPlane PDB creation function
+ */
+static inline enum pdb_type_e
+cnstr_pdcp_u_plane_pdb(struct program *p,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn, unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct pdcp_pdb pdb;
+ enum pdb_type_e pdb_type = PDCP_PDB_TYPE_FULL_PDB;
+ enum pdb_type_e
+ pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ PDCP_PDB_TYPE_NO_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* AES CTR */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* ZUC-E */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ };
+
+ /* Read options from user */
+ /* Depending on sequence number length, the HFN and HFN threshold
+ * have different lengths.
+ */
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ pdb.opt_res.opt |= PDCP_U_PLANE_PDB_OPT_SHORT_SN;
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_SHORT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_SHORT_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_12:
+ pdb.opt_res.opt &= (uint32_t)(~PDCP_U_PLANE_PDB_OPT_SHORT_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_15:
+ pdb.opt_res.opt = (uint32_t)(PDCP_U_PLANE_PDB_OPT_15B_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_15BIT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_15BIT_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_18:
+ pdb.opt_res.opt = (uint32_t)(PDCP_U_PLANE_PDB_OPT_18B_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_18BIT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_18BIT_SN_HFN_THR_SHIFT;
+
+ if (rta_sec_era <= RTA_SEC_ERA_8) {
+ if (cipherdata && authdata)
+ pdb_type = pdb_mask[cipherdata->algtype]
+ [authdata->algtype];
+ }
+ break;
+
+ default:
+ pr_err("Invalid Sequence Number Size setting in PDB\n");
+		return PDCP_PDB_TYPE_INVALID;
+ }
+
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
+
+ switch (pdb_type) {
+ case PDCP_PDB_TYPE_NO_PDB:
+ break;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+		/* Copy the PDB into the descriptor */
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ break;
+
+ default:
+ return PDCP_PDB_TYPE_INVALID;
+ }
+
+ return pdb_type;
+}
+/**
+ * cnstr_shdsc_pdcp_c_plane_encap - Function for creating a PDCP Control Plane
+ * encapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @sn_size: size of sequence numbers, only 5/12 bit sequence numbers are valid
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ * Return: size of descriptor written in words or negative number on error.
+ *         Once the function returns, this value can be used to reclaim the
+ *         buffer space that wasn't used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ uint32_t hfn,
+ enum pdcp_sn_size sn_size,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int, enum pdcp_sn_size,
+ unsigned char __maybe_unused) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_acc_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_acc_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_acc_op /* ZUC-I */
+ },
+ };
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ enum pdb_type_e pdb_type;
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for era other than 2\n");
+ return -EINVAL;
+ }
+
+ if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
+ pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+
+ pdb_type = cnstr_pdcp_c_plane_pdb(p,
+ hfn,
+ sn_size,
+ bearer,
+ direction,
+ hfn_threshold,
+ cipherdata,
+ authdata);
+
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, pdb_type,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ OP_TYPE_ENCAP_PROTOCOL,
+ sn_size,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ PATCH_HDR(p, 0, pdb_end);
+
+ return PROGRAM_FINALIZE(p);
+}
+
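+/*
+ * A minimal usage sketch (hypothetical, with made-up key addresses and
+ * session parameters) for cnstr_shdsc_pdcp_c_plane_encap() above: SNOW f8
+ * ciphering with AES CMAC integrity and 5-bit sequence numbers. It assumes
+ * the alginfo layout and RTA_DATA_PTR from sec_run_time_asm.h, and that
+ * rta_set_sec_era() was already called.
+ */
+static inline int
+example_cplane_encap(uint32_t *descbuf, uint64_t cipher_key_pa,
+		     uint64_t auth_key_pa)
+{
+	struct alginfo cipherdata = {
+		.algtype = PDCP_CIPHER_TYPE_SNOW,
+		.key = cipher_key_pa,
+		.keylen = 16,
+		.key_type = RTA_DATA_PTR,
+	};
+	struct alginfo authdata = {
+		.algtype = PDCP_AUTH_TYPE_AES,
+		.key = auth_key_pa,
+		.keylen = 16,
+		.key_type = RTA_DATA_PTR,
+	};
+
+	/* 36-bit addressing, no byte swap, HFN 0, bearer 3, downlink (1),
+	 * HFN threshold 0x70000000, no ERA 2 SW HFN override.
+	 */
+	return cnstr_shdsc_pdcp_c_plane_encap(descbuf, true, false, 0,
+					      PDCP_SN_SIZE_5, 3, 1,
+					      0x70000000, &cipherdata,
+					      &authdata, 0);
+}
+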
+/**
+ * cnstr_shdsc_pdcp_c_plane_decap - Function for creating a PDCP Control Plane
+ * decapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @sn_size: size of sequence numbers, only 5/12 bit sequence numbers are valid
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         Once the function returns, this value can be used to reclaim the
+ *         buffer space that wasn't used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ uint32_t hfn,
+ enum pdcp_sn_size sn_size,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int, enum pdcp_sn_size,
+ unsigned char) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_acc_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_acc_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_acc_op /* ZUC-I */
+ },
+ };
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ enum pdb_type_e pdb_type;
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for era other than 2\n");
+ return -EINVAL;
+ }
+
+ if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
+ pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+
+ pdb_type = cnstr_pdcp_c_plane_pdb(p,
+ hfn,
+ sn_size,
+ bearer,
+ direction,
+ hfn_threshold,
+ cipherdata,
+ authdata);
+
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, pdb_type,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ OP_TYPE_DECAP_PROTOCOL,
+ sn_size,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ PATCH_HDR(p, 0, pdb_end);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+static int
+pdcp_insert_uplane_with_int_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ enum pdcp_sn_size sn_size,
+ unsigned char era_2_sw_hfn_ovrd,
+ unsigned int dir)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int, enum pdcp_sn_size,
+ unsigned char __maybe_unused) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_uplane_snow_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_uplane_aes_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_uplane_zuc_zuc_op /* ZUC-I */
+ },
+ };
+ int err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ dir,
+ sn_size,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * cnstr_shdsc_pdcp_u_plane_encap - Function for creating a PDCP User Plane
+ * encapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @sn_size: selects Sequence Number Size: 7/12/15/18 bits
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ *             Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions; may be NULL
+ *             when no integrity protection is requested.
+ *             Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         Once the function returns, this value can be used to reclaim the
+ *         buffer space that wasn't used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn,
+ unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ enum pdb_type_e pdb_type;
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for era other than 2\n");
+ return -EINVAL;
+ }
+
+ if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
+		pr_err("Cannot use u-plane auth with era < 8\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ if (authdata)
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+ else
+ SHR_HDR(p, SHR_ALWAYS, 0, 0);
+ pdb_type = cnstr_pdcp_u_plane_pdb(p, sn_size, hfn,
+ bearer, direction, hfn_threshold,
+ cipherdata, authdata);
+ if (pdb_type == PDCP_PDB_TYPE_INVALID) {
+ pr_err("Error creating PDCP UPlane PDB\n");
+ return -EINVAL;
+ }
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ /* fallthrough */
+ case PDCP_CIPHER_TYPE_AES:
+ case PDCP_CIPHER_TYPE_SNOW:
+ case PDCP_CIPHER_TYPE_NULL:
+ if (rta_sec_era == RTA_SEC_ERA_8 &&
+			    authdata && authdata->algtype == 0) {
+ err = pdcp_insert_uplane_with_int_op(p, swap,
+ cipherdata, authdata,
+ sn_size, era_2_sw_hfn_ovrd,
+ OP_TYPE_ENCAP_PROTOCOL);
+ if (err)
+ return err;
+ break;
+ }
+
+ if (pdb_type != PDCP_PDB_TYPE_FULL_PDB) {
+ pr_err("PDB type must be FULL for PROTO desc\n");
+ return -EINVAL;
+ }
+
+ /* Insert auth key if requested */
+ if (authdata && authdata->algtype) {
+ KEY(p, KEY2, authdata->key_enc_flags,
+ (uint64_t)authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ }
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags,
+ (uint64_t)cipherdata->key, cipherdata->keylen,
+ INLINE_KEY(cipherdata));
+
+ if (authdata)
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER_RN,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ else
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ break;
+ default:
+			pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+			       "cnstr_shdsc_pdcp_u_plane_encap",
+			       cipherdata->algtype);
+ return -EINVAL;
+ }
+ break;
+
+ case PDCP_SN_SIZE_15:
+ case PDCP_SN_SIZE_18:
+ if (authdata) {
+ err = pdcp_insert_uplane_with_int_op(p, swap,
+ cipherdata, authdata,
+ sn_size, era_2_sw_hfn_ovrd,
+ OP_TYPE_ENCAP_PROTOCOL);
+ if (err)
+ return err;
+
+ break;
+ }
+
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ break;
+
+ default:
+ err = pdcp_insert_uplane_no_int_op(p, swap, cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL, sn_size);
+ if (err)
+ return err;
+ break;
+ }
+ break;
+
+ case PDCP_SN_SIZE_5:
+ default:
+ pr_err("Invalid SN size selected\n");
+ return -ENOTSUP;
+ }
+
+ PATCH_HDR(p, 0, pdb_end);
+ return PROGRAM_FINALIZE(p);
+}
+
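+/*
+ * A minimal usage sketch (hypothetical parameters) for
+ * cnstr_shdsc_pdcp_u_plane_encap() above: ZUC-E ciphering, 18-bit sequence
+ * numbers, no integrity protection (authdata == NULL). ZUC requires SEC
+ * ERA 5 or later. alginfo/RTA_DATA_PTR come from sec_run_time_asm.h.
+ */
+static inline int
+example_uplane_encap(uint32_t *descbuf, uint64_t cipher_key_pa)
+{
+	struct alginfo cipherdata = {
+		.algtype = PDCP_CIPHER_TYPE_ZUC,
+		.key = cipher_key_pa,
+		.keylen = 16,
+		.key_type = RTA_DATA_PTR,
+	};
+
+	/* 18-bit SN, HFN 0, bearer 0, uplink (0), no SW HFN override */
+	return cnstr_shdsc_pdcp_u_plane_encap(descbuf, true, false,
+					      PDCP_SN_SIZE_18, 0, 0, 0,
+					      0x70000000, &cipherdata,
+					      NULL, 0);
+}
+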
+/**
+ * cnstr_shdsc_pdcp_u_plane_decap - Function for creating a PDCP User Plane
+ * decapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @sn_size: selects Sequence Number Size: 7/12/15/18 bits
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ *             Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions; may be NULL
+ *             when no integrity protection is requested.
+ *             Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         Once the function returns, this value can be used to reclaim the
+ *         buffer space that wasn't used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn,
+ unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ enum pdb_type_e pdb_type;
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for era other than 2\n");
+ return -EINVAL;
+ }
+
+ if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
+		pr_err("Cannot use u-plane auth with era < 8\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ if (authdata)
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+ else
+ SHR_HDR(p, SHR_ALWAYS, 0, 0);
+
+ pdb_type = cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer,
+ direction, hfn_threshold,
+ cipherdata, authdata);
+ if (pdb_type == PDCP_PDB_TYPE_INVALID) {
+ pr_err("Error creating PDCP UPlane PDB\n");
+ return -EINVAL;
+ }
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ /* fallthrough */
+ case PDCP_CIPHER_TYPE_AES:
+ case PDCP_CIPHER_TYPE_SNOW:
+ case PDCP_CIPHER_TYPE_NULL:
+ if (pdb_type != PDCP_PDB_TYPE_FULL_PDB) {
+ pr_err("PDB type must be FULL for PROTO desc\n");
+ return -EINVAL;
+ }
+
+ /* Insert auth key if requested */
+ if (authdata && authdata->algtype)
+ KEY(p, KEY2, authdata->key_enc_flags,
+ (uint64_t)authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags,
+ cipherdata->key, cipherdata->keylen,
+ INLINE_KEY(cipherdata));
+ if (authdata)
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER_RN,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ else
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ break;
+ default:
+			pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+			       "cnstr_shdsc_pdcp_u_plane_decap",
+			       cipherdata->algtype);
+ return -EINVAL;
+ }
+ break;
+
+ case PDCP_SN_SIZE_15:
+ case PDCP_SN_SIZE_18:
+ if (authdata) {
+ err = pdcp_insert_uplane_with_int_op(p, swap,
+ cipherdata, authdata,
+ sn_size, era_2_sw_hfn_ovrd,
+ OP_TYPE_DECAP_PROTOCOL);
+ if (err)
+ return err;
+
+ break;
+ }
+
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ break;
+
+ default:
+ err = pdcp_insert_uplane_no_int_op(p, swap, cipherdata,
+ OP_TYPE_DECAP_PROTOCOL, sn_size);
+ if (err)
+ return err;
+ break;
+ }
+ break;
+
+ case PDCP_SN_SIZE_5:
+ default:
+ pr_err("Invalid SN size selected\n");
+ return -ENOTSUP;
+ }
+
+ PATCH_HDR(p, 0, pdb_end);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_pdcp_short_mac - Function for creating a PDCP Short MAC
+ * descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         Once the function returns, this value can be used to reclaim the
+ *         buffer space that wasn't used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t iv[3] = {0, 0, 0};
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MOVE(p, MATH1, 0, MATH0, 0, 8, IMMED);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ switch (authdata->algtype) {
+ case PDCP_AUTH_TYPE_NULL:
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ LOAD(p, (uintptr_t)iv, MATH0, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, MATH0, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_SNOW:
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0x04000000) : 0x04000000;
+ iv[2] = swap ? swab32(0xF8000000) : 0xF8000000;
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_AES:
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
+ iv[2] = 0x00000000; /* unused */
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, MATH0, 0, 8, IMMED | COPY);
+ MOVE(p, MATH0, 0, IFIFOAB1, 0, 8, IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT1, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
+ iv[2] = 0x00000000; /* unused */
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ default:
+ pr_err("%s: Invalid integrity algorithm selected: %d\n",
+ "cnstr_shdsc_pdcp_short_mac", authdata->algtype);
+ return -EINVAL;
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return PROGRAM_FINALIZE(p);
+}
+
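+/*
+ * A minimal usage sketch (hypothetical parameters) for
+ * cnstr_shdsc_pdcp_short_mac() above, computing a Short MAC-I with
+ * SNOW f9. alginfo/RTA_DATA_PTR come from sec_run_time_asm.h.
+ */
+static inline int
+example_short_mac(uint32_t *descbuf, uint64_t auth_key_pa)
+{
+	struct alginfo authdata = {
+		.algtype = PDCP_AUTH_TYPE_SNOW,
+		.key = auth_key_pa,
+		.keylen = 16,
+		.key_type = RTA_DATA_PTR,
+	};
+
+	return cnstr_shdsc_pdcp_short_mac(descbuf, true, false, &authdata);
+}
+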
+#endif /* __DESC_PDCP_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta.h
new file mode 100644
index 000000000..c4bbad0b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta.h
@@ -0,0 +1,921 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_RTA_H__
+#define __RTA_RTA_H__
+
+#include "rta/sec_run_time_asm.h"
+#include "rta/fifo_load_store_cmd.h"
+#include "rta/header_cmd.h"
+#include "rta/jump_cmd.h"
+#include "rta/key_cmd.h"
+#include "rta/load_cmd.h"
+#include "rta/math_cmd.h"
+#include "rta/move_cmd.h"
+#include "rta/nfifo_cmd.h"
+#include "rta/operation_cmd.h"
+#include "rta/protocol_cmd.h"
+#include "rta/seq_in_out_ptr_cmd.h"
+#include "rta/signature_cmd.h"
+#include "rta/store_cmd.h"
+
+/**
+ * DOC: About
+ *
+ * RTA (Runtime Assembler) Library is an easy and flexible runtime method for
+ * writing SEC descriptors. It implements a thin abstraction layer above
+ * SEC commands set; the resulting code is compact and similar to a
+ * descriptor sequence.
+ *
+ * The RTA library improves comprehension of the SEC code, adds flexibility
+ * for writing complex descriptors and keeps the code lightweight. It should
+ * be used by anyone who needs to encode descriptors at runtime, with
+ * comprehensible flow control in the descriptor.
+ */
+
+/**
+ * DOC: Usage
+ *
+ * RTA is used in kernel space by the SEC / CAAM (Cryptographic Acceleration and
+ * Assurance Module) kernel module (drivers/crypto/caam) and SEC / CAAM QI
+ * kernel module (Freescale QorIQ SDK).
+ *
+ * RTA is used in user space by USDPAA - User Space DataPath Acceleration
+ * Architecture (Freescale QorIQ SDK).
+ */
+
+/**
+ * DOC: Descriptor Buffer Management Routines
+ *
+ * Contains details of RTA descriptor buffer management and SEC Era
+ * management routines.
+ */
+
+/**
+ * PROGRAM_CNTXT_INIT - must be called before any descriptor run-time assembly
+ *                      call; the type field carries info on whether the
+ *                      descriptor is a shared or a job descriptor.
+ * @program: pointer to struct program
+ * @buffer: input buffer where the descriptor will be placed (uint32_t *)
+ * @offset: offset in input buffer from where the data will be written
+ * (unsigned int)
+ */
+#define PROGRAM_CNTXT_INIT(program, buffer, offset) \
+ rta_program_cntxt_init(program, buffer, offset)
+
+/**
+ * PROGRAM_FINALIZE - must be called to mark completion of RTA call.
+ * @program: pointer to struct program
+ *
+ * Return: total size of the descriptor in words or negative number on error.
+ */
+#define PROGRAM_FINALIZE(program) rta_program_finalize(program)
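+
+/*
+ * Example - illustrative sketch of the canonical build flow (buffer size,
+ * share type and start index below are arbitrary choices for the example):
+ *
+ *	uint32_t desc[64];
+ *	struct program prg;
+ *	struct program *p = &prg;
+ *
+ *	PROGRAM_CNTXT_INIT(p, desc, 0);
+ *	SHR_HDR(p, SHR_ALWAYS, 1, 0);
+ *	... emit SEC commands ...
+ *	return PROGRAM_FINALIZE(p);
+ */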
+
+/**
+ * PROGRAM_SET_36BIT_ADDR - must be called to set pointer size to 36 bits
+ * @program: pointer to struct program
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_36BIT_ADDR(program) rta_program_set_36bit_addr(program)
+
+/**
+ * PROGRAM_SET_BSWAP - must be called to enable byte swapping
+ * @program: pointer to struct program
+ *
+ * Byte swapping on a 4-byte boundary will be performed at the end - when
+ * calling PROGRAM_FINALIZE().
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_BSWAP(program) rta_program_set_bswap(program)
+
+/**
+ * WORD - must be called to insert a 32-bit value in the descriptor buffer
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint32_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define WORD(program, val) rta_word(program, val)
+
+/**
+ * DWORD - must be called to insert a 64-bit value in the descriptor buffer
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint64_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define DWORD(program, val) rta_dword(program, val)
+
+/**
+ * COPY_DATA - must be called to insert data larger than 64 bits in the
+ *             descriptor buffer.
+ * @program: pointer to struct program
+ * @data: input data to be written in descriptor buffer (uint8_t *)
+ * @len: length of input data (unsigned int)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define COPY_DATA(program, data, len) rta_copy_data(program, (data), (len))
+
+/**
+ * DESC_LEN - determines job / shared descriptor buffer length (in words)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in words (unsigned int).
+ */
+#define DESC_LEN(buffer) rta_desc_len(buffer)
+
+/**
+ * DESC_BYTES - determines job / shared descriptor buffer length (in bytes)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in bytes (unsigned int).
+ */
+#define DESC_BYTES(buffer) rta_desc_bytes(buffer)
+
+/*
+ * SEC HW block revision.
+ *
+ * This *must not be confused with SEC version*:
+ * - SEC HW block revision format is "v"
+ * - SEC revision format is "x.y"
+ */
+extern enum rta_sec_era rta_sec_era;
+
+/**
+ * rta_set_sec_era - Set SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ * @era: SEC Era (enum rta_sec_era)
+ *
+ * Return: 0 if the ERA was set successfully, -1 otherwise (int)
+ *
+ * Warning 1: Must be called *only once*, *before* using any other RTA API
+ * routine.
+ *
+ * Warning 2: *Not thread safe*.
+ */
+static inline int
+rta_set_sec_era(enum rta_sec_era era)
+{
+ if (era > MAX_SEC_ERA) {
+ rta_sec_era = DEFAULT_SEC_ERA;
+ pr_err("Unsupported SEC ERA. Defaulting to ERA %d\n",
+ DEFAULT_SEC_ERA + 1);
+ return -1;
+ }
+
+ rta_sec_era = era;
+ return 0;
+}
+
+/**
+ * rta_get_sec_era - Get SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ *
+ * Return: SEC Era (unsigned int).
+ */
+static inline unsigned int
+rta_get_sec_era(void)
+{
+ return rta_sec_era;
+}
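+
+/*
+ * Example - illustrative sketch; the era is configured once, before any other
+ * RTA call (the era value below is an arbitrary choice):
+ *
+ *	if (rta_set_sec_era(RTA_SEC_ERA_10)) {
+ *		pr_err("Couldn't set SEC Era\n");
+ *		return -1;
+ *	}
+ */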
+
+/**
+ * DOC: SEC Commands Routines
+ *
+ * Contains details of RTA wrapper routines over SEC engine commands.
+ */
+
+/**
+ * SHR_HDR - Configures Shared Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the shared
+ * descriptor should start (@c unsigned int).
+ * @flags: operational flags: RIF, DNR, CIF, SC, PD
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SHR_HDR(program, share, start_idx, flags) \
+ rta_shr_header(program, share, start_idx, flags)
+
+/**
+ * JOB_HDR - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR(program, share, start_idx, share_desc, flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags, 0)
+
+/**
+ * JOB_HDR_EXT - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ * @ext_flags: extended header flags: DSV (DECO Select Valid), DECO Id (limited
+ * by DSEL_MASK).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR_EXT(program, share, start_idx, share_desc, flags, ext_flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags | EXT, \
+ ext_flags)
+
+/**
+ * MOVE - Configures MOVE and MOVE_LEN commands
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVE(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVE, src, src_offset, dst, dst_offset, length, opt)
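+
+/*
+ * Example - the MOVE_LEN variant used by the PDCP short MAC descriptor
+ * earlier in this patch: the number of bytes moved from the class 1
+ * alignment block to the output FIFO is taken from MATH1 at run time:
+ *
+ *	MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ */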
+
+/**
+ * MOVEB - Configures MOVEB command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical to the MOVE command if byte swapping is not enabled; otherwise,
+ * when src/dst is the descriptor buffer or a MATH register, the data type is
+ * a byte array where the MOVE data type is a 4-byte array, and vice versa.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEB(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEB, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * MOVEDW - Configures MOVEDW command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical to the MOVE command, with the following differences: the data
+ * type is an 8-byte array and word swapping is performed when the SEC is
+ * programmed in little endian mode.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEDW(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEDW, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * FIFOLOAD - Configures FIFOLOAD command to load message data, PKHA data, IV,
+ * ICV, AAD and bit length message data into Input Data FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @src: pointer or actual data in case of immediate load; IMMED, COPY and DCOPY
+ * flags indicate action taken (inline imm data, inline ptr, inline from
+ * ptr).
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, IMMED, EXT, CLASS1, CLASS2, BOTH, FLUSH1,
+ * LAST1, LAST2, COPY, DCOPY.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOLOAD(program, data, src, length, flags) \
+ rta_fifo_load(program, data, src, length, flags)
+
+/**
+ * SEQFIFOLOAD - Configures SEQ FIFOLOAD command to load message data, PKHA
+ * data, IV, ICV, AAD and bit length message data into Input Data
+ * FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CLASS1, CLASS2, BOTH, FLUSH1, LAST1, LAST2,
+ * AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOLOAD(program, data, length, flags) \
+ rta_fifo_load(program, data, NONE, length, flags|SEQ)
+
+/**
+ * FIFOSTORE - Configures FIFOSTORE command, to move data from Output Data FIFO
+ * to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: SGF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOSTORE(program, data, encrypt_flags, dst, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, dst, length, flags)
+
+/**
+ * SEQFIFOSTORE - Configures SEQ FIFOSTORE command, to move data from Output
+ * Data FIFO to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, METADATA, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @length: number of bytes to store; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOSTORE(program, data, encrypt_flags, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, 0, length, flags|SEQ)
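+
+/*
+ * Example - a variable-length store paired with a snooped variable-length
+ * load, as used by the PDCP short MAC descriptor earlier in this patch:
+ *
+ *	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ *	SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ */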
+
+/**
+ * KEY - Configures KEY and SEQ KEY commands
+ * @program: pointer to struct program
+ * @key_dst: key store location: KEY1, KEY2, PKE, AFHA_SBOX, MDHA_SPLIT_KEY
+ * @encrypt_flags: key encryption mode: ENC, EKT, TK, NWB, PTS
+ * @src: pointer or actual data in case of immediate load (uint64_t); IMMED,
+ * COPY and DCOPY flags indicate action taken (inline imm data,
+ * inline ptr, inline from ptr).
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: for KEY: SGF, IMMED, COPY, DCOPY; for SEQKEY: SEQ,
+ * VLF, AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define KEY(program, key_dst, encrypt_flags, src, length, flags) \
+ rta_key(program, key_dst, encrypt_flags, src, length, flags)
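+
+/*
+ * Example - loading an authentication key into the class 2 key register, as
+ * done by the PDCP short MAC descriptor earlier in this patch (authdata
+ * describes the key; INLINE_KEY() resolves the inline/pointer flags):
+ *
+ *	KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ *	    authdata->keylen, INLINE_KEY(authdata));
+ */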
+
+/**
+ * SEQINPTR - Configures SEQ IN PTR command
+ * @program: pointer to struct program
+ * @src: starting address for Input Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Input Sequence (uint32_t)
+ * @flags: operational flags: RBS, INL, SGF, PRE, EXT, RTO, RJD, SOP (when PRE,
+ * RTO or SOP are set, @src parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQINPTR(program, src, length, flags) \
+ rta_seq_in_ptr(program, src, length, flags)
+
+/**
+ * SEQOUTPTR - Configures SEQ OUT PTR command
+ * @program: pointer to struct program
+ * @dst: starting address for Output Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Output Sequence (uint32_t)
+ * @flags: operational flags: SGF, PRE, EXT, RTO, RST, EWS (when PRE or RTO are
+ * set, @dst parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQOUTPTR(program, dst, length, flags) \
+ rta_seq_out_ptr(program, dst, length, flags)
+
+/**
+ * ALG_OPERATION - Configures ALGORITHM OPERATION command
+ * @program: pointer to struct program
+ * @cipher_alg: algorithm to be used
+ * @aai: Additional Algorithm Information; contains mode information that is
+ * associated with the algorithm (check desc.h for specific values).
+ * @algo_state: algorithm state; defines the state of the algorithm that is
+ * being executed (check desc.h file for specific values).
+ * @icv_check: ICV checking; selects whether the algorithm should check
+ * calculated ICV with known ICV: ICV_CHECK_ENABLE,
+ * ICV_CHECK_DISABLE.
+ * @enc: selects between encryption and decryption: DIR_ENC, DIR_DEC
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define ALG_OPERATION(program, cipher_alg, aai, algo_state, icv_check, enc) \
+ rta_operation(program, cipher_alg, aai, algo_state, icv_check, enc)
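+
+/*
+ * Example - the ZUC integrity operation programmed by the PDCP short MAC
+ * descriptor earlier in this patch:
+ *
+ *	ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA, OP_ALG_AAI_F9,
+ *		      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ */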
+
+/**
+ * PROTOCOL - Configures PROTOCOL OPERATION command
+ * @program: pointer to struct program
+ * @optype: operation type: OP_TYPE_UNI_PROTOCOL / OP_TYPE_DECAP_PROTOCOL /
+ * OP_TYPE_ENCAP_PROTOCOL.
+ * @protid: protocol identifier value (check desc.h file for specific values)
+ * @protoinfo: protocol dependent value (check desc.h file for specific values)
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PROTOCOL(program, optype, protid, protoinfo) \
+ rta_proto_operation(program, optype, protid, protoinfo)
+
+/**
+ * DKP_PROTOCOL - Configures DKP (Derived Key Protocol) PROTOCOL command
+ * @program: pointer to struct program
+ * @protid: protocol identifier value - one of the following:
+ * OP_PCLID_DKP_{MD5 | SHA1 | SHA224 | SHA256 | SHA384 | SHA512}
+ * @key_src: How the initial ("negotiated") key is provided to the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_SRC_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @key_dst: How the derived ("split") key is returned by the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_DST_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @keylen: length of the initial key, in bytes (uint16_t)
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_type: enum rta_data_type
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define DKP_PROTOCOL(program, protid, key_src, key_dst, keylen, key, key_type) \
+ rta_dkp_proto(program, protid, key_src, key_dst, keylen, key, key_type)
+
+/**
+ * PKHA_OPERATION - Configures PKHA OPERATION command
+ * @program: pointer to struct program
+ * @op_pkha: PKHA operation; indicates the modular arithmetic function to
+ * execute (check desc.h file for specific values).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PKHA_OPERATION(program, op_pkha) rta_pkha_operation(program, op_pkha)
+
+/**
+ * JUMP - Configures JUMP command
+ * @program: pointer to struct program
+ * @addr: local offset for local jumps or address pointer for non-local jumps;
+ * IMM or PTR macros must be used to indicate type.
+ * @jump_type: type of action taken by jump (enum rta_jump_type)
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: operational flags - DONE1, DONE2, BOTH; various
+ * sharing and wait conditions (JSL = 1) - NIFP, NIP, NOP, NCP, CALM,
+ * SELF, SHARED, JQP; Math and PKHA status conditions (JSL = 0) - Z, N,
+ *        SELF, SHRD, JQP; Math and PKHA status conditions (JSL = 0) - Z, N,
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP(program, addr, jump_type, test_type, cond) \
+ rta_jump(program, addr, jump_type, test_type, cond, NONE)
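+
+/*
+ * Example - illustrative sketch: halt with a user-defined status when the
+ * math zero flag is set (the 0x42 status value is an arbitrary choice):
+ *
+ *	JUMP(p, 0x42, HALT_STATUS, ALL_TRUE, MATH_Z);
+ */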
+
+/**
+ * JUMP_INC - Configures JUMP_INC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): Z, N, NV, C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_INC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_INC, test_type, cond, src_dst)
+
+/**
+ * JUMP_DEC - Configures JUMP_DEC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): Z, N, NV, C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_DEC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_DEC, test_type, cond, src_dst)
+
+/**
+ * LOAD - Configures LOAD command to load data registers from descriptor or from
+ * a memory location.
+ * @program: pointer to struct program
+ * @addr: immediate value or pointer to the data to be loaded; IMMED, COPY and
+ * DCOPY flags indicate action taken (inline imm data, inline ptr, inline
+ * from ptr).
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define LOAD(program, addr, dst, offset, length, flags) \
+ rta_load(program, addr, dst, offset, length, flags)
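+
+/*
+ * Example - loading a 12-byte immediate IV into the class 2 context register,
+ * as done by the PDCP short MAC descriptor earlier in this patch:
+ *
+ *	uint32_t iv[3];
+ *	... fill in iv ...
+ *	LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ */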
+
+/**
+ * SEQLOAD - Configures SEQ LOAD command to load data registers from descriptor
+ * or from a memory location.
+ * @program: pointer to struct program
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQLOAD(program, dst, offset, length, flags) \
+ rta_load(program, NONE, dst, offset, length, flags|SEQ)
+
+/**
+ * STORE - Configures STORE command to read data from registers and write them
+ * to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define STORE(program, src, offset, dst, length, flags) \
+ rta_store(program, src, offset, dst, length, flags)
+
+/**
+ * SEQSTORE - Configures SEQ STORE command to read data from registers and write
+ * them to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: SGF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQSTORE(program, src, offset, length, flags) \
+ rta_store(program, src, offset, NONE, length, flags|SEQ)
+
+/**
+ * MATHB - Configures MATHB command to perform binary operations
+ * @program: pointer to struct program
+ * @operand1: first operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, SHLD.
+ * @operand2: second operand: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD,
+ * OFIFO, JOBSRC, ZERO, ONE, Immediate value. IMMED2 must be used to
+ * indicate immediate value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: IFB, NFU, STL, SWP, IMMED, IMMED2
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHB(program, operand1, operator, operand2, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, operand2, result, \
+ length, opt)
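+
+/*
+ * Example - illustrative sketch: compute the variable input sequence length
+ * by subtracting a header length held in MATH2 from SEQINSZ (the register
+ * choice is arbitrary for the example):
+ *
+ *	MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ */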
+
+/**
+ * MATHI - Configures MATHI command to perform binary operations
+ * @program: pointer to struct program
+ * @operand: if !SSEL: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE.
+ * if SSEL: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD, OFIFO,
+ * JOBSRC, ZERO, ONE.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, FBYT (for !SSEL only).
+ * @imm: Immediate value (uint8_t). IMMED must be used to indicate immediate
+ * value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int). @imm is left-extended with zeros if needed.
+ * @opt: operational flags: NFU, SSEL, SWP, IMMED
+ *
+ * If !SSEL, @operand <@operator> @imm -> @result
+ * If SSEL, @imm <@operator> @operand -> @result
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHI(program, operand, operator, imm, result, length, opt) \
+ rta_mathi(program, operand, MATH_FUN_##operator, imm, result, length, \
+ opt)
+
+/**
+ * MATHU - Configures MATHU command to perform unary operations
+ * @program: pointer to struct program
+ * @operand1: operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ZBYT, BSWAP
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: NFU, STL, SWP, IMMED
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHU(program, operand1, operator, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, NONE, result, length, \
+ opt)
+
+/**
+ * SIGNATURE - Configures SIGNATURE command
+ * @program: pointer to struct program
+ * @sign_type: signature type: SIGN_TYPE_FINAL, SIGN_TYPE_FINAL_RESTORE,
+ * SIGN_TYPE_FINAL_NONZERO, SIGN_TYPE_IMM_2, SIGN_TYPE_IMM_3,
+ * SIGN_TYPE_IMM_4.
+ *
+ * After SIGNATURE command, DWORD or WORD must be used to insert signature in
+ * descriptor buffer.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SIGNATURE(program, sign_type) rta_signature(program, sign_type)
+
+/**
+ * NFIFOADD - Configures NFIFO command, a shortcut of RTA Load command to write
+ * to iNfo FIFO.
+ * @program: pointer to struct program
+ * @src: source for the input data in Alignment Block: IFIFO, OFIFO, PAD,
+ * MSGOUTSNOOP, ALTSOURCE, OFIFO_SYNC, MSGOUTSNOOP_ALT.
+ * @data: type of data that is going through the Input Data FIFO: MSG, MSG1,
+ * MSG2, IV1, IV2, ICV1, ICV2, SAD1, AAD1, AAD2, AFHA_SBOX, SKIP,
+ * PKHA registers, AB1, AB2, ABD.
+ * @length: length of the data copied in FIFO registers (uint32_t)
+ * @flags: select options between:
+ * -operational flags: LAST1, LAST2, FLUSH1, FLUSH2, OC, BP
+ * -when PAD is selected as source: BM, PR, PS
+ *        -padding type: PAD_ZERO, PAD_NONZERO, PAD_INCREMENT, PAD_RANDOM,
+ *         PAD_ZERO_N1, PAD_NONZERO_0, PAD_N1, PAD_NONZERO_N
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define NFIFOADD(program, src, data, length, flags) \
+ rta_nfifo_load(program, src, data, length, flags)
+
+/**
+ * DOC: Self Referential Code Management Routines
+ *
+ * Contains details of RTA self referential code routines.
+ */
+
+/**
+ * REFERENCE - initialize a variable used for storing an index inside a
+ * descriptor buffer.
+ * @ref: reference to a descriptor buffer's index where an update is required
+ *       with a value that will be known later in the program flow.
+ */
+#define REFERENCE(ref) int ref = -1
+
+/**
+ * LABEL - initialize a variable used for storing an index inside a descriptor
+ * buffer.
+ * @label: stores the value with which the REFERENCE line in the descriptor
+ *         buffer should be updated.
+ */
+#define LABEL(label) unsigned int label = 0
+
+/**
+ * SET_LABEL - set a LABEL value
+ * @program: pointer to struct program
+ * @label: value that will be inserted in a line previously written in the
+ * descriptor buffer.
+ */
+#define SET_LABEL(program, label) (label = rta_set_label(program))
+
+/**
+ * PATCH_JUMP - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For JUMP command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_JUMP(program, line, new_ref) rta_patch_jmp(program, line, new_ref)
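+
+/*
+ * Example - illustrative sketch of patching a forward jump once its target
+ * offset becomes known:
+ *
+ *	LABEL(skip);
+ *	REFERENCE(pjump);
+ *	...
+ *	pjump = JUMP(p, 0, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+ *	... commands skipped when the condition holds ...
+ *	SET_LABEL(p, skip);
+ *	...
+ *	PATCH_JUMP(p, pjump, skip);
+ */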
+
+/**
+ * PATCH_MOVE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For MOVE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_MOVE(program, line, new_ref) \
+ rta_patch_move(program, line, new_ref)
+
+/**
+ * PATCH_LOAD - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For LOAD command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_LOAD(program, line, new_ref) \
+ rta_patch_load(program, line, new_ref)
+
+/**
+ * PATCH_STORE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For STORE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_STORE(program, line, new_ref) \
+ rta_patch_store(program, line, new_ref)
+
+/**
+ * PATCH_HDR - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For HEADER command, the value represents the start index field.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_HDR(program, line, new_ref) \
+ rta_patch_header(program, line, new_ref)
+
+/**
+ * PATCH_RAW - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @mask: mask to be used for applying the new value (unsigned int). The mask
+ * selects which bits from the provided @new_val are taken into
+ * consideration when overwriting the existing value.
+ * @new_val: updated value that will be masked using the provided mask value
+ * and inserted in descriptor buffer at the specified line.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_RAW(program, line, mask, new_val) \
+ rta_patch_raw(program, line, mask, new_val)
+
+#endif /* __RTA_RTA_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/fifo_load_store_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/fifo_load_store_cmd.h
new file mode 100644
index 000000000..287e09cd7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/fifo_load_store_cmd.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_FIFO_LOAD_STORE_CMD_H__
+#define __RTA_FIFO_LOAD_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t fifo_load_table[][2] = {
+/*1*/ { PKA0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A0 },
+ { PKA1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A1 },
+ { PKA2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A2 },
+ { PKA3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A3 },
+ { PKB0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B0 },
+ { PKB1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B1 },
+ { PKB2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B2 },
+ { PKB3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B3 },
+ { PKA, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A },
+ { PKB, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B },
+ { PKN, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_N },
+ { SKIP, FIFOLD_CLASS_SKIP },
+ { MSG1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG },
+ { MSG2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG },
+ { MSGOUTSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG1OUT2 },
+ { MSGINSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG },
+ { IV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV },
+ { IV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_IV },
+ { AAD1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_AAD },
+ { ICV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV },
+ { ICV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV },
+ { BIT_DATA, FIFOLD_TYPE_BITDATA },
+/*23*/ { IFIFO, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_NOINFOFIFO }
+};
+
+/*
+ * Allowed FIFO_LOAD input data types for each SEC Era.
+ * Values represent the number of entries from fifo_load_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_load_table_sz[] = {22, 22, 23, 23,
+ 23, 23, 23, 23,
+ 23, 23};
+
+static inline int
+rta_fifo_load(struct program *program, uint32_t src,
+ uint64_t loc, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t ext_length = 0, val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_LOAD;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_LOAD;
+ }
+
+ /* Parameters checking */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQ FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) && (flags & AIDF)) {
+ pr_err("SEQ FIFO LOAD: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & VLF) && ((flags & EXT) || (length >> 16))) {
+ pr_err("SEQ FIFO LOAD: Invalid usage of VLF\n");
+ goto err;
+ }
+ } else {
+ if (src == SKIP) {
+ pr_err("FIFO LOAD: Invalid src\n");
+ goto err;
+ }
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("FIFO LOAD: Invalid usage of SGF and IMM\n");
+ goto err;
+ }
+ if ((flags & IMMED) && ((flags & EXT) || (length >> 16))) {
+ pr_err("FIFO LOAD: Invalid usage of EXT and IMM\n");
+ goto err;
+ }
+ }
+
+ /* write input data type field */
+ ret = __rta_map_opcode(src, fifo_load_table,
+ fifo_load_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO LOAD: Source value is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (flags & CLASS1)
+ opcode |= FIFOLD_CLASS_CLASS1;
+ if (flags & CLASS2)
+ opcode |= FIFOLD_CLASS_CLASS2;
+ if (flags & BOTH)
+ opcode |= FIFOLD_CLASS_BOTH;
+
+ /* write fields: SGF|VLF, IMM, [LC1, LC2, F1] */
+ if (flags & FLUSH1)
+ opcode |= FIFOLD_TYPE_FLUSH1;
+ if (flags & LAST1)
+ opcode |= FIFOLD_TYPE_LAST1;
+ if (flags & LAST2)
+ opcode |= FIFOLD_TYPE_LAST2;
+ if (!is_seq_cmd) {
+ if (flags & SGF)
+ opcode |= FIFOLDST_SGF;
+ if (flags & IMMED)
+ opcode |= FIFOLD_IMM;
+ } else {
+ if (flags & VLF)
+ opcode |= FIFOLDST_VLF;
+ if (flags & AIDF)
+ opcode |= FIFOLD_AIDF;
+ }
+
+ /*
+ * Verify if extended length is required. In case of BITDATA, calculate
+ * number of full bytes and additional valid bits.
+ */
+ if ((flags & EXT) || (length >> 16)) {
+ opcode |= FIFOLDST_EXT;
+ if (src == BIT_DATA) {
+ ext_length = (length / 8);
+ length = (length % 8);
+ } else {
+ ext_length = length;
+ length = 0;
+ }
+ }
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (flags & IMMED)
+ __rta_inline_data(program, loc, flags & __COPY_MASK, length);
+ else if (!is_seq_cmd)
+ __rta_out64(program, program->ps, loc);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, ext_length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static const uint32_t fifo_store_table[][2] = {
+/*1*/ { PKA0, FIFOST_TYPE_PKHA_A0 },
+ { PKA1, FIFOST_TYPE_PKHA_A1 },
+ { PKA2, FIFOST_TYPE_PKHA_A2 },
+ { PKA3, FIFOST_TYPE_PKHA_A3 },
+ { PKB0, FIFOST_TYPE_PKHA_B0 },
+ { PKB1, FIFOST_TYPE_PKHA_B1 },
+ { PKB2, FIFOST_TYPE_PKHA_B2 },
+ { PKB3, FIFOST_TYPE_PKHA_B3 },
+ { PKA, FIFOST_TYPE_PKHA_A },
+ { PKB, FIFOST_TYPE_PKHA_B },
+ { PKN, FIFOST_TYPE_PKHA_N },
+ { PKE, FIFOST_TYPE_PKHA_E_JKEK },
+ { RNG, FIFOST_TYPE_RNGSTORE },
+ { RNGOFIFO, FIFOST_TYPE_RNGFIFO },
+ { AFHA_SBOX, FIFOST_TYPE_AF_SBOX_JKEK },
+ { MDHA_SPLIT_KEY, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_SPLIT_KEK },
+ { MSG, FIFOST_TYPE_MESSAGE_DATA },
+ { KEY1, FIFOST_CLASS_CLASS1KEY | FIFOST_TYPE_KEY_KEK },
+ { KEY2, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_KEY_KEK },
+ { OFIFO, FIFOST_TYPE_OUTFIFO_KEK},
+ { SKIP, FIFOST_TYPE_SKIP },
+/*22*/ { METADATA, FIFOST_TYPE_METADATA},
+ { MSG_CKSUM, FIFOST_TYPE_MESSAGE_DATA2 }
+};
+
+/*
+ * Allowed FIFO_STORE output data types for each SEC Era.
+ * Values represent the number of entries from fifo_store_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_store_table_sz[] = {21, 21, 21, 21,
+ 22, 22, 22, 23,
+ 23, 23};
+
+static inline int
+rta_fifo_store(struct program *program, uint32_t src,
+ uint32_t encrypt_flags, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_STORE;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_STORE;
+ }
+
+ /* Parameter checking */
+ if (is_seq_cmd) {
+ if ((flags & VLF) && ((length >> 16) || (flags & EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid usage of VLF\n");
+ goto err;
+ }
+ if (dst) {
+ pr_err("SEQ FIFO STORE: Invalid command\n");
+ goto err;
+ }
+ if ((src == METADATA) && (flags & (CONT | EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid flags\n");
+ goto err;
+ }
+ } else {
+ if (((src == RNGOFIFO) && ((dst) || (flags & EXT))) ||
+ (src == METADATA)) {
+ pr_err("FIFO STORE: Invalid destination\n");
+ goto err;
+ }
+ }
+ if ((rta_sec_era == RTA_SEC_ERA_7) && (src == AFHA_SBOX)) {
+ pr_err("FIFO STORE: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write output data type field */
+ ret = __rta_map_opcode(src, fifo_store_table,
+ fifo_store_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO STORE: Source type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (encrypt_flags & TK)
+ opcode |= (0x1 << FIFOST_TYPE_SHIFT);
+ if (encrypt_flags & EKT) {
+ if (rta_sec_era == RTA_SEC_ERA_1) {
+ pr_err("FIFO STORE: AES-CCM source types not supported\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ opcode |= (0x10 << FIFOST_TYPE_SHIFT);
+ opcode &= (uint32_t)~(0x20 << FIFOST_TYPE_SHIFT);
+ }
+
+ /* write flags fields */
+ if (flags & CONT)
+ opcode |= FIFOST_CONT;
+ if ((flags & VLF) && (is_seq_cmd))
+ opcode |= FIFOLDST_VLF;
+ if ((flags & SGF) && (!is_seq_cmd))
+ opcode |= FIFOLDST_SGF;
+ if (flags & CLASS1)
+ opcode |= FIFOST_CLASS_CLASS1KEY;
+ if (flags & CLASS2)
+ opcode |= FIFOST_CLASS_CLASS2KEY;
+ if (flags & BOTH)
+ opcode |= FIFOST_CLASS_BOTH;
+
+ /* Verify if extended length is required */
+ if ((length >> 16) || (flags & EXT))
+ opcode |= FIFOLDST_EXT;
+ else
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer field */
+ if ((!is_seq_cmd) && (dst))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_FIFO_LOAD_STORE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/header_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/header_cmd.h
new file mode 100644
index 000000000..45aefa04c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/header_cmd.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_HEADER_CMD_H__
+#define __RTA_HEADER_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed job header flags for each SEC Era. */
+static const uint32_t job_header_flags[] = {
+ DNR | TD | MTD | SHR | REO,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | EXT,
+ DNR | TD | MTD | SHR | REO | EXT,
+ DNR | TD | MTD | SHR | REO | EXT
+};
+
+/* Allowed shared header flags for each SEC Era. */
+static const uint32_t shr_header_flags[] = {
+ DNR | SC | PD,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF
+};
+
+static inline int
+rta_shr_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint32_t flags)
+{
+ uint32_t opcode = CMD_SHARED_DESC_HDR;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~shr_header_flags[rta_sec_era]) {
+ pr_err("SHR_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ default:
+ pr_err("SHR_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ if (rta_sec_era >= RTA_SEC_ERA_10)
+ opcode |= (start_idx << HDR_START_IDX_SHIFT) &
+ HDR_START_IDX_MASK_ERA10;
+ else
+ opcode |= (start_idx << HDR_START_IDX_SHIFT) &
+ HDR_START_IDX_MASK;
+
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & CIF)
+ opcode |= HDR_CLEAR_IFIFO;
+ if (flags & SC)
+ opcode |= HDR_SAVECTX;
+ if (flags & PD)
+ opcode |= HDR_PROP_DNR;
+ if (flags & RIF)
+ opcode |= HDR_RIF;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1)
+ program->shrhdr = program->buffer;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+static inline int
+rta_job_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint64_t shr_desc, uint32_t flags,
+ uint32_t ext_flags)
+{
+ uint32_t opcode = CMD_DESC_HDR;
+ uint32_t hdr_ext = 0;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~job_header_flags[rta_sec_era]) {
+ pr_err("JOB_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ case SHR_DEFER:
+ opcode |= HDR_SHARE_DEFER;
+ break;
+ default:
+ pr_err("JOB_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & TD) && (flags & REO)) {
+ pr_err("JOB_DESC: REO flag not supported for trusted descriptors. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (flags & MTD) && !(flags & TD)) {
+ pr_err("JOB_DESC: Trying to MTD a descriptor that is not a TD. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & EXT) && !(flags & SHR) && (start_idx < 2)) {
+ pr_err("JOB_DESC: Start index must be >= 2 in case of no SHR and EXT. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ if (rta_sec_era >= RTA_SEC_ERA_10)
+ opcode |= (start_idx << HDR_START_IDX_SHIFT) &
+ HDR_START_IDX_MASK_ERA10;
+ else
+ opcode |= (start_idx << HDR_START_IDX_SHIFT) &
+ HDR_START_IDX_MASK;
+
+ if (flags & EXT) {
+ opcode |= HDR_EXT;
+
+ if (ext_flags & DSV) {
+ hdr_ext |= HDR_EXT_DSEL_VALID;
+ hdr_ext |= ext_flags & DSEL_MASK;
+ }
+
+ if (ext_flags & FTD) {
+ if (rta_sec_era <= RTA_SEC_ERA_5) {
+ pr_err("JOB_DESC: Fake trusted descriptor not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ hdr_ext |= HDR_EXT_FTD;
+ }
+ }
+ if (flags & RSMS)
+ opcode |= HDR_RSLS;
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & TD)
+ opcode |= HDR_TRUSTED;
+ if (flags & MTD)
+ opcode |= HDR_MAKE_TRUSTED;
+ if (flags & REO)
+ opcode |= HDR_REVERSE;
+ if (flags & SHR)
+ opcode |= HDR_SHARED;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1) {
+ program->jobhdr = program->buffer;
+
+ if (opcode & HDR_SHARED)
+ __rta_out64(program, program->ps, shr_desc);
+ }
+
+ if (flags & EXT)
+ __rta_out32(program, hdr_ext);
+
+ /* Note: descriptor length is set in program_finalize routine */
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_HEADER_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/jump_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/jump_cmd.h
new file mode 100644
index 000000000..18f781e37
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/jump_cmd.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_JUMP_CMD_H__
+#define __RTA_JUMP_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t jump_test_cond[][2] = {
+ { NIFP, JUMP_COND_NIFP },
+ { NIP, JUMP_COND_NIP },
+ { NOP, JUMP_COND_NOP },
+ { NCP, JUMP_COND_NCP },
+ { CALM, JUMP_COND_CALM },
+ { SELF, JUMP_COND_SELF },
+ { SHRD, JUMP_COND_SHRD },
+ { JQP, JUMP_COND_JQP },
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C },
+ { PK_0, JUMP_COND_PK_0 },
+ { PK_GCD_1, JUMP_COND_PK_GCD_1 },
+ { PK_PRIME, JUMP_COND_PK_PRIME },
+ { CLASS1, JUMP_CLASS_CLASS1 },
+ { CLASS2, JUMP_CLASS_CLASS2 },
+ { BOTH, JUMP_CLASS_BOTH }
+};
+
+static const uint32_t jump_test_math_cond[][2] = {
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C }
+};
+
+static const uint32_t jump_src_dst[][2] = {
+ { MATH0, JUMP_SRC_DST_MATH0 },
+ { MATH1, JUMP_SRC_DST_MATH1 },
+ { MATH2, JUMP_SRC_DST_MATH2 },
+ { MATH3, JUMP_SRC_DST_MATH3 },
+ { DPOVRD, JUMP_SRC_DST_DPOVRD },
+ { SEQINSZ, JUMP_SRC_DST_SEQINLEN },
+ { SEQOUTSZ, JUMP_SRC_DST_SEQOUTLEN },
+ { VSEQINSZ, JUMP_SRC_DST_VARSEQINLEN },
+ { VSEQOUTSZ, JUMP_SRC_DST_VARSEQOUTLEN }
+};
+
+static inline int
+rta_jump(struct program *program, uint64_t address,
+ enum rta_jump_type jump_type,
+ enum rta_jump_cond test_type,
+ uint32_t test_condition, uint32_t src_dst)
+{
+ uint32_t opcode = CMD_JUMP;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ if (((jump_type == GOSUB) || (jump_type == RETURN)) &&
+ (rta_sec_era < RTA_SEC_ERA_4)) {
+ pr_err("JUMP: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (((jump_type == LOCAL_JUMP_INC) || (jump_type == LOCAL_JUMP_DEC)) &&
+ (rta_sec_era <= RTA_SEC_ERA_5)) {
+ pr_err("JUMP_INCDEC: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (jump_type) {
+ case (LOCAL_JUMP):
+ /*
+ * opcode |= JUMP_TYPE_LOCAL;
+ * JUMP_TYPE_LOCAL is 0
+ */
+ break;
+ case (HALT):
+ opcode |= JUMP_TYPE_HALT;
+ break;
+ case (HALT_STATUS):
+ opcode |= JUMP_TYPE_HALT_USER;
+ break;
+ case (FAR_JUMP):
+ opcode |= JUMP_TYPE_NONLOCAL;
+ break;
+ case (GOSUB):
+ opcode |= JUMP_TYPE_GOSUB;
+ break;
+ case (RETURN):
+ opcode |= JUMP_TYPE_RETURN;
+ break;
+ case (LOCAL_JUMP_INC):
+ opcode |= JUMP_TYPE_LOCAL_INC;
+ break;
+ case (LOCAL_JUMP_DEC):
+ opcode |= JUMP_TYPE_LOCAL_DEC;
+ break;
+ default:
+ pr_err("JUMP: Invalid jump type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ switch (test_type) {
+ case (ALL_TRUE):
+ /*
+ * opcode |= JUMP_TEST_ALL;
+ * JUMP_TEST_ALL is 0
+ */
+ break;
+ case (ALL_FALSE):
+ opcode |= JUMP_TEST_INVALL;
+ break;
+ case (ANY_TRUE):
+ opcode |= JUMP_TEST_ANY;
+ break;
+ case (ANY_FALSE):
+ opcode |= JUMP_TEST_INVANY;
+ break;
+ default:
+ pr_err("JUMP: test type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ /* write test condition field */
+ if ((jump_type != LOCAL_JUMP_INC) && (jump_type != LOCAL_JUMP_DEC)) {
+ __rta_map_flags(test_condition, jump_test_cond,
+ ARRAY_SIZE(jump_test_cond), &opcode);
+ } else {
+ uint32_t val = 0;
+
+ ret = __rta_map_opcode(src_dst, jump_src_dst,
+ ARRAY_SIZE(jump_src_dst), &val);
+ if (ret < 0) {
+ pr_err("JUMP_INCDEC: SRC_DST not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ __rta_map_flags(test_condition, jump_test_math_cond,
+ ARRAY_SIZE(jump_test_math_cond), &opcode);
+ }
+
+ /* write local offset field for local jumps and user-defined halt */
+ if ((jump_type == LOCAL_JUMP) || (jump_type == LOCAL_JUMP_INC) ||
+ (jump_type == LOCAL_JUMP_DEC) || (jump_type == GOSUB) ||
+ (jump_type == HALT_STATUS))
+ opcode |= (uint32_t)(address & JUMP_OFFSET_MASK);
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (jump_type == FAR_JUMP)
+ __rta_out64(program, program->ps, address);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
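+/*
+ * Illustrative sketch, not part of the original sources: a conditional
+ * local jump that skips 4 descriptor words when the MATH zero flag is
+ * set. 'p' is a hypothetical, already-initialized struct program;
+ * src_dst is 0 because it is only consumed for LOCAL_JUMP_INC/DEC,
+ * and MATH_Z is assumed to be among the mapped test conditions.
+ *
+ *   int pc = rta_jump(p, 4, LOCAL_JUMP, ALL_TRUE, MATH_Z, 0);
+ */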
+#endif /* __RTA_JUMP_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/key_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/key_cmd.h
new file mode 100644
index 000000000..ec3fbcaf6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/key_cmd.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_KEY_CMD_H__
+#define __RTA_KEY_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed encryption flags for each SEC Era */
+static const uint32_t key_enc_flags[] = {
+ ENC,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK | PTS,
+ ENC | NWB | EKT | TK | PTS,
+ ENC | NWB | EKT | TK | PTS,
+ ENC | NWB | EKT | TK | PTS
+};
+
+static inline int
+rta_key(struct program *program, uint32_t key_dst,
+ uint32_t encrypt_flags, uint64_t src, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if (encrypt_flags & ~key_enc_flags[rta_sec_era]) {
+ pr_err("KEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write cmd type */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_KEY;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_KEY;
+ }
+
+ /* check parameters */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQKEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) &&
+ ((flags & VLF) || (flags & AIDF))) {
+ pr_err("SEQKEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ } else {
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((flags & SGF) && (flags & IMMED)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ if ((encrypt_flags & PTS) &&
+ ((encrypt_flags & ENC) || (encrypt_flags & NWB) ||
+ (key_dst == PKE))) {
+ pr_err("KEY: Invalid flag / destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (key_dst == AFHA_SBOX) {
+ if (rta_sec_era == RTA_SEC_ERA_7) {
+ pr_err("KEY: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /*
+ * Sbox data loaded into the ARC-4 processor must be exactly
+ * 258 bytes long, or else a data sequence error is generated.
+ */
+ if (length != 258) {
+ pr_err("KEY: Invalid length. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ /* write key destination and class fields */
+ switch (key_dst) {
+ case (KEY1):
+ opcode |= KEY_DEST_CLASS1;
+ break;
+ case (KEY2):
+ opcode |= KEY_DEST_CLASS2;
+ break;
+ case (PKE):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_PKHA_E;
+ break;
+ case (AFHA_SBOX):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_AFHA_SBOX;
+ break;
+ case (MDHA_SPLIT_KEY):
+ opcode |= KEY_DEST_CLASS2 | KEY_DEST_MDHA_SPLIT;
+ break;
+ default:
+ pr_err("KEY: Invalid destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* write key length */
+ length &= KEY_LENGTH_MASK;
+ opcode |= length;
+
+ /* write key command specific flags */
+ if (encrypt_flags & ENC) {
+ /* Encrypted (black) keys must be padded to 8 bytes (CCM) or
+ * 16 bytes (ECB) depending on EKT bit. AES-CCM encrypted keys
+ * (EKT = 1) have 6-byte nonce and 6-byte MAC after padding.
+ */
+ opcode |= KEY_ENC;
+ if (encrypt_flags & EKT) {
+ opcode |= KEY_EKT;
+ length = ALIGN(length, 8);
+ length += 12;
+ } else {
+ length = ALIGN(length, 16);
+ }
+ if (encrypt_flags & TK)
+ opcode |= KEY_TK;
+ }
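+	/*
+	 * Worked example (illustrative): a 20-byte black key with EKT set
+	 * is padded to ALIGN(20, 8) = 24 bytes, plus 12 bytes of nonce and
+	 * MAC, so 36 bytes are inlined or fetched, while the opcode still
+	 * carries the original length of 20.
+	 */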
+ if (encrypt_flags & NWB)
+ opcode |= KEY_NWB;
+ if (encrypt_flags & PTS)
+ opcode |= KEY_PTS;
+
+ /* write general command flags */
+ if (!is_seq_cmd) {
+ if (flags & IMMED)
+ opcode |= KEY_IMM;
+ if (flags & SGF)
+ opcode |= KEY_SGF;
+ } else {
+ if (flags & AIDF)
+ opcode |= KEY_AIDF;
+ if (flags & VLF)
+ opcode |= KEY_VLF;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
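+/*
+ * Illustrative sketch, not part of the original sources: inlining a
+ * 16-byte plaintext key into the descriptor for the class 1 key
+ * register. 'p' and 'key_data' are hypothetical caller-side names;
+ * COPY is assumed to be part of __COPY_MASK, so the bytes at key_data
+ * are copied into the descriptor:
+ *
+ *   int pc = rta_key(p, KEY1, 0, (uintptr_t)key_data, 16, IMMED | COPY);
+ */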
+#endif /* __RTA_KEY_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/load_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/load_cmd.h
new file mode 100644
index 000000000..38e253c22
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/load_cmd.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_LOAD_CMD_H__
+#define __RTA_LOAD_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed length and offset masks for each SEC Era in case DST = DCTRL */
+static const uint32_t load_len_mask_allowed[] = {
+ 0x000000ee,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe
+};
+
+static const uint32_t load_off_mask_allowed[] = {
+ 0x0000000f,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff
+};
+
+#define IMM_MUST 0
+#define IMM_CAN 1
+#define IMM_NO 2
+#define IMM_DSNM 3 /* src type does not matter */
+
+enum e_lenoff {
+ LENOF_03,
+ LENOF_4,
+ LENOF_48,
+ LENOF_448,
+ LENOF_18,
+ LENOF_32,
+ LENOF_24,
+ LENOF_16,
+ LENOF_8,
+ LENOF_128,
+ LENOF_256,
+	DSNM /* length/offset values do not matter */
+};
+
+struct load_map {
+ uint32_t dst;
+ uint32_t dst_opcode;
+ enum e_lenoff len_off;
+ uint8_t imm_src;
+};
+
+static const struct load_map load_dst[] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { CCTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CHACTRL,
+ LENOF_4, IMM_MUST },
+ { DCTRL, LDST_CLASS_DECO | LDST_IMM | LDST_SRCDST_WORD_DECOCTRL,
+ DSNM, IMM_DSNM },
+ { ICTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_IRQCTRL,
+ LENOF_4, IMM_MUST },
+ { DPOVRD, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_PCLOVRD,
+ LENOF_4, IMM_MUST },
+ { CLRW, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CLRW,
+ LENOF_4, IMM_MUST },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ,
+ LENOF_4, IMM_MUST },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ,
+ LENOF_4, IMM_MUST },
+ { ALTDS1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ALTDS_CLASS1,
+ LENOF_448, IMM_MUST },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ,
+	  LENOF_4, IMM_MUST },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ,
+ LENOF_4, IMM_MUST },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ,
+ LENOF_4, IMM_MUST },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ,
+ LENOF_4, IMM_MUST },
+ { NFIFO, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO,
+ LENOF_48, IMM_MUST },
+ { IFIFO, LDST_SRCDST_BYTE_INFIFO, LENOF_18, IMM_MUST },
+ { OFIFO, LDST_SRCDST_BYTE_OUTFIFO, LENOF_18, IMM_MUST },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0,
+ LENOF_32, IMM_CAN },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1,
+ LENOF_24, IMM_CAN },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2,
+ LENOF_16, IMM_CAN },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3,
+ LENOF_8, IMM_CAN },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { KEY1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { KEY2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF,
+ LENOF_256, IMM_NO },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID,
+ LENOF_448, IMM_MUST },
+/*32*/ { IDFNS, LDST_SRCDST_WORD_IFNSR, LENOF_18, IMM_MUST },
+ { ODFNS, LDST_SRCDST_WORD_OFNSR, LENOF_18, IMM_MUST },
+ { ALTSOURCE, LDST_SRCDST_BYTE_ALTSOURCE, LENOF_18, IMM_MUST },
+/*35*/ { NFIFO_SZL, LDST_SRCDST_WORD_INFO_FIFO_SZL, LENOF_48, IMM_MUST },
+ { NFIFO_SZM, LDST_SRCDST_WORD_INFO_FIFO_SZM, LENOF_03, IMM_MUST },
+ { NFIFO_L, LDST_SRCDST_WORD_INFO_FIFO_L, LENOF_48, IMM_MUST },
+ { NFIFO_M, LDST_SRCDST_WORD_INFO_FIFO_M, LENOF_03, IMM_MUST },
+ { SZL, LDST_SRCDST_WORD_SZL, LENOF_48, IMM_MUST },
+/*40*/ { SZM, LDST_SRCDST_WORD_SZM, LENOF_03, IMM_MUST }
+};
+
+/*
+ * Allowed LOAD destinations for each SEC Era.
+ * Values represent the number of entries from load_dst[] that are supported.
+ */
+static const unsigned int load_dst_sz[] = { 31, 34, 34, 40, 40,
+ 40, 40, 40, 40, 40};
+
+static inline int
+load_check_len_offset(int pos, uint32_t length, uint32_t offset)
+{
+ if ((load_dst[pos].dst == DCTRL) &&
+ ((length & ~load_len_mask_allowed[rta_sec_era]) ||
+ (offset & ~load_off_mask_allowed[rta_sec_era])))
+ goto err;
+
+ switch (load_dst[pos].len_off) {
+ case (LENOF_03):
+ if ((length > 3) || (offset))
+ goto err;
+ break;
+ case (LENOF_4):
+ if ((length != 4) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_48):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_448):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 4) && (offset == 4)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_18):
+ if ((length < 1) || (length > 8) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_32):
+ if ((length > 32) || (offset > 32) || ((offset + length) > 32))
+ goto err;
+ break;
+ case (LENOF_24):
+ if ((length > 24) || (offset > 24) || ((offset + length) > 24))
+ goto err;
+ break;
+ case (LENOF_16):
+ if ((length > 16) || (offset > 16) || ((offset + length) > 16))
+ goto err;
+ break;
+ case (LENOF_8):
+ if ((length > 8) || (offset > 8) || ((offset + length) > 8))
+ goto err;
+ break;
+ case (LENOF_128):
+ if ((length > 128) || (offset > 128) ||
+ ((offset + length) > 128))
+ goto err;
+ break;
+ case (LENOF_256):
+ if ((length < 1) || (length > 256) || ((length + offset) > 256))
+ goto err;
+ break;
+ case (DSNM):
+ break;
+ default:
+ goto err;
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+static inline int
+rta_load(struct program *program, uint64_t src, uint64_t dst,
+ uint32_t offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ int pos = -1, ret = -EINVAL;
+ unsigned int start_pc = program->current_pc, i;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_LOAD;
+ else
+ opcode = CMD_LOAD;
+
+ if ((length & 0xffffff00) || (offset & 0xffffff00)) {
+ pr_err("LOAD: Bad length/offset passed. Should be 8 bits\n");
+ goto err;
+ }
+
+ if (flags & SGF)
+ opcode |= LDST_SGF;
+ if (flags & VLF)
+ opcode |= LDST_VLF;
+
+	/* check load destination, length, offset and source type */
+ for (i = 0; i < load_dst_sz[rta_sec_era]; i++)
+ if (dst == load_dst[i].dst) {
+ pos = (int)i;
+ break;
+ }
+ if (-1 == pos) {
+ pr_err("LOAD: Invalid dst. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ if (load_dst[pos].imm_src == IMM_NO) {
+ pr_err("LOAD: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= LDST_IMM;
+ } else if (load_dst[pos].imm_src == IMM_MUST) {
+ pr_err("LOAD IMM: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ ret = load_check_len_offset(pos, length, offset);
+ if (ret < 0) {
+ pr_err("LOAD: Invalid length/offset. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= load_dst[pos].dst_opcode;
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if (dst == DESCBUF) {
+ opcode |= (length >> 2);
+ opcode |= ((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (offset << LDST_OFFSET_SHIFT);
+ }
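+	/*
+	 * Illustrative note: with DESCBUF, length = 16 and offset = 8 are
+	 * encoded as 4 and 2 words; every other destination uses byte units.
+	 */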
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+	/* DECO CONTROL: skip writing the pointer or immediate data */
+ if (dst == DCTRL)
+ return (int)start_pc;
+
+	/*
+	 * For data copy, there are 3 ways to specify how the data is copied:
+	 * - IMMED & !COPY: copy data directly from src (max 8 bytes)
+	 * - IMMED & COPY: copy immediate data from the location specified
+	 *   by the user
+	 * - !IMMED and not a SEQ cmd: copy the address
+	 */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else if (!(flags & SEQ))
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
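+/*
+ * Illustrative sketch, not part of the original sources: loading an
+ * immediate word into CLRW to clear class 1 mode and context, a
+ * pattern used by the shared descriptors. 'p' is a hypothetical
+ * initialized struct program and the CLRW_CLR_* flags are assumed to
+ * be provided by the accompanying RTA headers:
+ *
+ *   int pc = rta_load(p, CLRW_CLR_C1MODE | CLRW_CLR_C1CTX, CLRW,
+ *                     0, 4, IMMED);
+ */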
+#endif /* __RTA_LOAD_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/math_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/math_cmd.h
new file mode 100644
index 000000000..cca70f7e0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/math_cmd.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_MATH_CMD_H__
+#define __RTA_MATH_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t math_op1[][2] = {
+/*1*/ { MATH0, MATH_SRC0_REG0 },
+ { MATH1, MATH_SRC0_REG1 },
+ { MATH2, MATH_SRC0_REG2 },
+ { MATH3, MATH_SRC0_REG3 },
+ { SEQINSZ, MATH_SRC0_SEQINLEN },
+ { SEQOUTSZ, MATH_SRC0_SEQOUTLEN },
+ { VSEQINSZ, MATH_SRC0_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC0_VARSEQOUTLEN },
+ { ZERO, MATH_SRC0_ZERO },
+/*10*/ { NONE, 0 }, /* dummy value */
+ { DPOVRD, MATH_SRC0_DPOVRD },
+ { ONE, MATH_SRC0_ONE }
+};
+
+/*
+ * Allowed MATH op1 sources for each SEC Era.
+ * Values represent the number of entries from math_op1[] that are supported.
+ */
+static const unsigned int math_op1_sz[] = {10, 10, 12, 12, 12, 12,
+ 12, 12, 12, 12};
+
+static const uint32_t math_op2[][2] = {
+/*1*/ { MATH0, MATH_SRC1_REG0 },
+ { MATH1, MATH_SRC1_REG1 },
+ { MATH2, MATH_SRC1_REG2 },
+ { MATH3, MATH_SRC1_REG3 },
+ { ABD, MATH_SRC1_INFIFO },
+ { OFIFO, MATH_SRC1_OUTFIFO },
+ { ONE, MATH_SRC1_ONE },
+/*8*/ { NONE, 0 }, /* dummy value */
+ { JOBSRC, MATH_SRC1_JOBSOURCE },
+ { DPOVRD, MATH_SRC1_DPOVRD },
+ { VSEQINSZ, MATH_SRC1_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC1_VARSEQOUTLEN },
+/*13*/ { ZERO, MATH_SRC1_ZERO }
+};
+
+/*
+ * Allowed MATH op2 sources for each SEC Era.
+ * Values represent the number of entries from math_op2[] that are supported.
+ */
+static const unsigned int math_op2_sz[] = {8, 9, 13, 13, 13, 13, 13, 13,
+ 13, 13};
+
+static const uint32_t math_result[][2] = {
+/*1*/ { MATH0, MATH_DEST_REG0 },
+ { MATH1, MATH_DEST_REG1 },
+ { MATH2, MATH_DEST_REG2 },
+ { MATH3, MATH_DEST_REG3 },
+ { SEQINSZ, MATH_DEST_SEQINLEN },
+ { SEQOUTSZ, MATH_DEST_SEQOUTLEN },
+ { VSEQINSZ, MATH_DEST_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_DEST_VARSEQOUTLEN },
+/*9*/ { NONE, MATH_DEST_NONE },
+ { DPOVRD, MATH_DEST_DPOVRD }
+};
+
+/*
+ * Allowed MATH result destinations for each SEC Era.
+ * Values represent the number of entries from math_result[] that are
+ * supported.
+ */
+static const unsigned int math_result_sz[] = {9, 9, 10, 10, 10, 10, 10, 10,
+ 10, 10};
+
+static inline int
+rta_math(struct program *program, uint64_t operand1,
+ uint32_t op, uint64_t operand2, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATH;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (((op == MATH_FUN_BSWAP) && (rta_sec_era < RTA_SEC_ERA_4)) ||
+ ((op == MATH_FUN_ZBYT) && (rta_sec_era < RTA_SEC_ERA_2))) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (options & SWP) {
+ if (rta_sec_era < RTA_SEC_ERA_7) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((options & IFB) ||
+ (!(options & IMMED) && !(options & IMMED2)) ||
+ ((options & IMMED) && (options & IMMED2))) {
+ pr_err("MATH: SWP - invalid configuration. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+	/*
+	 * The SHLD operation is different from the others: only for SHLD
+	 * may _NONE appear as the first operand or _SEQINSZ as the second
+	 * operand.
+	 */
+ if ((op != MATH_FUN_SHLD) && ((operand1 == NONE) ||
+ (operand2 == SEQINSZ))) {
+ pr_err("MATH: Invalid operand. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+	/*
+	 * Check first whether this is a unary operation; in that case the
+	 * second operand must be _NONE.
+	 */
+ if (((op == MATH_FUN_ZBYT) || (op == MATH_FUN_BSWAP)) &&
+ (operand2 != NONE)) {
+ pr_err("MATH: Invalid operand2. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (options & IMMED) {
+ opcode |= MATH_SRC0_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand1, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand1 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write second operand field */
+ if (options & IMMED2) {
+ opcode |= MATH_SRC1_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand2, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand2 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATH: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+	/*
+	 * as we encode operations with their "real" values, we do not have
+	 * to translate, but we do need to validate the value
+	 */
+ switch (op) {
+ /*Binary operators */
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_SHLD):
+ /* Unary operators */
+ case (MATH_FUN_ZBYT):
+ case (MATH_FUN_BSWAP):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATH: operator is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= (options & ~(IMMED | IMMED2));
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATH: length is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* Write immediate value */
+ if ((options & IMMED) && !(options & IMMED2)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand1);
+ } else if ((options & IMMED2) && !(options & IMMED)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand2);
+ } else if ((options & IMMED) && (options & IMMED2)) {
+ __rta_out32(program, lower_32_bits(operand1));
+ __rta_out32(program, lower_32_bits(operand2));
+ }
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
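+/*
+ * Illustrative sketch, not part of the original sources: computing
+ * VSEQINSZ = SEQINSZ - MATH2 on 4 bytes, with no immediates. 'p' is a
+ * hypothetical initialized struct program:
+ *
+ *   int pc = rta_math(p, SEQINSZ, MATH_FUN_SUB, MATH2, VSEQINSZ, 4, 0);
+ */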
+static inline int
+rta_mathi(struct program *program, uint64_t operand,
+ uint32_t op, uint8_t imm, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATHI;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ pr_err("MATHI: Command not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (((op == MATH_FUN_FBYT) && (options & SSEL))) {
+ pr_err("MATHI: Illegal combination - FBYT and SSEL. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((options & SWP) && (rta_sec_era < RTA_SEC_ERA_7)) {
+ pr_err("MATHI: SWP not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (!(options & SSEL))
+ ret = __rta_map_opcode((uint32_t)operand, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ else
+ ret = __rta_map_opcode((uint32_t)operand, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATHI: operand not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (!(options & SSEL))
+ opcode |= val;
+ else
+ opcode |= (val << (MATHI_SRC1_SHIFT - MATH_SRC1_SHIFT));
+
+ /* Write second operand field */
+ opcode |= (imm << MATHI_IMM_SHIFT);
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATHI: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= (val << (MATHI_DEST_SHIFT - MATH_DEST_SHIFT));
+
+ /*
+ * as we encode operations with their "real" values, we do not have to
+ * translate but we do need to validate the value
+ */
+ switch (op) {
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_FBYT):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATHI: operator not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= options;
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATHI: length %d not supported. SEC PC: %d; Instr: %d\n",
+ length, program->current_pc,
+ program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
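+/*
+ * Illustrative sketch, not part of the original sources: MATHI keeps
+ * the immediate inside the opcode itself, so incrementing MATH0 by 1
+ * costs no extra descriptor word (SEC Era 6 onwards):
+ *
+ *   int pc = rta_mathi(p, MATH0, MATH_FUN_ADD, 1, MATH0, 4, 0);
+ */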
+#endif /* __RTA_MATH_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/move_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/move_cmd.h
new file mode 100644
index 000000000..d2151c6dd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/move_cmd.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_MOVE_CMD_H__
+#define __RTA_MOVE_CMD_H__
+
+#define MOVE_SET_AUX_SRC 0x01
+#define MOVE_SET_AUX_DST 0x02
+#define MOVE_SET_AUX_LS 0x03
+#define MOVE_SET_LEN_16b 0x04
+
+#define MOVE_SET_AUX_MATH 0x10
+#define MOVE_SET_AUX_MATH_SRC (MOVE_SET_AUX_SRC | MOVE_SET_AUX_MATH)
+#define MOVE_SET_AUX_MATH_DST (MOVE_SET_AUX_DST | MOVE_SET_AUX_MATH)
+
+#define MASK_16b 0xFF
+
+/* MOVE command type */
+#define __MOVE 1
+#define __MOVEB 2
+#define __MOVEDW 3
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t move_src_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_SRC_CLASS1CTX },
+ { CONTEXT2, MOVE_SRC_CLASS2CTX },
+ { OFIFO, MOVE_SRC_OUTFIFO },
+ { DESCBUF, MOVE_SRC_DESCBUF },
+ { MATH0, MOVE_SRC_MATH0 },
+ { MATH1, MOVE_SRC_MATH1 },
+ { MATH2, MOVE_SRC_MATH2 },
+ { MATH3, MOVE_SRC_MATH3 },
+/*9*/ { IFIFOABD, MOVE_SRC_INFIFO },
+ { IFIFOAB1, MOVE_SRC_INFIFO_CL | MOVE_AUX_LS },
+ { IFIFOAB2, MOVE_SRC_INFIFO_CL },
+/*12*/ { ABD, MOVE_SRC_INFIFO_NO_NFIFO },
+ { AB1, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_LS },
+ { AB2, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_MS }
+};
+
+/* Allowed MOVE / MOVE_LEN sources for each SEC Era.
+ * Values represent the number of entries from move_src_table[] that are
+ * supported.
+ */
+static const unsigned int move_src_table_sz[] = {9, 11, 14, 14, 14, 14, 14, 14,
+ 14, 14};
+
+static const uint32_t move_dst_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_DEST_CLASS1CTX },
+ { CONTEXT2, MOVE_DEST_CLASS2CTX },
+ { OFIFO, MOVE_DEST_OUTFIFO },
+ { DESCBUF, MOVE_DEST_DESCBUF },
+ { MATH0, MOVE_DEST_MATH0 },
+ { MATH1, MOVE_DEST_MATH1 },
+ { MATH2, MOVE_DEST_MATH2 },
+ { MATH3, MOVE_DEST_MATH3 },
+ { IFIFOAB1, MOVE_DEST_CLASS1INFIFO },
+ { IFIFOAB2, MOVE_DEST_CLASS2INFIFO },
+ { PKA, MOVE_DEST_PK_A },
+ { KEY1, MOVE_DEST_CLASS1KEY },
+ { KEY2, MOVE_DEST_CLASS2KEY },
+/*14*/ { IFIFO, MOVE_DEST_INFIFO },
+/*15*/ { ALTSOURCE, MOVE_DEST_ALTSOURCE}
+};
+
+/* Allowed MOVE / MOVE_LEN destinations for each SEC Era.
+ * Values represent the number of entries from move_dst_table[] that are
+ * supported.
+ */
+static const
+unsigned int move_dst_table_sz[] = {13, 14, 14, 15, 15, 15, 15, 15, 15, 15};
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt);
+
+static inline int
+math_offset(uint16_t offset);
+
+static inline int
+rta_move(struct program *program, int cmd_type, uint64_t src,
+ uint16_t src_offset, uint64_t dst,
+ uint16_t dst_offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint16_t offset = 0, opt = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_move_len_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (cmd_type != __MOVE)) {
+ pr_err("MOVE: MOVEB / MOVEDW not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* write command type */
+ if (cmd_type == __MOVEB) {
+ opcode = CMD_MOVEB;
+ } else if (cmd_type == __MOVEDW) {
+ opcode = CMD_MOVEDW;
+ } else if (!(flags & IMMED)) {
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ pr_err("MOVE: MOVE_LEN not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((length != MATH0) && (length != MATH1) &&
+ (length != MATH2) && (length != MATH3)) {
+ pr_err("MOVE: MOVE_LEN length must be MATH[0-3]. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode = CMD_MOVE_LEN;
+ is_move_len_cmd = true;
+ } else {
+ opcode = CMD_MOVE;
+ }
+
+	/* write the offset first, to catch invalid combinations or bad
+	 * offset values sooner; decide which offset (src or dst) belongs
+	 * here
+	 */
+ ret = set_move_offset(program, src, src_offset, dst, dst_offset,
+ &offset, &opt);
+ if (ret < 0)
+ goto err;
+
+ opcode |= (offset << MOVE_OFFSET_SHIFT) & MOVE_OFFSET_MASK;
+
+ /* set AUX field if required */
+ if (opt == MOVE_SET_AUX_SRC) {
+ opcode |= ((src_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_DST) {
+ opcode |= ((dst_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_LS) {
+ opcode |= MOVE_AUX_LS;
+ } else if (opt & MOVE_SET_AUX_MATH) {
+ if (opt & MOVE_SET_AUX_SRC)
+ offset = src_offset;
+ else
+ offset = dst_offset;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ if (offset)
+ pr_debug("MOVE: Offset not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era),
+ program->current_pc,
+ program->current_instruction);
+ /* nothing to do for offset = 0 */
+ } else {
+ ret = math_offset(offset);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid offset in MATH register. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode |= (uint32_t)ret;
+ }
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode((uint32_t)src, move_src_table,
+ move_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write destination field */
+ ret = __rta_map_opcode((uint32_t)dst, move_dst_table,
+ move_dst_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write flags */
+ if (flags & (FLUSH1 | FLUSH2))
+ opcode |= MOVE_AUX_MS;
+ if (flags & (LAST2 | LAST1))
+ opcode |= MOVE_AUX_LS;
+ if (flags & WAITCOMP)
+ opcode |= MOVE_WAITCOMP;
+
+ if (!is_move_len_cmd) {
+ /* write length */
+ if (opt == MOVE_SET_LEN_16b)
+ opcode |= (length & (MOVE_OFFSET_MASK | MOVE_LEN_MASK));
+ else
+ opcode |= (length & MOVE_LEN_MASK);
+ } else {
+ /* write mrsel */
+ switch (length) {
+ case (MATH0):
+ /*
+ * opcode |= MOVELEN_MRSEL_MATH0;
+ * MOVELEN_MRSEL_MATH0 is 0
+ */
+ break;
+ case (MATH1):
+ opcode |= MOVELEN_MRSEL_MATH1;
+ break;
+ case (MATH2):
+ opcode |= MOVELEN_MRSEL_MATH2;
+ break;
+ case (MATH3):
+ opcode |= MOVELEN_MRSEL_MATH3;
+ break;
+ }
+
+ /* write size */
+ if (rta_sec_era >= RTA_SEC_ERA_7) {
+ if (flags & SIZE_WORD)
+ opcode |= MOVELEN_SIZE_WORD;
+ else if (flags & SIZE_BYTE)
+ opcode |= MOVELEN_SIZE_BYTE;
+ else if (flags & SIZE_DWORD)
+ opcode |= MOVELEN_SIZE_DWORD;
+ }
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt)
+{
+ switch (src) {
+ case (CONTEXT1):
+ case (CONTEXT2):
+ if (dst == DESCBUF) {
+ *opt = MOVE_SET_AUX_SRC;
+ *offset = dst_offset;
+ } else if ((dst == KEY1) || (dst == KEY2)) {
+ if ((src_offset) && (dst_offset)) {
+ pr_err("MOVE: Bad offset. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (dst_offset) {
+ *opt = MOVE_SET_AUX_LS;
+ *offset = dst_offset;
+ } else {
+ *offset = src_offset;
+ }
+ } else {
+ if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ }
+ break;
+
+ case (OFIFO):
+ if (dst == OFIFO) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) &&
+ (src_offset || dst_offset)) {
+ pr_err("MOVE: Offset should be zero. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ break;
+
+ case (DESCBUF):
+ if ((dst == CONTEXT1) || (dst == CONTEXT2)) {
+ *opt = MOVE_SET_AUX_DST;
+ } else if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (dst == DESCBUF) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Invalid offset alignment. SEC PC: %d; Instr %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ break;
+
+ case (MATH0):
+ case (MATH1):
+ case (MATH2):
+ case (MATH3):
+ if ((dst == OFIFO) || (dst == ALTSOURCE)) {
+ if (src_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = src_offset;
+ } else if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) {
+ *offset = src_offset;
+ } else {
+ *offset = dst_offset;
+
+			/*
+			 * Set the MATH source AUX option for every
+			 * remaining destination except the KEY registers.
+			 */
+ if ((dst != KEY1) && (dst != KEY2))
+ *opt = MOVE_SET_AUX_MATH_SRC;
+ }
+ break;
+
+ case (IFIFOABD):
+ case (IFIFOAB1):
+ case (IFIFOAB2):
+ case (ABD):
+ case (AB1):
+ case (AB2):
+ if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA) || (dst == ALTSOURCE)) {
+ pr_err("MOVE: Bad DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else {
+ if (dst == OFIFO) {
+ *opt = MOVE_SET_LEN_16b;
+ } else {
+ if (dst_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+ err:
+ return -EINVAL;
+}
+
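+/*
+ * Map a byte offset within a MATH register onto the MOVE AUX bits
+ * (consumed only for SEC Era >= 6); offsets other than 0, 4, 6 and 7
+ * cannot be encoded.
+ */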
+static inline int
+math_offset(uint16_t offset)
+{
+ switch (offset) {
+ case 0:
+ return 0;
+ case 4:
+ return MOVE_AUX_LS;
+ case 6:
+ return MOVE_AUX_MS;
+ case 7:
+ return MOVE_AUX_LS | MOVE_AUX_MS;
+ }
+
+ return -EINVAL;
+}
+
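+/*
+ * Illustrative sketch, not part of the original sources: copying 8
+ * bytes from MATH2 into the start of the class 1 context register,
+ * with an immediate length. 'p' is a hypothetical initialized
+ * struct program:
+ *
+ *   int pc = rta_move(p, __MOVE, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ */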
+#endif /* __RTA_MOVE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/nfifo_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/nfifo_cmd.h
new file mode 100644
index 000000000..85092d961
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/nfifo_cmd.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_NFIFO_CMD_H__
+#define __RTA_NFIFO_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t nfifo_src[][2] = {
+/*1*/ { IFIFO, NFIFOENTRY_STYPE_DFIFO },
+ { OFIFO, NFIFOENTRY_STYPE_OFIFO },
+ { PAD, NFIFOENTRY_STYPE_PAD },
+/*4*/ { MSGOUTSNOOP, NFIFOENTRY_STYPE_SNOOP | NFIFOENTRY_DEST_BOTH },
+/*5*/ { ALTSOURCE, NFIFOENTRY_STYPE_ALTSOURCE },
+ { OFIFO_SYNC, NFIFOENTRY_STYPE_OFIFO_SYNC },
+/*7*/ { MSGOUTSNOOP_ALT, NFIFOENTRY_STYPE_SNOOP_ALT | NFIFOENTRY_DEST_BOTH }
+};
+
+/*
+ * Allowed NFIFO LOAD sources for each SEC Era.
+ * Values represent the number of entries from nfifo_src[] that are supported.
+ */
+static const unsigned int nfifo_src_sz[] = {4, 5, 5, 5, 5, 5, 5, 7, 7, 7};
+
+static const uint32_t nfifo_data[][2] = {
+ { MSG, NFIFOENTRY_DTYPE_MSG },
+ { MSG1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_MSG },
+ { MSG2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_MSG },
+ { IV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_IV },
+ { IV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_IV },
+ { ICV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_ICV },
+ { ICV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_ICV },
+ { SAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SAD },
+ { AAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_AAD },
+ { AAD2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_AAD },
+ { AFHA_SBOX, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SBOX },
+ { SKIP, NFIFOENTRY_DTYPE_SKIP },
+ { PKE, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_E },
+ { PKN, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_N },
+ { PKA, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A },
+ { PKA0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A0 },
+ { PKA1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A1 },
+ { PKA2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A2 },
+ { PKA3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A3 },
+ { PKB, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B },
+ { PKB0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B0 },
+ { PKB1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B1 },
+ { PKB2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B2 },
+ { PKB3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B3 },
+ { AB1, NFIFOENTRY_DEST_CLASS1 },
+ { AB2, NFIFOENTRY_DEST_CLASS2 },
+ { ABD, NFIFOENTRY_DEST_DECO }
+};
+
+static const uint32_t nfifo_flags[][2] = {
+/*1*/ { LAST1, NFIFOENTRY_LC1 },
+ { LAST2, NFIFOENTRY_LC2 },
+ { FLUSH1, NFIFOENTRY_FC1 },
+ { BP, NFIFOENTRY_BND },
+ { PAD_ZERO, NFIFOENTRY_PTYPE_ZEROS },
+ { PAD_NONZERO, NFIFOENTRY_PTYPE_RND_NOZEROS },
+ { PAD_INCREMENT, NFIFOENTRY_PTYPE_INCREMENT },
+ { PAD_RANDOM, NFIFOENTRY_PTYPE_RND },
+ { PAD_ZERO_N1, NFIFOENTRY_PTYPE_ZEROS_NZ },
+ { PAD_NONZERO_0, NFIFOENTRY_PTYPE_RND_NZ_LZ },
+ { PAD_N1, NFIFOENTRY_PTYPE_N },
+/*12*/ { PAD_NONZERO_N, NFIFOENTRY_PTYPE_RND_NZ_N },
+ { FLUSH2, NFIFOENTRY_FC2 },
+ { OC, NFIFOENTRY_OC }
+};
+
+/*
+ * Allowed NFIFO LOAD flags for each SEC Era.
+ * Values represent the number of entries from nfifo_flags[] that are supported.
+ */
+static const unsigned int nfifo_flags_sz[] = {12, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14};
+
+static const uint32_t nfifo_pad_flags[][2] = {
+ { BM, NFIFOENTRY_BM },
+ { PS, NFIFOENTRY_PS },
+ { PR, NFIFOENTRY_PR }
+};
+
+/*
+ * Allowed NFIFO LOAD pad flags for each SEC Era.
+ * Values represent the number of entries from nfifo_pad_flags[] that are
+ * supported.
+ */
+static const unsigned int nfifo_pad_flags_sz[] = {2, 2, 2, 2, 3, 3, 3, 3, 3, 3};
+
+static inline int
+rta_nfifo_load(struct program *program, uint32_t src,
+ uint32_t data, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ uint32_t load_cmd = CMD_LOAD | LDST_IMM | LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO;
+ unsigned int start_pc = program->current_pc;
+
+ if ((data == AFHA_SBOX) && (rta_sec_era == RTA_SEC_ERA_7)) {
+ pr_err("NFIFO: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode(src, nfifo_src, nfifo_src_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write type field */
+ ret = __rta_map_opcode(data, nfifo_data, ARRAY_SIZE(nfifo_data), &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid data. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write DL field */
+ if (!(flags & EXT)) {
+ opcode |= length & NFIFOENTRY_DLEN_MASK;
+ load_cmd |= 4;
+ } else {
+ load_cmd |= 8;
+ }
+
+ /* write flags */
+ __rta_map_flags(flags, nfifo_flags, nfifo_flags_sz[rta_sec_era],
+ &opcode);
+
+	/* in case of padding, also map the padding-specific flags */
+ if (src == PAD)
+ __rta_map_flags(flags, nfifo_pad_flags,
+ nfifo_pad_flags_sz[rta_sec_era], &opcode);
+
+ /* write LOAD command first */
+ __rta_out32(program, load_cmd);
+ __rta_out32(program, opcode);
+
+ if (flags & EXT)
+ __rta_out32(program, length & NFIFOENTRY_DLEN_MASK);
+
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
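+/*
+ * Illustrative sketch, not part of the original sources: asking the
+ * CCB to generate 16 bytes of zero padding, routed as class 1 message
+ * data and flagged as the last class 1 data. 'p' is a hypothetical
+ * initialized struct program:
+ *
+ *   int pc = rta_nfifo_load(p, PAD, MSG1, 16, LAST1 | PAD_ZERO);
+ */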
+#endif /* __RTA_NFIFO_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h
new file mode 100644
index 000000000..9a1788c0f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_OPERATION_CMD_H__
+#define __RTA_OPERATION_CMD_H__
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 70000)
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+#endif
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_alg_aai_aes(uint16_t aai)
+{
+ uint16_t aes_mode = aai & OP_ALG_AESA_MODE_MASK;
+
+ if (aai & OP_ALG_AAI_C2K) {
+ if (rta_sec_era < RTA_SEC_ERA_5)
+			return -EINVAL;
+ if ((aes_mode != OP_ALG_AAI_CCM) &&
+ (aes_mode != OP_ALG_AAI_GCM))
+ return -EINVAL;
+ }
+
+ switch (aes_mode) {
+ case OP_ALG_AAI_CBC_CMAC:
+ case OP_ALG_AAI_CTR_CMAC_LTE:
+ case OP_ALG_AAI_CTR_CMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_CTR:
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_OFB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_XTS:
+ case OP_ALG_AAI_CMAC:
+ case OP_ALG_AAI_XCBC_MAC:
+ case OP_ALG_AAI_CCM:
+ case OP_ALG_AAI_GCM:
+ case OP_ALG_AAI_CBC_XCBCMAC:
+ case OP_ALG_AAI_CTR_XCBCMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_des(uint16_t aai)
+{
+ uint16_t aai_code = (uint16_t)(aai & ~OP_ALG_AAI_CHECKODD);
+
+ switch (aai_code) {
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_OFB:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_md5(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_SMAC:
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_sha(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_rng(uint16_t aai)
+{
+ uint16_t rng_mode = aai & OP_ALG_RNG_MODE_MASK;
+ uint16_t rng_sh = aai & OP_ALG_AAI_RNG4_SH_MASK;
+
+ switch (rng_mode) {
+ case OP_ALG_AAI_RNG:
+ case OP_ALG_AAI_RNG_NZB:
+ case OP_ALG_AAI_RNG_OBP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* State Handle bits are valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && rng_sh)
+ return -EINVAL;
+
+ /* PS, AI, SK bits are also valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && (aai &
+ (OP_ALG_AAI_RNG4_PS | OP_ALG_AAI_RNG4_AI | OP_ALG_AAI_RNG4_SK)))
+ return -EINVAL;
+
+ switch (rng_sh) {
+ case OP_ALG_AAI_RNG4_SH_0:
+ case OP_ALG_AAI_RNG4_SH_1:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_crc(uint16_t aai)
+{
+ uint16_t aai_code = aai & OP_ALG_CRC_POLY_MASK;
+
+ switch (aai_code) {
+ case OP_ALG_AAI_802:
+ case OP_ALG_AAI_3385:
+ case OP_ALG_AAI_CUST_POLY:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_kasumi(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_GSM:
+ case OP_ALG_AAI_EDGE:
+ case OP_ALG_AAI_F8:
+ case OP_ALG_AAI_F9:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f9(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f8(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuce(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuca(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+struct alg_aai_map {
+	uint32_t cipher_algo;
+ int (*aai_func)(uint16_t);
+ uint32_t class;
+};
+
+static const struct alg_aai_map alg_table[] = {
+/*1*/ { OP_ALG_ALGSEL_AES, __rta_alg_aai_aes, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_3DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_MD5, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA1, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA224, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA256, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA384, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA512, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_RNG, __rta_alg_aai_rng, OP_TYPE_CLASS1_ALG },
+/*11*/ { OP_ALG_ALGSEL_CRC, __rta_alg_aai_crc, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ARC4, NULL, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F8, __rta_alg_aai_snow_f8, OP_TYPE_CLASS1_ALG },
+/*14*/ { OP_ALG_ALGSEL_KASUMI, __rta_alg_aai_kasumi, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F9, __rta_alg_aai_snow_f9, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ZUCE, __rta_alg_aai_zuce, OP_TYPE_CLASS1_ALG },
+/*17*/ { OP_ALG_ALGSEL_ZUCA, __rta_alg_aai_zuca, OP_TYPE_CLASS2_ALG }
+};
+
+/*
+ * Allowed OPERATION algorithms for each SEC Era.
+ * Values represent the number of entries from alg_table[] that are supported.
+ */
+static const unsigned int alg_table_sz[] = {14, 15, 15, 15, 17, 17,
+ 11, 17, 17, 17};
+
+static inline int
+rta_operation(struct program *program, uint32_t cipher_algo,
+ uint16_t aai, uint8_t algo_state,
+ int icv_checking, int enc)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ unsigned int start_pc = program->current_pc;
+ int ret;
+
+ for (i = 0; i < alg_table_sz[rta_sec_era]; i++) {
+		if (alg_table[i].cipher_algo == cipher_algo) {
+ opcode |= cipher_algo | alg_table[i].class;
+ /* nothing else to verify */
+ if (alg_table[i].aai_func == NULL) {
+ found = 1;
+ break;
+ }
+
+ aai &= OP_ALG_AAI_MASK;
+
+ ret = (*alg_table[i].aai_func)(aai);
+ if (ret < 0) {
+ pr_err("OPERATION: Bad AAI Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= aai;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ pr_err("OPERATION: Invalid Command. SEC Program Line: %d\n",
+ program->current_pc);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (algo_state) {
+ case OP_ALG_AS_UPDATE:
+ case OP_ALG_AS_INIT:
+ case OP_ALG_AS_FINALIZE:
+ case OP_ALG_AS_INITFINAL:
+ opcode |= algo_state;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (icv_checking) {
+ case ICV_CHECK_DISABLE:
+ /*
+ * opcode |= OP_ALG_ICV_OFF;
+ * OP_ALG_ICV_OFF is 0
+ */
+ break;
+ case ICV_CHECK_ENABLE:
+ opcode |= OP_ALG_ICV_ON;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (enc) {
+ case DIR_DEC:
+ /*
+ * opcode |= OP_ALG_DECRYPT;
+ * OP_ALG_DECRYPT is 0
+ */
+ break;
+ case DIR_ENC:
+ opcode |= OP_ALG_ENCRYPT;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ return ret;
+}
+
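+/*
+ * Illustrative sketch, not part of the original sources: an AES-CBC
+ * encrypt operation, initialized and finalized in a single pass, with
+ * ICV checking disabled. 'p' is a hypothetical initialized
+ * struct program:
+ *
+ *   int pc = rta_operation(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_CBC,
+ *                          OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE,
+ *                          DIR_ENC);
+ */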
+/*
+ * OPERATION PKHA routines
+ */
+static inline int
+__rta_pkha_clearmem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_CLEARMEM_ALL):
+ case (OP_ALG_PKMODE_CLEARMEM_ABE):
+ case (OP_ALG_PKMODE_CLEARMEM_ABN):
+ case (OP_ALG_PKMODE_CLEARMEM_AB):
+ case (OP_ALG_PKMODE_CLEARMEM_AEN):
+ case (OP_ALG_PKMODE_CLEARMEM_AE):
+ case (OP_ALG_PKMODE_CLEARMEM_AN):
+ case (OP_ALG_PKMODE_CLEARMEM_A):
+ case (OP_ALG_PKMODE_CLEARMEM_BEN):
+ case (OP_ALG_PKMODE_CLEARMEM_BE):
+ case (OP_ALG_PKMODE_CLEARMEM_BN):
+ case (OP_ALG_PKMODE_CLEARMEM_B):
+ case (OP_ALG_PKMODE_CLEARMEM_EN):
+ case (OP_ALG_PKMODE_CLEARMEM_N):
+ case (OP_ALG_PKMODE_CLEARMEM_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_mod_arithmetic(uint32_t pkha_op)
+{
+ pkha_op &= (uint32_t)~OP_ALG_PKMODE_OUT_A;
+
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_MULT_IM):
+ case (OP_ALG_PKMODE_MOD_MULT_IM_OM):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_EXPO_TEQ):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM_TEQ):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_F2M_ADD):
+ case (OP_ALG_PKMODE_F2M_MUL):
+ case (OP_ALG_PKMODE_F2M_MUL_IM):
+ case (OP_ALG_PKMODE_F2M_MUL_IM_OM):
+ case (OP_ALG_PKMODE_F2M_EXP):
+ case (OP_ALG_PKMODE_F2M_EXP_TEQ):
+ case (OP_ALG_PKMODE_F2M_AMODN):
+ case (OP_ALG_PKMODE_F2M_INV):
+ case (OP_ALG_PKMODE_F2M_R2):
+ case (OP_ALG_PKMODE_F2M_GCD):
+ case (OP_ALG_PKMODE_F2M_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_copymem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+rta_pkha_operation(struct program *program, uint32_t op_pkha)
+{
+ uint32_t opcode = CMD_OPERATION | OP_TYPE_PK | OP_ALG_PK;
+ uint32_t pkha_func;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ pkha_func = op_pkha & OP_ALG_PK_FUN_MASK;
+
+ switch (pkha_func) {
+ case (OP_ALG_PKMODE_CLEARMEM):
+ ret = __rta_pkha_clearmem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ ret = __rta_pkha_mod_arithmetic(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_COPY_NSZ):
+ case (OP_ALG_PKMODE_COPY_SSZ):
+ ret = __rta_pkha_copymem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ goto err;
+ }
+
+ opcode |= op_pkha;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
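+/*
+ * Illustrative sketch, not part of the original sources: a PKHA
+ * modular exponentiation; ORing OP_ALG_PKMODE_OUT_A into the operation
+ * would select register A for the result, since that bit is masked
+ * off before validation. 'p' is a hypothetical initialized
+ * struct program:
+ *
+ *   int pc = rta_pkha_operation(p, OP_ALG_PKMODE_MOD_EXPO);
+ */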
+#endif /* __RTA_OPERATION_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/protocol_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/protocol_cmd.h
new file mode 100644
index 000000000..e9f20703f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/protocol_cmd.h
@@ -0,0 +1,710 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ *
+ */
+
+#ifndef __RTA_PROTOCOL_CMD_H__
+#define __RTA_PROTOCOL_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_ssl_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_TLS_RSA_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_RSA_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_DH_anon_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_KRB5_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_KRB5_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_SHA:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_RC4_128_SHA:
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+ /* fall through if not Era 7 */
+ case OP_PCL_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_DES_CBC_MD5:
+ case OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_MD5:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5:
+ case OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_MD5:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA160:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA224:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA256:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA384:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA384:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA384:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA384:
+ case OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FE:
+ case OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FF:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ike_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_IKE_HMAC_MD5:
+ case OP_PCL_IKE_HMAC_SHA1:
+ case OP_PCL_IKE_HMAC_AES128_CBC:
+ case OP_PCL_IKE_HMAC_SHA256:
+ case OP_PCL_IKE_HMAC_SHA384:
+ case OP_PCL_IKE_HMAC_SHA512:
+ case OP_PCL_IKE_HMAC_AES128_CMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ipsec_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_IPSEC_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_IPSEC_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ /* CCM, GCM, GMAC require PROTINFO[7:0] = 0 */
+ if (proto_cls2 == OP_PCL_IPSEC_HMAC_NULL)
+ return 0;
+ return -EINVAL;
+ case OP_PCL_IPSEC_NULL:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_AES_CTR:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (proto_cls2) {
+ case OP_PCL_IPSEC_HMAC_NULL:
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_AES_XCBC_MAC_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ case OP_PCL_IPSEC_AES_CMAC_96:
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ return 0;
+ }
+
+ return -EINVAL;
+}
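+
+/*
+ * Illustrative examples of the checks above:
+ * - OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96 is accepted on any Era;
+ * - OP_PCL_IPSEC_AES_GCM16 is accepted only together with
+ *   OP_PCL_IPSEC_HMAC_NULL, since the AEAD ciphers carry their own
+ *   authentication;
+ * - OP_PCL_IPSEC_NULL is rejected on Era 1 devices.
+ */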
+
+static inline int
+__rta_srtp_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_SRTP_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_SRTP_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_SRTP_AES_CTR:
+ switch (proto_cls2) {
+ case OP_PCL_SRTP_HMAC_SHA1_160:
+ return 0;
+ }
+ /* no break */
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_macsec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_MACSEC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wifi_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIFI:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wimax_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIMAX_OFDM:
+ case OP_PCL_WIMAX_OFDMA:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Allowed blob proto flags for each SEC Era */
+static const uint32_t proto_blob_flags[] = {
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM
+};
+
+static inline int
+__rta_blob_proto(uint16_t protoinfo)
+{
+ if (protoinfo & ~proto_blob_flags[rta_sec_era])
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_BLOB_FORMAT_MASK) {
+ case OP_PCL_BLOB_FORMAT_NORMAL:
+ case OP_PCL_BLOB_FORMAT_MASTER_VER:
+ case OP_PCL_BLOB_FORMAT_TEST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_BLOB_REG_MASK) {
+ case OP_PCL_BLOB_AFHA_SBOX:
+ if (rta_sec_era < RTA_SEC_ERA_3)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_BLOB_REG_MEMORY:
+ case OP_PCL_BLOB_REG_KEY1:
+ case OP_PCL_BLOB_REG_KEY2:
+ case OP_PCL_BLOB_REG_SPLIT:
+ case OP_PCL_BLOB_REG_PKE:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_dlc_proto(uint16_t protoinfo)
+{
+ if ((rta_sec_era < RTA_SEC_ERA_2) &&
+ (protoinfo & (OP_PCL_PKPROT_DSA_MSG | OP_PCL_PKPROT_HASH_MASK |
+ OP_PCL_PKPROT_EKT_Z | OP_PCL_PKPROT_DECRYPT_Z |
+ OP_PCL_PKPROT_DECRYPT_PRI)))
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_PKPROT_HASH_MASK) {
+ case OP_PCL_PKPROT_HASH_MD5:
+ case OP_PCL_PKPROT_HASH_SHA1:
+ case OP_PCL_PKPROT_HASH_SHA224:
+ case OP_PCL_PKPROT_HASH_SHA256:
+ case OP_PCL_PKPROT_HASH_SHA384:
+ case OP_PCL_PKPROT_HASH_SHA512:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_enc_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_ENC_F_IN:
+ if ((protoinfo & OP_PCL_RSAPROT_FFF_MASK) !=
+ OP_PCL_RSAPROT_FFF_RED)
+ return -EINVAL;
+ break;
+ case OP_PCL_RSAPROT_OP_ENC_F_OUT:
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_dec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_DEC_ND:
+ case OP_PCL_RSAPROT_OP_DEC_PQD:
+ case OP_PCL_RSAPROT_OP_DEC_PQDPDQC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_RSAPROT_PPP_MASK) {
+ case OP_PCL_RSAPROT_PPP_RED:
+ case OP_PCL_RSAPROT_PPP_ENC:
+ case OP_PCL_RSAPROT_PPP_EKT:
+ case OP_PCL_RSAPROT_PPP_TK_ENC:
+ case OP_PCL_RSAPROT_PPP_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (protoinfo & OP_PCL_RSAPROT_FMT_PKCSV15)
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * DKP Protocol - Restrictions on key (SRC,DST) combinations.
+ * E.g. key_in_out[0][0] = 1 means the (SRC=IMM,DST=IMM) combination is allowed
+ */
+static const uint8_t key_in_out[4][4] = { {1, 0, 0, 0},
+ {1, 1, 1, 1},
+ {1, 0, 1, 0},
+ {1, 0, 0, 1} };
+
+static inline int
+__rta_dkp_proto(uint16_t protoinfo)
+{
+ int key_src = (protoinfo & OP_PCL_DKP_SRC_MASK) >> OP_PCL_DKP_SRC_SHIFT;
+ int key_dst = (protoinfo & OP_PCL_DKP_DST_MASK) >> OP_PCL_DKP_DST_SHIFT;
+
+ if (!key_in_out[key_src][key_dst]) {
+ pr_err("PROTO_DESC: Invalid DKP key (SRC,DST)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
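+
+/*
+ * Example, derived from the table above: key_in_out[0] is {1, 0, 0, 0}, so an
+ * immediate key source may only be combined with an immediate destination;
+ * any other (SRC=IMM, DST=*) pair makes __rta_dkp_proto() return -EINVAL.
+ */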
+
+static inline int
+__rta_3g_dcrc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_DCRC_CRC7:
+ case OP_PCL_3G_DCRC_CRC11:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_3g_rlc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_RLC_NULL:
+ case OP_PCL_3G_RLC_KASUMI:
+ case OP_PCL_3G_RLC_SNOW:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_LTE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ break;
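+		/* fall through when ZUC is supported (Era >= 5) */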
+ case OP_PCL_LTE_NULL:
+ case OP_PCL_LTE_SNOW:
+ case OP_PCL_LTE_AES:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_mixed_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_LTE_MIXED_AUTH_MASK) {
+ case OP_PCL_LTE_MIXED_AUTH_NULL:
+ case OP_PCL_LTE_MIXED_AUTH_SNOW:
+ case OP_PCL_LTE_MIXED_AUTH_AES:
+ case OP_PCL_LTE_MIXED_AUTH_ZUC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_LTE_MIXED_ENC_MASK) {
+ case OP_PCL_LTE_MIXED_ENC_NULL:
+ case OP_PCL_LTE_MIXED_ENC_SNOW:
+ case OP_PCL_LTE_MIXED_ENC_AES:
+ case OP_PCL_LTE_MIXED_ENC_ZUC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+struct proto_map {
+ uint32_t optype;
+ uint32_t protid;
+ int (*protoinfo_func)(uint16_t);
+};
+
+static const struct proto_map proto_table[] = {
+/*1*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_SSL30_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS11_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS12_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV1_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV2_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSAVERIFY, __rta_dlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC, __rta_ipsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SRTP, __rta_srtp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SSL30, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS11, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS12, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_MACSEC, __rta_macsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIFI, __rta_wifi_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIMAX, __rta_wimax_proto},
+/*21*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_BLOB, __rta_blob_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DIFFIEHELLMAN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSAENCRYPT, __rta_rsa_enc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSADECRYPT, __rta_rsa_dec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_DCRC, __rta_3g_dcrc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_PDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_SDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_USER, __rta_lte_pdcp_proto},
+/*29*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL, __rta_lte_pdcp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_MD5, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA1, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA224, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA256, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA384, __rta_dkp_proto},
+/*35*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA512, __rta_dkp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+/*37*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+/*38*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ __rta_lte_pdcp_mixed_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC_NEW, __rta_ipsec_proto},
+/*40*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_USER_RN,
+ __rta_lte_pdcp_mixed_proto},
+};
+
+/*
+ * Allowed OPERATION protocols for each SEC Era.
+ * Values represent the number of entries from proto_table[] that are supported.
+ */
+static const unsigned int proto_table_sz[] = {21, 29, 29, 29, 29, 35, 37,
+ 40, 40, 40};
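+
+/*
+ * For example, rta_sec_era == RTA_SEC_ERA_1 (index 0) limits the lookup below
+ * to the first 21 entries of proto_table[], ending with the BLOB protocol.
+ */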
+
+static inline int
+rta_proto_operation(struct program *program, uint32_t optype,
+ uint32_t protid, uint16_t protoinfo)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ uint32_t optype_tmp = optype;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ for (i = 0; i < proto_table_sz[rta_sec_era]; i++) {
+ /* clear last bit in optype to match also decap proto */
+		/* clear the last bit in optype so decap protocols also match */
+ if (optype_tmp == proto_table[i].optype) {
+ if (proto_table[i].protid == protid) {
+ /* nothing else to verify */
+ if (proto_table[i].protoinfo_func == NULL) {
+ found = 1;
+ break;
+ }
+ /* check protoinfo */
+ ret = (*proto_table[i].protoinfo_func)
+ (protoinfo);
+ if (ret < 0) {
+ pr_err("PROTO_DESC: Bad PROTO Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ found = 1;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ pr_err("PROTO_DESC: Operation Type Mismatch. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ __rta_out32(program, opcode | optype | protid | protoinfo);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
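+
+/*
+ * Usage sketch (illustrative; relies on OP_TYPE_ENCAP_PROTOCOL differing from
+ * OP_TYPE_DECAP_PROTOCOL only in the bit cleared in the lookup loop above):
+ *
+ *	rta_proto_operation(&prg, OP_TYPE_ENCAP_PROTOCOL, OP_PCLID_IPSEC,
+ *			    OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96);
+ */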
+
+static inline int
+rta_dkp_proto(struct program *program, uint32_t protid,
+ uint16_t key_src, uint16_t key_dst,
+ uint16_t keylen, uint64_t key,
+ enum rta_data_type key_type)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int in_words = 0, out_words = 0;
+ int ret;
+
+ key_src &= OP_PCL_DKP_SRC_MASK;
+ key_dst &= OP_PCL_DKP_DST_MASK;
+ keylen &= OP_PCL_DKP_KEY_MASK;
+
+ ret = rta_proto_operation(program, OP_TYPE_UNI_PROTOCOL, protid,
+ key_src | key_dst | keylen);
+ if (ret < 0)
+ return ret;
+
+ if ((key_src == OP_PCL_DKP_SRC_PTR) ||
+ (key_src == OP_PCL_DKP_SRC_SGF)) {
+ __rta_out64(program, program->ps, key);
+ in_words = program->ps ? 2 : 1;
+ } else if (key_src == OP_PCL_DKP_SRC_IMM) {
+ __rta_inline_data(program, key, inline_flags(key_type), keylen);
+ in_words = (unsigned int)((keylen + 3) / 4);
+ }
+
+ if ((key_dst == OP_PCL_DKP_DST_PTR) ||
+ (key_dst == OP_PCL_DKP_DST_SGF)) {
+ out_words = in_words;
+ } else if (key_dst == OP_PCL_DKP_DST_IMM) {
+ out_words = split_key_len(protid) / 4;
+ }
+
+ if (out_words < in_words) {
+ pr_err("PROTO_DESC: DKP doesn't currently support a smaller descriptor\n");
+ program->first_error_pc = start_pc;
+ return -EINVAL;
+ }
+
+ /* If needed, reserve space in resulting descriptor for derived key */
+ program->current_pc += (out_words - in_words);
+
+ return (int)start_pc;
+}
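+
+/*
+ * Worked example (illustrative): deriving a split key from an immediate
+ * 20-byte HMAC-SHA1 key. in_words = (20 + 3) / 4 = 5, while an immediate
+ * destination needs split_key_len() / 4 = 40 / 4 = 10 words, so 5 extra
+ * words are reserved in the descriptor for the key material the SEC engine
+ * writes back.
+ */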
+
+#endif /* __RTA_PROTOCOL_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/sec_run_time_asm.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/sec_run_time_asm.h
new file mode 100644
index 000000000..f40eaadea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/sec_run_time_asm.h
@@ -0,0 +1,823 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_SEC_RUN_TIME_ASM_H__
+#define __RTA_SEC_RUN_TIME_ASM_H__
+
+#include "desc.h"
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "compat.h"
+#endif
+
+/**
+ * enum rta_sec_era - SEC HW block revisions supported by the RTA library
+ * @RTA_SEC_ERA_1: SEC Era 1
+ * @RTA_SEC_ERA_2: SEC Era 2
+ * @RTA_SEC_ERA_3: SEC Era 3
+ * @RTA_SEC_ERA_4: SEC Era 4
+ * @RTA_SEC_ERA_5: SEC Era 5
+ * @RTA_SEC_ERA_6: SEC Era 6
+ * @RTA_SEC_ERA_7: SEC Era 7
+ * @RTA_SEC_ERA_8: SEC Era 8
+ * @MAX_SEC_ERA: maximum SEC HW block revision supported by RTA library
+ */
+enum rta_sec_era {
+ RTA_SEC_ERA_1,
+ RTA_SEC_ERA_2,
+ RTA_SEC_ERA_3,
+ RTA_SEC_ERA_4,
+ RTA_SEC_ERA_5,
+ RTA_SEC_ERA_6,
+ RTA_SEC_ERA_7,
+ RTA_SEC_ERA_8,
+ RTA_SEC_ERA_9,
+ RTA_SEC_ERA_10,
+ MAX_SEC_ERA = RTA_SEC_ERA_10
+};
+
+/**
+ * DEFAULT_SEC_ERA - the default value for the SEC era in case the user provides
+ * an unsupported value.
+ */
+#define DEFAULT_SEC_ERA MAX_SEC_ERA
+
+/**
+ * USER_SEC_ERA - translates the SEC Era from internal to user representation.
+ * @sec_era: SEC Era in internal (library) representation
+ */
+#define USER_SEC_ERA(sec_era) (sec_era + 1)
+
+/**
+ * INTL_SEC_ERA - translates the SEC Era from user representation to internal.
+ * @sec_era: SEC Era in user representation
+ */
+#define INTL_SEC_ERA(sec_era) (sec_era - 1)
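+
+/*
+ * E.g. user-visible "SEC Era 2" maps to RTA_SEC_ERA_2 (value 1):
+ * INTL_SEC_ERA(2) == RTA_SEC_ERA_2 and USER_SEC_ERA(RTA_SEC_ERA_2) == 2.
+ */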
+
+/**
+ * enum rta_jump_type - Types of action taken by JUMP command
+ * @LOCAL_JUMP: conditional jump to an offset within the descriptor buffer
+ * @FAR_JUMP: conditional jump to a location outside the descriptor buffer,
+ * indicated by the POINTER field after the JUMP command.
+ * @HALT: conditional halt - stops the execution of the current descriptor and
+ *        writes the PKHA / Math condition bits as the status / error code.
+ * @HALT_STATUS: conditional halt with user-specified status - stops the
+ *               execution of the current descriptor and writes the value of
+ *               the "LOCAL OFFSET" JUMP field as the status / error code.
+ * @GOSUB: conditional subroutine call - similar to @LOCAL_JUMP, but also saves
+ * return address in the Return Address register; subroutine calls
+ * cannot be nested.
+ * @RETURN: conditional subroutine return - similar to @LOCAL_JUMP, but the
+ * offset is taken from the Return Address register.
+ * @LOCAL_JUMP_INC: similar to @LOCAL_JUMP, but increment the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ * @LOCAL_JUMP_DEC: similar to @LOCAL_JUMP, but decrement the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ */
+enum rta_jump_type {
+ LOCAL_JUMP,
+ FAR_JUMP,
+ HALT,
+ HALT_STATUS,
+ GOSUB,
+ RETURN,
+ LOCAL_JUMP_INC,
+ LOCAL_JUMP_DEC
+};
+
+/**
+ * enum rta_jump_cond - How test conditions are evaluated by JUMP command
+ * @ALL_TRUE: perform action if ALL selected conditions are true
+ * @ALL_FALSE: perform action if ALL selected conditions are false
+ * @ANY_TRUE: perform action if ANY of the selected conditions is true
+ * @ANY_FALSE: perform action if ANY of the selected conditions is false
+ */
+enum rta_jump_cond {
+ ALL_TRUE,
+ ALL_FALSE,
+ ANY_TRUE,
+ ANY_FALSE
+};
+
+/**
+ * enum rta_share_type - Types of sharing for JOB_HDR and SHR_HDR commands
+ * @SHR_NEVER: nothing is shared; descriptors can execute in parallel (i.e. no
+ * dependencies are allowed between them).
+ * @SHR_WAIT: shared descriptor and keys are shared once the descriptor sets
+ * "OK to share" in DECO Control Register (DCTRL).
+ * @SHR_SERIAL: shared descriptor and keys are shared once the descriptor has
+ * completed.
+ * @SHR_ALWAYS: shared descriptor is shared anytime after the descriptor is
+ * loaded.
+ * @SHR_DEFER: valid only for JOB_HDR; sharing type is the one specified
+ * in the shared descriptor associated with the job descriptor.
+ */
+enum rta_share_type {
+ SHR_NEVER,
+ SHR_WAIT,
+ SHR_SERIAL,
+ SHR_ALWAYS,
+ SHR_DEFER
+};
+
+/**
+ * enum rta_data_type - Indicates how the data is provided and how to include
+ * it in the descriptor.
+ * @RTA_DATA_PTR: Data is in memory and accessed by reference; data address is a
+ * physical (bus) address.
+ * @RTA_DATA_IMM: Data is inlined in descriptor and accessed as immediate data;
+ * data address is a virtual address.
+ * @RTA_DATA_IMM_DMA: (AIOP only) Data is inlined in descriptor and accessed as
+ * immediate data; data address is a physical (bus) address
+ * in external memory and CDMA is programmed to transfer the
+ * data into descriptor buffer being built in Workspace Area.
+ */
+enum rta_data_type {
+ RTA_DATA_PTR = 1,
+ RTA_DATA_IMM,
+ RTA_DATA_IMM_DMA
+};
+
+/* Registers definitions */
+enum rta_regs {
+ /* CCB Registers */
+ CONTEXT1 = 1,
+ CONTEXT2,
+ KEY1,
+ KEY2,
+ KEY1SZ,
+ KEY2SZ,
+ ICV1SZ,
+ ICV2SZ,
+ DATA1SZ,
+ DATA2SZ,
+ ALTDS1,
+ IV1SZ,
+ AAD1SZ,
+ MODE1,
+ MODE2,
+ CCTRL,
+ DCTRL,
+ ICTRL,
+ CLRW,
+ CSTAT,
+ IFIFO,
+ NFIFO,
+ OFIFO,
+ PKASZ,
+ PKBSZ,
+ PKNSZ,
+ PKESZ,
+ /* DECO Registers */
+ MATH0,
+ MATH1,
+ MATH2,
+ MATH3,
+ DESCBUF,
+ JOBDESCBUF,
+ SHAREDESCBUF,
+ DPOVRD,
+ DJQDA,
+ DSTAT,
+ DPID,
+ DJQCTRL,
+ ALTSOURCE,
+ SEQINSZ,
+ SEQOUTSZ,
+ VSEQINSZ,
+ VSEQOUTSZ,
+ /* PKHA Registers */
+ PKA,
+ PKN,
+ PKA0,
+ PKA1,
+ PKA2,
+ PKA3,
+ PKB,
+ PKB0,
+ PKB1,
+ PKB2,
+ PKB3,
+ PKE,
+ /* Pseudo registers */
+ AB1,
+ AB2,
+ ABD,
+ IFIFOABD,
+ IFIFOAB1,
+ IFIFOAB2,
+ AFHA_SBOX,
+ MDHA_SPLIT_KEY,
+ JOBSRC,
+ ZERO,
+ ONE,
+ AAD1,
+ IV1,
+ IV2,
+ MSG1,
+ MSG2,
+ MSG,
+ MSG_CKSUM,
+ MSGOUTSNOOP,
+ MSGINSNOOP,
+ ICV1,
+ ICV2,
+ SKIP,
+ NONE,
+ RNGOFIFO,
+ RNG,
+ IDFNS,
+ ODFNS,
+ NFIFOSZ,
+ SZ,
+ PAD,
+ SAD1,
+ AAD2,
+ BIT_DATA,
+ NFIFO_SZL,
+ NFIFO_SZM,
+ NFIFO_L,
+ NFIFO_M,
+ SZL,
+ SZM,
+ JOBDESCBUF_EFF,
+ SHAREDESCBUF_EFF,
+ METADATA,
+ GTR,
+ STR,
+ OFIFO_SYNC,
+ MSGOUTSNOOP_ALT
+};
+
+/* Command flags */
+#define FLUSH1 BIT(0)
+#define LAST1 BIT(1)
+#define LAST2 BIT(2)
+#define IMMED BIT(3)
+#define SGF BIT(4)
+#define VLF BIT(5)
+#define EXT BIT(6)
+#define CONT BIT(7)
+#define SEQ BIT(8)
+#define AIDF BIT(9)
+#define FLUSH2 BIT(10)
+#define CLASS1 BIT(11)
+#define CLASS2 BIT(12)
+#define BOTH BIT(13)
+
+/**
+ * DCOPY - (AIOP only) command param is pointer to external memory
+ *
+ * CDMA must be used to transfer the key via DMA into Workspace Area.
+ * Valid only in combination with IMMED flag.
+ */
+#define DCOPY BIT(30)
+
+#define COPY	BIT(31)	/* command param is pointer (not immediate);
+			 * valid only in combination with the IMMED flag
+			 */
+
+#define __COPY_MASK (COPY | DCOPY)
+
+/* SEQ IN/OUT PTR Command specific flags */
+#define RBS BIT(16)
+#define INL BIT(17)
+#define PRE BIT(18)
+#define RTO BIT(19)
+#define RJD BIT(20)
+#define SOP BIT(21)
+#define RST BIT(22)
+#define EWS BIT(23)
+
+#define ENC BIT(14) /* Encrypted Key */
+#define EKT BIT(15) /* AES CCM Encryption (default is
+ * AES ECB Encryption)
+ */
+#define TK BIT(16) /* Trusted Descriptor Key (default is
+ * Job Descriptor Key)
+ */
+#define NWB BIT(17) /* No Write Back Key */
+#define PTS BIT(18) /* Plaintext Store */
+
+/* HEADER Command specific flags */
+#define RIF BIT(16)
+#define DNR BIT(17)
+#define CIF BIT(18)
+#define PD BIT(19)
+#define RSMS BIT(20)
+#define TD BIT(21)
+#define MTD BIT(22)
+#define REO BIT(23)
+#define SHR BIT(24)
+#define SC BIT(25)
+/* Extended HEADER specific flags */
+#define DSV BIT(7)
+#define DSEL_MASK 0x00000007 /* DECO Select */
+#define FTD BIT(8)
+
+/* JUMP Command specific flags */
+#define NIFP BIT(20)
+#define NIP BIT(21)
+#define NOP BIT(22)
+#define NCP BIT(23)
+#define CALM BIT(24)
+
+#define MATH_Z BIT(25)
+#define MATH_N BIT(26)
+#define MATH_NV BIT(27)
+#define MATH_C BIT(28)
+#define PK_0 BIT(29)
+#define PK_GCD_1 BIT(30)
+#define PK_PRIME BIT(31)
+#define SELF BIT(0)
+#define SHRD BIT(1)
+#define JQP BIT(2)
+
+/* NFIFOADD specific flags */
+#define PAD_ZERO BIT(16)
+#define PAD_NONZERO BIT(17)
+#define PAD_INCREMENT BIT(18)
+#define PAD_RANDOM BIT(19)
+#define PAD_ZERO_N1 BIT(20)
+#define PAD_NONZERO_0 BIT(21)
+#define PAD_N1 BIT(23)
+#define PAD_NONZERO_N BIT(24)
+#define OC BIT(25)
+#define BM BIT(26)
+#define PR BIT(27)
+#define PS BIT(28)
+#define BP BIT(29)
+
+/* MOVE Command specific flags */
+#define WAITCOMP BIT(16)
+#define SIZE_WORD BIT(17)
+#define SIZE_BYTE BIT(18)
+#define SIZE_DWORD BIT(19)
+
+/* MATH command specific flags */
+#define IFB MATH_IFB
+#define NFU MATH_NFU
+#define STL MATH_STL
+#define SSEL MATH_SSEL
+#define SWP MATH_SWP
+#define IMMED2 BIT(31)
+
+/**
+ * struct program - descriptor buffer management structure
+ * @current_pc: current offset in descriptor
+ * @current_instruction: current instruction in descriptor
+ * @first_error_pc: offset of the first error in descriptor
+ * @start_pc: start offset in descriptor buffer
+ * @buffer: buffer carrying descriptor
+ * @shrhdr: shared descriptor header
+ * @jobhdr: job descriptor header
+ * @ps: pointer field size; if ps is true, pointers are 36 bits long;
+ *      if false, pointers are 32 bits long
+ * @bswap: if true, perform byte swap on a 4-byte boundary
+ */
+struct program {
+ unsigned int current_pc;
+ unsigned int current_instruction;
+ unsigned int first_error_pc;
+ unsigned int start_pc;
+ uint32_t *buffer;
+ uint32_t *shrhdr;
+ uint32_t *jobhdr;
+ bool ps;
+ bool bswap;
+};
+
+static inline void
+rta_program_cntxt_init(struct program *program,
+ uint32_t *buffer, unsigned int offset)
+{
+ program->current_pc = 0;
+ program->current_instruction = 0;
+ program->first_error_pc = 0;
+ program->start_pc = offset;
+ program->buffer = buffer;
+ program->shrhdr = NULL;
+ program->jobhdr = NULL;
+ program->ps = false;
+ program->bswap = false;
+}
+
+static inline int
+rta_program_finalize(struct program *program)
+{
+	/* A descriptor is normally not allowed to exceed 64 words */
+ if (program->current_pc > MAX_CAAM_DESCSIZE)
+ pr_warn("Descriptor Size exceeded max limit of 64 words\n");
+
+ /* Descriptor is erroneous */
+ if (program->first_error_pc) {
+ pr_err("Descriptor creation error\n");
+ return -EINVAL;
+ }
+
+ /* Update descriptor length in shared and job descriptor headers */
+ if (program->shrhdr != NULL)
+ *program->shrhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+ else if (program->jobhdr != NULL)
+ *program->jobhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+
+ return (int)program->current_pc;
+}
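+
+/*
+ * Typical lifecycle (illustrative sketch; assumes desc is a word-aligned
+ * buffer of MAX_CAAM_DESCSIZE words and that the descriptor body is emitted
+ * through the RTA command wrappers, e.g. those declared in rta.h):
+ *
+ *	uint32_t desc[MAX_CAAM_DESCSIZE];
+ *	struct program prg;
+ *	int desc_len;
+ *
+ *	rta_program_cntxt_init(&prg, desc, 0);
+ *	... emit header and commands ...
+ *	desc_len = rta_program_finalize(&prg);
+ *	if (desc_len < 0)
+ *		... descriptor construction failed ...
+ */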
+
+static inline unsigned int
+rta_program_set_36bit_addr(struct program *program)
+{
+ program->ps = true;
+ return program->current_pc;
+}
+
+static inline unsigned int
+rta_program_set_bswap(struct program *program)
+{
+ program->bswap = true;
+ return program->current_pc;
+}
+
+static inline void
+__rta_out32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = program->bswap ?
+ swab32(val) : val;
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_be32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_be32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_le32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_le32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out64(struct program *program, bool is_ext, uint64_t val)
+{
+ if (is_ext) {
+ /*
+ * Since we are guaranteed only a 4-byte alignment in the
+ * descriptor buffer, we have to do 2 x 32-bit (word) writes.
+ * For the order of the 2 words to be correct, we need to
+ * take into account the endianness of the CPU.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+#else
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+#endif
+ } else {
+ __rta_out32(program, lower_32_bits(val));
+ }
+}
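+
+/*
+ * E.g. a little-endian build with bswap disabled emits the low 32 bits of a
+ * 64-bit value first and the high 32 bits second; a big-endian build (or
+ * bswap == true) reverses that word order, so the engine reassembles the
+ * same 64-bit value from the 4-byte-aligned buffer either way.
+ */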
+
+static inline void __rta_out_be64(struct program *program, bool is_ext,
+ uint64_t val)
+{
+ if (is_ext) {
+ __rta_out_be32(program, upper_32_bits(val));
+ __rta_out_be32(program, lower_32_bits(val));
+ } else {
+ __rta_out_be32(program, lower_32_bits(val));
+ }
+}
+
+static inline void __rta_out_le64(struct program *program, bool is_ext,
+ uint64_t val)
+{
+ if (is_ext) {
+ __rta_out_le32(program, lower_32_bits(val));
+ __rta_out_le32(program, upper_32_bits(val));
+ } else {
+ __rta_out_le32(program, lower_32_bits(val));
+ }
+}
+
+static inline unsigned int
+rta_word(struct program *program, uint32_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, val);
+
+ return start_pc;
+}
+
+static inline unsigned int
+rta_dword(struct program *program, uint64_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out64(program, true, val);
+
+ return start_pc;
+}
+
+static inline uint32_t
+inline_flags(enum rta_data_type data_type)
+{
+ switch (data_type) {
+ case RTA_DATA_PTR:
+ return 0;
+ case RTA_DATA_IMM:
+ return IMMED | COPY;
+ case RTA_DATA_IMM_DMA:
+ return IMMED | DCOPY;
+ default:
+ /* warn and default to RTA_DATA_PTR */
+ pr_warn("RTA: defaulting to RTA_DATA_PTR parameter type\n");
+ return 0;
+ }
+}
+
+static inline unsigned int
+rta_copy_data(struct program *program, uint8_t *data, unsigned int length)
+{
+ unsigned int i;
+ unsigned int start_pc = program->current_pc;
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+
+ for (i = 0; i < length; i++)
+ *tmp++ = data[i];
+ program->current_pc += (length + 3) / 4;
+
+ return start_pc;
+}
+
+#if defined(__EWL__) && defined(AIOP)
+static inline void
+__rta_dma_data(void *ws_dst, uint64_t ext_address, uint16_t size)
+{ cdma_read(ws_dst, ext_address, size); }
+#else
+static inline void
+__rta_dma_data(void *ws_dst __maybe_unused,
+ uint64_t ext_address __maybe_unused,
+ uint16_t size __maybe_unused)
+{ pr_warn("RTA: DCOPY not supported, DMA will be skipped\n"); }
+#endif /* defined(__EWL__) && defined(AIOP) */
+
+static inline void
+__rta_inline_data(struct program *program, uint64_t data,
+ uint32_t copy_data, uint32_t length)
+{
+ if (!copy_data) {
+ __rta_out64(program, length > 4, data);
+ } else if (copy_data & COPY) {
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+ uint32_t i;
+
+ for (i = 0; i < length; i++)
+ *tmp++ = ((uint8_t *)(uintptr_t)data)[i];
+ program->current_pc += ((length + 3) / 4);
+ } else if (copy_data & DCOPY) {
+ __rta_dma_data(&program->buffer[program->current_pc], data,
+ (uint16_t)length);
+ program->current_pc += ((length + 3) / 4);
+ }
+}
+
+static inline unsigned int
+rta_desc_len(uint32_t *buffer)
+{
+ if ((*buffer & CMD_MASK) == CMD_DESC_HDR) {
+ return *buffer & HDR_DESCLEN_MASK;
+ } else {
+ if (rta_sec_era >= RTA_SEC_ERA_10)
+ return *buffer & HDR_DESCLEN_SHR_MASK_ERA10;
+ else
+ return *buffer & HDR_DESCLEN_SHR_MASK;
+ }
+}
+
+static inline unsigned int
+rta_desc_bytes(uint32_t *buffer)
+{
+ return (unsigned int)(rta_desc_len(buffer) * CAAM_CMD_SZ);
+}
+
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* or
+ * OP_PCLID_DKP_* - MD5, SHA1, SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline uint32_t
+split_key_len(uint32_t hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ uint32_t idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (uint32_t)(mdpadlen[idx] * 2);
+}
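+
+/*
+ * E.g. for SHA-256 the MDHA pad length is 32 bytes, so the split key (the
+ * precomputed HMAC inner and outer pads) is 64 bytes; split_key_pad_len()
+ * below rounds this up to a 16-byte multiple, which leaves it at 64.
+ */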
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline uint32_t
+split_key_pad_len(uint32_t hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
+static inline unsigned int
+rta_set_label(struct program *program)
+{
+ return program->current_pc + program->start_pc;
+}
+
+static inline int
+rta_patch_move(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~MOVE_OFFSET_MASK;
+ opcode |= (new_ref << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_jmp(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~JUMP_OFFSET_MASK;
+ opcode |= (new_ref - (line + program->start_pc)) & JUMP_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
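+
+/*
+ * Patching sketch (illustrative): RTA commands return the word index at which
+ * they were emitted, so a forward reference can be fixed up once its target
+ * is known:
+ *
+ *	int jump_line = rta_jump(...);		(offset not yet known)
+ *	... emit the commands being skipped ...
+ *	unsigned int label = rta_set_label(&prg);
+ *	rta_patch_jmp(&prg, jump_line, label);
+ */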
+
+static inline int
+rta_patch_header(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+ if (rta_sec_era >= RTA_SEC_ERA_10) {
+ opcode &= (uint32_t)~HDR_START_IDX_MASK_ERA10;
+ opcode |= (new_ref << HDR_START_IDX_SHIFT) &
+ HDR_START_IDX_MASK_ERA10;
+ } else {
+ opcode &= (uint32_t)~HDR_START_IDX_MASK;
+ opcode |= (new_ref << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+ }
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_load(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = (bswap ? swab32(program->buffer[line]) :
+ program->buffer[line]) & (uint32_t)~LDST_OFFSET_MASK;
+
+ if (opcode & (LDST_SRCDST_WORD_DESCBUF | LDST_CLASS_DECO))
+ opcode |= (new_ref << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ else
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_store(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~LDST_OFFSET_MASK;
+
+ switch (opcode & LDST_SRCDST_MASK) {
+ case LDST_SRCDST_WORD_DESCBUF:
+ case LDST_SRCDST_WORD_DESCBUF_JOB:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED:
+ case LDST_SRCDST_WORD_DESCBUF_JOB_WE:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED_WE:
+ opcode |= ((new_ref) << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ break;
+ default:
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+ }
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_raw(struct program *program, int line, unsigned int mask,
+ unsigned int new_val)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~mask;
+ opcode |= new_val & mask;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+__rta_map_opcode(uint32_t name, const uint32_t (*map_table)[2],
+ unsigned int num_of_entries, uint32_t *val)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++)
+ if (map_table[i][0] == name) {
+ *val = map_table[i][1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void
+__rta_map_flags(uint32_t flags, const uint32_t (*flags_table)[2],
+ unsigned int num_of_entries, uint32_t *opcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++) {
+ if (flags_table[i][0] & flags)
+ *opcode |= flags_table[i][1];
+ }
+}
+
+#endif /* __RTA_SEC_RUN_TIME_ASM_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/seq_in_out_ptr_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/seq_in_out_ptr_cmd.h
new file mode 100644
index 000000000..5e6af0c83
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/seq_in_out_ptr_cmd.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_SEQ_IN_OUT_PTR_CMD_H__
+#define __RTA_SEQ_IN_OUT_PTR_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed SEQ IN PTR flags for each SEC Era. */
+static const uint32_t seq_in_ptr_flags[] = {
+ RBS | INL | SGF | PRE | EXT | RTO,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP
+};
+
+/* Allowed SEQ OUT PTR flags for each SEC Era. */
+static const uint32_t seq_out_ptr_flags[] = {
+ SGF | PRE | EXT,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS
+};
+
+static inline int
+rta_seq_in_ptr(struct program *program, uint64_t src,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_IN_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if (flags & ~seq_in_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ IN PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & INL) && (flags & RJD)) {
+ pr_err("SEQ IN PTR: Invalid usage of INL and RJD flags\n");
+ goto err;
+ }
+ if ((src) && (flags & (SOP | RTO | PRE))) {
+		pr_err("SEQ IN PTR: Invalid usage of SOP, RTO or PRE flag with a source address\n");
+ goto err;
+ }
+ if ((flags & SOP) && (flags & (RBS | PRE | RTO | EXT))) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and (RBS or PRE or RTO or EXT) flags\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & RBS)
+ opcode |= SQIN_RBS;
+ if (flags & INL)
+ opcode |= SQIN_INL;
+ if (flags & SGF)
+ opcode |= SQIN_SGF;
+ if (flags & PRE)
+ opcode |= SQIN_PRE;
+ if (flags & RTO)
+ opcode |= SQIN_RTO;
+ if (flags & RJD)
+ opcode |= SQIN_RJD;
+ if (flags & SOP)
+ opcode |= SQIN_SOP;
+ if ((length >> 16) || (flags & EXT)) {
+ if (flags & SOP) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and EXT flags\n");
+ goto err;
+ }
+
+ opcode |= SQIN_EXT;
+ } else {
+ opcode |= length & SQIN_LEN_MASK;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQIN_PRE | SQIN_RTO | SQIN_SOP)))
+ __rta_out64(program, program->ps, src);
+
+ /* write extended length field */
+ if (opcode & SQIN_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
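+
+/*
+ * Note: an input sequence of 64 KiB or more forces the extended form above:
+ * length >> 16 is non-zero, so SQIN_EXT is set and the length travels in a
+ * separate word after the pointer instead of in the 16-bit SQIN_LEN_MASK
+ * field.
+ */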
+
+static inline int
+rta_seq_out_ptr(struct program *program, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_OUT_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if (flags & ~seq_out_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ OUT PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if ((dst) && (flags & (RTO | PRE))) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & RST) && !(flags & RTO)) {
+ pr_err("SEQ OUT PTR: RST flag must be used with RTO flag\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & SGF)
+ opcode |= SQOUT_SGF;
+ if (flags & PRE)
+ opcode |= SQOUT_PRE;
+ if (flags & RTO)
+ opcode |= SQOUT_RTO;
+ if (flags & RST)
+ opcode |= SQOUT_RST;
+ if (flags & EWS)
+ opcode |= SQOUT_EWS;
+ if ((length >> 16) || (flags & EXT))
+ opcode |= SQOUT_EXT;
+ else
+ opcode |= length & SQOUT_LEN_MASK;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQOUT_PRE | SQOUT_RTO)))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & SQOUT_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_SEQ_IN_OUT_PTR_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/signature_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/signature_cmd.h
new file mode 100644
index 000000000..4f694ac23
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/signature_cmd.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_SIGNATURE_CMD_H__
+#define __RTA_SIGNATURE_CMD_H__
+
+static inline int
+rta_signature(struct program *program, uint32_t sign_type)
+{
+ uint32_t opcode = CMD_SIGNATURE;
+ unsigned int start_pc = program->current_pc;
+
+ switch (sign_type) {
+ case (SIGN_TYPE_FINAL):
+ case (SIGN_TYPE_FINAL_RESTORE):
+ case (SIGN_TYPE_FINAL_NONZERO):
+ case (SIGN_TYPE_IMM_2):
+ case (SIGN_TYPE_IMM_3):
+ case (SIGN_TYPE_IMM_4):
+ opcode |= sign_type;
+ break;
+ default:
+ pr_err("SIGNATURE Command: Invalid type selection\n");
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_SIGNATURE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/store_cmd.h b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/store_cmd.h
new file mode 100644
index 000000000..5de47d053
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/caamflib/rta/store_cmd.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019 NXP
+ */
+
+#ifndef __RTA_STORE_CMD_H__
+#define __RTA_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t store_src_table[][2] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { DJQDA, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQDAR },
+ { MODE1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { MODE2, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { DJQCTRL, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQCTRL },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DSTAT, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_STAT },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID },
+ { CCTRL, LDST_SRCDST_WORD_CHACTRL },
+ { ICTRL, LDST_SRCDST_WORD_IRQCTRL },
+ { CLRW, LDST_SRCDST_WORD_CLRW },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0 },
+ { CSTAT, LDST_SRCDST_WORD_STAT },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1 },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2 },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF },
+/*30*/ { JOBDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_JOB },
+ { SHAREDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_SHARED },
+/*32*/ { JOBDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_JOB_WE },
+ { SHAREDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_SHARED_WE },
+/*34*/ { GTR, LDST_CLASS_DECO | LDST_SRCDST_WORD_GTR },
+ { STR, LDST_CLASS_DECO | LDST_SRCDST_WORD_STR }
+};
+
+/*
+ * Allowed STORE sources for each SEC Era.
+ * Values represent the number of entries from store_src_table[] that are
+ * supported.
+ */
+static const unsigned int store_src_table_sz[] = {29, 31, 33, 33,
+ 33, 33, 35, 35,
+ 35, 35};
+
+static inline int
+rta_store(struct program *program, uint64_t src,
+ uint16_t offset, uint64_t dst, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_STORE;
+ else
+ opcode = CMD_STORE;
+
+ /* parameters check */
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ if ((flags & IMMED) && (offset != 0)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((flags & SEQ) && ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) ||
+ (src == SHAREDESCBUF_EFF))) {
+ pr_err("STORE: Invalid SRC type. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (flags & IMMED)
+ opcode |= LDST_IMM;
+
+ if ((flags & SGF) || (flags & VLF))
+ opcode |= LDST_VLF;
+
+	/*
+	 * The source of the data to be stored can be specified as:
+	 * - a register location, set in the SRC field (bits 9-15);
+	 * - immediate data in the value field (bits 0-31) when the IMMED flag
+	 *   is set; the caller may pass either the actual value or a pointer
+	 *   to the data.
+	 */
+ if (!(flags & IMMED)) {
+ ret = __rta_map_opcode((uint32_t)src, store_src_table,
+ store_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("STORE: Invalid source. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if ((src == DESCBUF) || (src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF)) {
+ opcode |= (length >> 2);
+ opcode |= (uint32_t)((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (uint32_t)(offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF))
+ return (int)start_pc;
+
+	/* for STORE (not SEQ STORE), write the destination pointer */
+ if (!(flags & SEQ))
+ __rta_out64(program, program->ps, dst);
+
+ /* for IMMED data, place the data here */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
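+
+/*
+ * Usage sketch (illustrative): storing 16 bytes of class 1 context to an
+ * external buffer at bus address ctx_addr:
+ *
+ *	rta_store(&prg, CONTEXT1, 0, ctx_addr, 16, 0);
+ *
+ * CONTEXT1 is mapped through store_src_table[] above; with neither IMMED nor
+ * SEQ set, the destination pointer is written right after the opcode.
+ */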
+
+#endif /* __RTA_STORE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/compat.h b/src/spdk/dpdk/drivers/common/dpaax/compat.h
new file mode 100644
index 000000000..90db68ce7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/compat.h
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ * Copyright 2019 NXP
+ *
+ */
+
+#ifndef __COMPAT_H
+#define __COMPAT_H
+
+#include <sched.h>
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+#include <linux/types.h>
+#include <stdbool.h>
+#include <ctype.h>
+#include <malloc.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <limits.h>
+#include <assert.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <error.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+/* The following definitions are primarily to allow the single-source driver
+ * interfaces to be included by arbitrary program code. I.e. for interfaces
+ * that
+ * are also available in kernel-space, these definitions provide compatibility
+ * with certain attributes and types used in those interfaces.
+ */
+
+/* Required compiler attributes */
+#ifndef __maybe_unused
+#define __maybe_unused __rte_unused
+#endif
+#ifndef __always_unused
+#define __always_unused __rte_unused
+#endif
+#ifndef __packed
+#define __packed __rte_packed
+#endif
+#ifndef noinline
+#define noinline __rte_noinline
+#endif
+#define L1_CACHE_BYTES 64
+#define ____cacheline_aligned __rte_aligned(L1_CACHE_BYTES)
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#endif
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Debugging */
+#define prflush(fmt, args...) \
+ do { \
+ printf(fmt, ##args); \
+ fflush(stdout); \
+ } while (0)
+#ifndef pr_crit
+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#endif
+#ifndef pr_err
+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#endif
+#ifndef pr_warn
+#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#endif
+#ifndef pr_info
+#define pr_info(fmt, args...) prflush(fmt, ##args)
+#endif
+#ifndef pr_debug
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
+#define pr_debug(fmt, args...) printf(fmt, ##args)
+#else
+#define pr_debug(fmt, args...) {}
+#endif
+#endif
+
+#define DPAA_BUG_ON(x) RTE_ASSERT(x)
+
+/* Required types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+typedef cpu_set_t cpumask_t;
+typedef uint32_t phandle;
+typedef uint32_t gfp_t;
+typedef uint32_t irqreturn_t;
+
+#define ETHER_ADDR_LEN 6
+
+#define IRQ_HANDLED 0
+#define request_irq qbman_request_irq
+#define free_irq qbman_free_irq
+
+#define __iomem
+#define GFP_KERNEL 0
+#define __raw_readb(p) (*(const volatile unsigned char *)(p))
+#define __raw_readl(p) (*(const volatile unsigned int *)(p))
+#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+
+/* to be used as an upper-limit only */
+#define NR_CPUS 64
+
+/* Waitqueue stuff */
+typedef struct { } wait_queue_head_t;
+#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
+#define wake_up(x) do { } while (0)
+
+/* I/O operations */
+static inline u32 in_be32(volatile void *__p)
+{
+ volatile u32 *p = __p;
+ return rte_be_to_cpu_32(*p);
+}
+
+static inline void out_be32(volatile void *__p, u32 val)
+{
+ volatile u32 *p = __p;
+ *p = rte_cpu_to_be_32(val);
+}
+
+#define hwsync() rte_rmb()
+#define lwsync() rte_wmb()
+
+#define dcbt_ro(p) __builtin_prefetch(p, 0)
+#define dcbt_rw(p) __builtin_prefetch(p, 1)
+
+#if defined(RTE_ARCH_ARM64)
+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
+#define dcbz_64(p) dcbz(p)
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
+
+#define dcbit_ro(p) \
+ do { \
+ dccivac(p); \
+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
+ } while (0)
+
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset((p), 0, 32)
+#define dcbz_64(p) memset((p), 0, 64)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define dcbz_64(p) dcbz(p)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+#endif
+
+#define barrier() { asm volatile ("" : : : "memory"); }
+#define cpu_relax barrier
+
+#if defined(RTE_ARCH_ARM64)
+static inline uint64_t mfatb(void)
+{
+ uint64_t ret, ret_new, timeout = 200;
+
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
+ while (ret != ret_new && timeout--) {
+ ret = ret_new;
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
+ }
+ DPAA_BUG_ON(!timeout && (ret != ret_new));
+ return ret * 64;
+}
+#else
+
+#define mfatb rte_rdtsc
+
+#endif
+
+/* Spin for a few cycles without bothering the bus */
+static inline void cpu_spin(int cycles)
+{
+ uint64_t now = mfatb();
+
+ while (mfatb() < (now + cycles))
+ ;
+}
+
+/* Qman/Bman API inlines and macros */
+#ifdef lower_32_bits
+#undef lower_32_bits
+#endif
+#define lower_32_bits(x) ((u32)(x))
+
+#ifdef upper_32_bits
+#undef upper_32_bits
+#endif
+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
+
+/*
+ * Swap bytes of a 48-bit value.
+ */
+static inline uint64_t
+__bswap_48(uint64_t x)
+{
+ return ((x & 0x0000000000ffULL) << 40) |
+ ((x & 0x00000000ff00ULL) << 24) |
+ ((x & 0x000000ff0000ULL) << 8) |
+ ((x & 0x0000ff000000ULL) >> 8) |
+ ((x & 0x00ff00000000ULL) >> 24) |
+ ((x & 0xff0000000000ULL) >> 40);
+}
+
+/*
+ * Swap bytes of a 40-bit value.
+ */
+static inline uint64_t
+__bswap_40(uint64_t x)
+{
+ return ((x & 0x00000000ffULL) << 32) |
+ ((x & 0x000000ff00ULL) << 16) |
+ ((x & 0x0000ff0000ULL)) |
+ ((x & 0x00ff000000ULL) >> 16) |
+ ((x & 0xff00000000ULL) >> 32);
+}
+
+/*
+ * Swap bytes of a 24-bit value.
+ */
+static inline uint32_t
+__bswap_24(uint32_t x)
+{
+ return ((x & 0x0000ffULL) << 16) |
+ ((x & 0x00ff00ULL)) |
+ ((x & 0xff0000ULL) >> 16);
+}
+
+#define be64_to_cpu(x) rte_be_to_cpu_64(x)
+#define be32_to_cpu(x) rte_be_to_cpu_32(x)
+#define be16_to_cpu(x) rte_be_to_cpu_16(x)
+
+#define cpu_to_be64(x) rte_cpu_to_be_64(x)
+#if !defined(cpu_to_be32)
+#define cpu_to_be32(x) rte_cpu_to_be_32(x)
+#endif
+#define cpu_to_be16(x) rte_cpu_to_be_16(x)
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define cpu_to_be48(x) __bswap_48(x)
+#define be48_to_cpu(x) __bswap_48(x)
+
+#define cpu_to_be40(x) __bswap_40(x)
+#define be40_to_cpu(x) __bswap_40(x)
+
+#define cpu_to_be24(x) __bswap_24(x)
+#define be24_to_cpu(x) __bswap_24(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define cpu_to_be48(x) (x)
+#define be48_to_cpu(x) (x)
+
+#define cpu_to_be40(x) (x)
+#define be40_to_cpu(x) (x)
+
+#define cpu_to_be24(x) (x)
+#define be24_to_cpu(x) (x)
+
+#endif /* RTE_BIG_ENDIAN */
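+
+/* Editor's sketch, not part of the original header: the odd-width helpers
+ * above are involutions, so converting twice must return the input. The
+ * guard macro is hypothetical and never defined by any build.
+ */
+#ifdef DPAAX_COMPAT_EXAMPLES
+static inline void __bswap_40_roundtrip_example(void)
+{
+	uint64_t v = 0x0000a1b2c3d4e5ULL; /* arbitrary 40-bit value */
+
+	/* cpu_to_be40() swaps bytes on LE hosts and is a no-op on BE;
+	 * either way the double conversion is an identity.
+	 */
+	DPAA_BUG_ON(be40_to_cpu(cpu_to_be40(v)) != v);
+}
+#endif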
+
+/* When copying aligned words or shorts, try to avoid memcpy() */
+/* memcpy() stuff - when you know alignments in advance */
+#define CONFIG_TRY_BETTER_MEMCPY
+
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+ u32 *__dest = dest;
+ const u32 *__src = src;
+ size_t __sz = sz >> 2;
+
+ DPAA_BUG_ON((unsigned long)dest & 0x3);
+ DPAA_BUG_ON((unsigned long)src & 0x3);
+ DPAA_BUG_ON(sz & 0x3);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_shorts(void *dest, const void *src, size_t sz)
+{
+ u16 *__dest = dest;
+ const u16 *__src = src;
+ size_t __sz = sz >> 1;
+
+ DPAA_BUG_ON((unsigned long)dest & 0x1);
+ DPAA_BUG_ON((unsigned long)src & 0x1);
+ DPAA_BUG_ON(sz & 0x1);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_bytes(void *dest, const void *src, size_t sz)
+{
+ u8 *__dest = dest;
+ const u8 *__src = src;
+
+ while (sz--)
+ *(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#define copy_shorts memcpy
+#define copy_bytes memcpy
+#endif
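+
+/* Editor's sketch, not part of the original header: copy_words() requires
+ * both pointers and the byte count to be 4-byte aligned, which the
+ * DPAA_BUG_ON() checks above enforce. Hypothetical guard macro only.
+ */
+#ifdef DPAAX_COMPAT_EXAMPLES
+static inline void copy_words_example(void)
+{
+	u32 src[4] = { 1, 2, 3, 4 };
+	u32 dst[4];
+
+	/* u32 arrays satisfy the alignment contract; sz is in bytes */
+	copy_words(dst, src, sizeof(src));
+	DPAA_BUG_ON(dst[3] != 4); /* contents copied verbatim */
+}
+#endif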
+
+/* Allocator stuff */
+#define kmalloc(sz, t) rte_malloc(NULL, sz, 0)
+#define vmalloc(sz) rte_malloc(NULL, sz, 0)
+#define kfree(p) { if (p) rte_free(p); }
+static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
+{
+ void *ptr = rte_malloc(NULL, sz, 0);
+
+ if (ptr)
+ memset(ptr, 0, sz);
+ return ptr;
+}
+
+static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
+{
+ void *p;
+
+ if (posix_memalign(&p, 4096, 4096))
+ return 0;
+ memset(p, 0, 4096);
+ return (unsigned long)p;
+}
+
+/* Spinlock stuff */
+#define spinlock_t rte_spinlock_t
+#define __SPIN_LOCK_UNLOCKED(x) RTE_SPINLOCK_INITIALIZER
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define spin_lock_init(x) rte_spinlock_init(x)
+#define spin_lock_destroy(x)
+#define spin_lock(x) rte_spinlock_lock(x)
+#define spin_unlock(x) rte_spinlock_unlock(x)
+#define spin_lock_irq(x) spin_lock(x)
+#define spin_unlock_irq(x) spin_unlock(x)
+#define spin_lock_irqsave(x, f) spin_lock_irq(x)
+#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)
+
+#define atomic_t rte_atomic32_t
+#define atomic_read(v) rte_atomic32_read(v)
+#define atomic_set(v, i) rte_atomic32_set(v, i)
+
+#define atomic_inc(v) rte_atomic32_add(v, 1)
+#define atomic_dec(v) rte_atomic32_sub(v, 1)
+
+#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
+#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)
+
+#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
+#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
+#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
+
+#endif /* __COMPAT_H */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/dpaa_list.h b/src/spdk/dpdk/drivers/common/dpaax/dpaa_list.h
new file mode 100644
index 000000000..e94575982
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/dpaa_list.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_LIST_H
+#define __DPAA_LIST_H
+
+/****************/
+/* Linked-lists */
+/****************/
+
+struct list_head {
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+#define COMPAT_LIST_HEAD(n) \
+struct list_head n = { \
+ .prev = &n, \
+ .next = &n \
+}
+
+#define INIT_LIST_HEAD(p) \
+do { \
+ struct list_head *__p298 = (p); \
+ __p298->next = __p298; \
+ __p298->prev = __p298->next; \
+} while (0)
+#define list_entry(node, type, member) \
+ (type *)((void *)node - offsetof(type, member))
+#define list_empty(p) \
+({ \
+ const struct list_head *__p298 = (p); \
+ ((__p298->next == __p298) && (__p298->prev == __p298)); \
+})
+#define list_add(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->next = __l298->next; \
+ __p298->prev = __l298; \
+ __l298->next->prev = __p298; \
+ __l298->next = __p298; \
+} while (0)
+#define list_add_tail(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->prev = __l298->prev; \
+ __p298->next = __l298; \
+ __l298->prev->next = __p298; \
+ __l298->prev = __p298; \
+} while (0)
+#define list_for_each(i, l) \
+ for (i = (l)->next; i != (l); i = i->next)
+#define list_for_each_safe(i, j, l) \
+ for (i = (l)->next, j = i->next; i != (l); \
+ i = j, j = i->next)
+#define list_for_each_entry(i, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
+ i = list_entry(i->name.next, typeof(*i), name))
+#define list_for_each_entry_safe(i, j, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name), \
+ j = list_entry(i->name.next, typeof(*j), name); \
+ &i->name != (l); \
+ i = j, j = list_entry(j->name.next, typeof(*j), name))
+#define list_del(i) \
+do { \
+ (i)->next->prev = (i)->prev; \
+ (i)->prev->next = (i)->next; \
+} while (0)
+
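+/* Editor's sketch, not part of the original header: a list_head is embedded
+ * in the containing struct and list_for_each_entry() recovers the container
+ * via offsetof(). The struct and guard macro are hypothetical.
+ */
+#ifdef DPAA_LIST_EXAMPLES
+struct dpaa_list_example_item {
+	int value;
+	struct list_head node;
+};
+
+static inline int dpaa_list_example_sum(struct list_head *items)
+{
+	struct dpaa_list_example_item *it;
+	int sum = 0;
+
+	/* 'node' is the name of the list_head member inside the struct */
+	list_for_each_entry(it, items, node)
+		sum += it->value;
+	return sum;
+}
+#endif
+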
+#endif /* __DPAA_LIST_H */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/dpaa_of.c b/src/spdk/dpdk/drivers/common/dpaax/dpaa_of.c
new file mode 100644
index 000000000..bb2c8fc66
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/dpaa_of.c
@@ -0,0 +1,589 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <dpaa_of.h>
+#include <assert.h>
+#include <rte_string_fns.h>
+#include <dpaax_logs.h>
+
+static int alive;
+static struct dt_dir root_dir;
+static const char *base_dir;
+static COMPAT_LIST_HEAD(linear);
+
+static int
+of_open_dir(const char *relative_path, struct dirent ***d)
+{
+ int ret;
+ char full_path[PATH_MAX];
+
+ snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+ ret = scandir(full_path, d, 0, versionsort);
+ if (ret < 0)
+ DPAAX_LOG(ERR, "Failed to open directory %s",
+ full_path);
+ return ret;
+}
+
+static void
+of_close_dir(struct dirent **d, int num)
+{
+ while (num--)
+ free(d[num]);
+ free(d);
+}
+
+static int
+of_open_file(const char *relative_path)
+{
+ int ret;
+ char full_path[PATH_MAX];
+
+ snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+ ret = open(full_path, O_RDONLY);
+ if (ret < 0)
+		DPAAX_LOG(ERR, "Failed to open file %s",
+ full_path);
+ return ret;
+}
+
+static void
+process_file(struct dirent *dent, struct dt_dir *parent)
+{
+ int fd;
+ struct dt_file *f = malloc(sizeof(*f));
+
+ if (!f) {
+ DPAAX_LOG(DEBUG, "Unable to allocate memory for file node");
+ return;
+ }
+ f->node.is_file = 1;
+ strlcpy(f->node.node.name, dent->d_name, NAME_MAX);
+ snprintf(f->node.node.full_name, PATH_MAX, "%s/%s",
+ parent->node.node.full_name, dent->d_name);
+ f->parent = parent;
+ fd = of_open_file(f->node.node.full_name);
+ if (fd < 0) {
+ DPAAX_LOG(DEBUG, "Unable to open file node");
+ free(f);
+ return;
+ }
+ f->len = read(fd, f->buf, OF_FILE_BUF_MAX);
+ close(fd);
+ if (f->len < 0) {
+ DPAAX_LOG(DEBUG, "Unable to read file node");
+ free(f);
+ return;
+ }
+ list_add_tail(&f->node.list, &parent->files);
+}
+
+static const struct dt_dir *
+node2dir(const struct device_node *n)
+{
+ struct dt_node *dn = container_of((struct device_node *)n,
+ struct dt_node, node);
+ const struct dt_dir *d = container_of(dn, struct dt_dir, node);
+
+ assert(!dn->is_file);
+ return d;
+}
+
+/* process_dir() calls iterate_dir(), but the latter will also call the former
+ * when recursing into sub-directories, so a predeclaration is needed.
+ */
+static int process_dir(const char *relative_path, struct dt_dir *dt);
+
+static int
+iterate_dir(struct dirent **d, int num, struct dt_dir *dt)
+{
+ int loop;
+ /* Iterate the directory contents */
+ for (loop = 0; loop < num; loop++) {
+ struct dt_dir *subdir;
+ int ret;
+ /* Ignore dot files of all types (especially "..") */
+ if (d[loop]->d_name[0] == '.')
+ continue;
+ switch (d[loop]->d_type) {
+ case DT_REG:
+ process_file(d[loop], dt);
+ break;
+ case DT_DIR:
+ subdir = malloc(sizeof(*subdir));
+ if (!subdir) {
+ perror("malloc");
+ return -ENOMEM;
+ }
+ strlcpy(subdir->node.node.name, d[loop]->d_name,
+ NAME_MAX);
+ snprintf(subdir->node.node.full_name, PATH_MAX,
+ "%s/%s", dt->node.node.full_name,
+ d[loop]->d_name);
+ subdir->parent = dt;
+ ret = process_dir(subdir->node.node.full_name, subdir);
+ if (ret)
+ return ret;
+ list_add_tail(&subdir->node.list, &dt->subdirs);
+ break;
+ default:
+ DPAAX_LOG(DEBUG, "Ignoring invalid dt entry %s/%s",
+ dt->node.node.full_name, d[loop]->d_name);
+ }
+ }
+ return 0;
+}
+
+static int
+process_dir(const char *relative_path, struct dt_dir *dt)
+{
+ struct dirent **d;
+ int ret, num;
+
+ dt->node.is_file = 0;
+ INIT_LIST_HEAD(&dt->subdirs);
+ INIT_LIST_HEAD(&dt->files);
+ ret = of_open_dir(relative_path, &d);
+ if (ret < 0)
+ return ret;
+ num = ret;
+ ret = iterate_dir(d, num, dt);
+ of_close_dir(d, num);
+ return (ret < 0) ? ret : 0;
+}
+
+static void
+linear_dir(struct dt_dir *d)
+{
+ struct dt_file *f;
+ struct dt_dir *dd;
+
+ d->compatible = NULL;
+ d->status = NULL;
+ d->lphandle = NULL;
+ d->a_cells = NULL;
+ d->s_cells = NULL;
+ d->reg = NULL;
+ list_for_each_entry(f, &d->files, node.list) {
+ if (!strcmp(f->node.node.name, "compatible")) {
+ if (d->compatible)
+ DPAAX_LOG(DEBUG, "Duplicate compatible in"
+ " %s", d->node.node.full_name);
+ d->compatible = f;
+ } else if (!strcmp(f->node.node.name, "status")) {
+ if (d->status)
+ DPAAX_LOG(DEBUG, "Duplicate status in %s",
+ d->node.node.full_name);
+ d->status = f;
+ } else if (!strcmp(f->node.node.name, "linux,phandle")) {
+ if (d->lphandle)
+ DPAAX_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "phandle")) {
+ if (d->lphandle)
+ DPAAX_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "#address-cells")) {
+ if (d->a_cells)
+ DPAAX_LOG(DEBUG, "Duplicate a_cells in %s",
+ d->node.node.full_name);
+ d->a_cells = f;
+ } else if (!strcmp(f->node.node.name, "#size-cells")) {
+ if (d->s_cells)
+ DPAAX_LOG(DEBUG, "Duplicate s_cells in %s",
+ d->node.node.full_name);
+ d->s_cells = f;
+ } else if (!strcmp(f->node.node.name, "reg")) {
+ if (d->reg)
+ DPAAX_LOG(DEBUG, "Duplicate reg in %s",
+ d->node.node.full_name);
+ d->reg = f;
+ }
+ }
+
+ list_for_each_entry(dd, &d->subdirs, node.list) {
+ list_add_tail(&dd->linear, &linear);
+ linear_dir(dd);
+ }
+}
+
+int
+of_init_path(const char *dt_path)
+{
+ int ret;
+
+ base_dir = dt_path;
+
+ /* This needs to be singleton initialization */
+ DPAAX_HWWARN(alive, "Double-init of device-tree driver!");
+
+ /* Prepare root node (the remaining fields are set in process_dir()) */
+ root_dir.node.node.name[0] = '\0';
+ root_dir.node.node.full_name[0] = '\0';
+ INIT_LIST_HEAD(&root_dir.node.list);
+ root_dir.parent = NULL;
+
+ /* Kick things off... */
+ ret = process_dir("", &root_dir);
+ if (ret) {
+ DPAAX_LOG(ERR, "Unable to parse device tree");
+ return ret;
+ }
+
+ /* Now make a flat, linear list of directories */
+ linear_dir(&root_dir);
+ alive = 1;
+ return 0;
+}
+
+static void
+destroy_dir(struct dt_dir *d)
+{
+ struct dt_file *f, *tmpf;
+ struct dt_dir *dd, *tmpd;
+
+ list_for_each_entry_safe(f, tmpf, &d->files, node.list) {
+ list_del(&f->node.list);
+ free(f);
+ }
+ list_for_each_entry_safe(dd, tmpd, &d->subdirs, node.list) {
+ destroy_dir(dd);
+ list_del(&dd->node.list);
+ free(dd);
+ }
+}
+
+void
+of_finish(void)
+{
+ DPAAX_HWWARN(!alive, "Double-finish of device-tree driver!");
+
+ destroy_dir(&root_dir);
+ INIT_LIST_HEAD(&linear);
+ alive = 0;
+}
+
+static const struct dt_dir *
+next_linear(const struct dt_dir *f)
+{
+ if (f->linear.next == &linear)
+ return NULL;
+ return list_entry(f->linear.next, struct dt_dir, linear);
+}
+
+static int
+check_compatible(const struct dt_file *f, const char *compatible)
+{
+ const char *c = (char *)f->buf;
+ unsigned int len, remains = f->len;
+
+ while (remains) {
+ len = strlen(c);
+ if (!strcmp(c, compatible))
+ return 1;
+
+ if (remains < len + 1)
+ break;
+
+ c += (len + 1);
+ remains -= (len + 1);
+ }
+ return 0;
+}
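+
+/* Editor's note: a "compatible" property is a list of NUL-terminated strings
+ * packed back to back (e.g. "vendor,dev-2.0\0vendor,dev\0"); the loop above
+ * advances one string at a time until a match or the buffer is exhausted.
+ */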
+
+const struct device_node *
+of_find_compatible_node(const struct device_node *from,
+ const char *type __rte_unused,
+ const char *compatible)
+{
+ const struct dt_dir *d;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (list_empty(&linear))
+ return NULL;
+ if (!from)
+ d = list_entry(linear.next, struct dt_dir, linear);
+ else
+ d = node2dir(from);
+ for (d = next_linear(d); d && (!d->compatible ||
+ !check_compatible(d->compatible,
+ compatible));
+ d = next_linear(d))
+ ;
+ if (d)
+ return &d->node.node;
+ return NULL;
+}
+
+const void *
+of_get_property(const struct device_node *from, const char *name,
+ size_t *lenp)
+{
+ const struct dt_dir *d;
+ const struct dt_file *f;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ d = node2dir(from);
+ list_for_each_entry(f, &d->files, node.list)
+ if (!strcmp(f->node.node.name, name)) {
+ if (lenp)
+ *lenp = f->len;
+ return f->buf;
+ }
+ return NULL;
+}
+
+bool
+of_device_is_available(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+ d = node2dir(dev_node);
+ if (!d->status)
+ return true;
+ if (!strcmp((char *)d->status->buf, "okay"))
+ return true;
+ if (!strcmp((char *)d->status->buf, "ok"))
+ return true;
+ return false;
+}
+
+const struct device_node *
+of_find_node_by_phandle(uint64_t ph)
+{
+ const struct dt_dir *d;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+ list_for_each_entry(d, &linear, linear)
+ if (d->lphandle && (d->lphandle->len == 4) &&
+ !memcmp(d->lphandle->buf, &ph, 4))
+ return &d->node.node;
+ return NULL;
+}
+
+const struct device_node *
+of_get_parent(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (!dev_node)
+ return NULL;
+ d = node2dir(dev_node);
+ if (!d->parent)
+ return NULL;
+ return &d->parent->node.node;
+}
+
+const struct device_node *
+of_get_next_child(const struct device_node *dev_node,
+ const struct device_node *prev)
+{
+ const struct dt_dir *p, *c;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (!dev_node)
+ return NULL;
+ p = node2dir(dev_node);
+ if (prev) {
+ c = node2dir(prev);
+ DPAAX_HWWARN((c->parent != p), "Parent/child mismatch");
+ if (c->parent != p)
+ return NULL;
+ if (c->node.list.next == &p->subdirs)
+ /* prev was the last child */
+ return NULL;
+ c = list_entry(c->node.list.next, struct dt_dir, node.list);
+ return &c->node.node;
+ }
+ /* Return first child */
+ if (list_empty(&p->subdirs))
+ return NULL;
+ c = list_entry(p->subdirs.next, struct dt_dir, node.list);
+ return &c->node.node;
+}
+
+uint32_t
+of_n_addr_cells(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised");
+ if (!dev_node)
+ return OF_DEFAULT_NA;
+ d = node2dir(dev_node);
+ while ((d = d->parent))
+ if (d->a_cells) {
+ unsigned char *buf =
+ (unsigned char *)&d->a_cells->buf[0];
+ assert(d->a_cells->len == 4);
+ return ((uint32_t)buf[0] << 24) |
+ ((uint32_t)buf[1] << 16) |
+ ((uint32_t)buf[2] << 8) |
+ (uint32_t)buf[3];
+ }
+ return OF_DEFAULT_NA;
+}
+
+uint32_t
+of_n_size_cells(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+ if (!dev_node)
+ return OF_DEFAULT_NA;
+ d = node2dir(dev_node);
+ while ((d = d->parent))
+ if (d->s_cells) {
+ unsigned char *buf =
+ (unsigned char *)&d->s_cells->buf[0];
+ assert(d->s_cells->len == 4);
+ return ((uint32_t)buf[0] << 24) |
+ ((uint32_t)buf[1] << 16) |
+ ((uint32_t)buf[2] << 8) |
+ (uint32_t)buf[3];
+ }
+ return OF_DEFAULT_NS;
+}
+
+const uint32_t *
+of_get_address(const struct device_node *dev_node, size_t idx,
+ uint64_t *size, uint32_t *flags __rte_unused)
+{
+ const struct dt_dir *d;
+ const unsigned char *buf;
+ uint32_t na = of_n_addr_cells(dev_node);
+ uint32_t ns = of_n_size_cells(dev_node);
+
+ if (!dev_node)
+ d = &root_dir;
+ else
+ d = node2dir(dev_node);
+ if (!d->reg)
+ return NULL;
+ assert(d->reg->len % ((na + ns) * 4) == 0);
+ assert(d->reg->len / ((na + ns) * 4) > (unsigned int) idx);
+ buf = (const unsigned char *)&d->reg->buf[0];
+ buf += (na + ns) * idx * 4;
+ if (size)
+ for (*size = 0; ns > 0; ns--, na++)
+ *size = (*size << 32) +
+ (((uint32_t)buf[4 * na] << 24) |
+ ((uint32_t)buf[4 * na + 1] << 16) |
+ ((uint32_t)buf[4 * na + 2] << 8) |
+ (uint32_t)buf[4 * na + 3]);
+ return (const uint32_t *)buf;
+}
+
+uint64_t
+of_translate_address(const struct device_node *dev_node,
+ const uint32_t *addr)
+{
+ uint64_t phys_addr, tmp_addr;
+ const struct device_node *parent;
+ const uint32_t *ranges;
+ size_t rlen;
+ uint32_t na, pna;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+ assert(dev_node != NULL);
+
+ na = of_n_addr_cells(dev_node);
+ phys_addr = of_read_number(addr, na);
+
+ dev_node = of_get_parent(dev_node);
+ if (!dev_node)
+ return 0;
+ else if (node2dir(dev_node) == &root_dir)
+ return phys_addr;
+
+ do {
+ pna = of_n_addr_cells(dev_node);
+ parent = of_get_parent(dev_node);
+ if (!parent)
+ return 0;
+
+ ranges = of_get_property(dev_node, "ranges", &rlen);
+ /* "ranges" property is missing. Translation breaks */
+ if (!ranges)
+ return 0;
+ /* "ranges" property is empty. Do 1:1 translation */
+ else if (rlen == 0)
+ continue;
+ else
+ tmp_addr = of_read_number(ranges + na, pna);
+
+ na = pna;
+ dev_node = parent;
+ phys_addr += tmp_addr;
+ } while (node2dir(parent) != &root_dir);
+
+ return phys_addr;
+}
+
+bool
+of_device_is_compatible(const struct device_node *dev_node,
+ const char *compatible)
+{
+ const struct dt_dir *d;
+
+ DPAAX_HWWARN(!alive, "Device-tree driver not initialised!");
+ if (!dev_node)
+ d = &root_dir;
+ else
+ d = node2dir(dev_node);
+ if (d->compatible && check_compatible(d->compatible, compatible))
+ return true;
+ return false;
+}
+
+static const void *of_get_mac_addr(const struct device_node *np,
+ const char *name)
+{
+ return of_get_property(np, name, NULL);
+}
+
+/**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the device tree, but were not set by U-Boot. For example, the
+ * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+ */
+const void *of_get_mac_address(const struct device_node *np)
+{
+ const void *addr;
+
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
+
+ addr = of_get_mac_addr(np, "local-mac-address");
+ if (addr)
+ return addr;
+
+ return of_get_mac_addr(np, "address");
+}
diff --git a/src/spdk/dpdk/drivers/common/dpaax/dpaa_of.h b/src/spdk/dpdk/drivers/common/dpaax/dpaa_of.h
new file mode 100644
index 000000000..aed6bf98b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/dpaa_of.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __OF_H
+#define __OF_H
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <glob.h>
+#include <errno.h>
+#include <ctype.h>
+#include <limits.h>
+#include <rte_common.h>
+#include <dpaa_list.h>
+#include <rte_compat.h>
+
+#ifndef OF_INIT_DEFAULT_PATH
+#define OF_INIT_DEFAULT_PATH "/proc/device-tree"
+#endif
+
+#define OF_DEFAULT_NA 1
+#define OF_DEFAULT_NS 1
+
+#define OF_FILE_BUF_MAX 256
+
+/**
+ * Layout of Device Tree:
+ * dt_dir
+ * |- dt_dir
+ * | |- dt_dir
+ * | | |- dt_dir
+ * | | | |- dt_file
+ * | | | ``- dt_file
+ * | | ``- dt_file
+ * | ``- dt_file
+ * ``- dt_file
+ *
+ * +------------------+
+ * |dt_dir |
+ * |+----------------+|
+ * ||dt_node ||
+ * ||+--------------+||
+ * |||device_node |||
+ * ||+--------------+||
+ * || list_dt_nodes ||
+ * |+----------------+|
+ * | list of subdir |
+ * | list of files |
+ * +------------------+
+ */
+
+/**
+ * Device description of a device node in the device tree.
+ */
+struct device_node {
+ char name[NAME_MAX];
+ char full_name[PATH_MAX];
+};
+
+/**
+ * List of device nodes available in a device tree layout
+ */
+struct dt_node {
+ struct device_node node; /**< Property of node */
+ int is_file; /**< FALSE==dir, TRUE==file */
+ struct list_head list; /**< Nodes within a parent subdir */
+};
+
+/**
+ * Types we use to represent directories and files
+ */
+struct dt_file;
+struct dt_dir {
+ struct dt_node node;
+ struct list_head subdirs;
+ struct list_head files;
+ struct list_head linear;
+ struct dt_dir *parent;
+ struct dt_file *compatible;
+ struct dt_file *status;
+ struct dt_file *lphandle;
+ struct dt_file *a_cells;
+ struct dt_file *s_cells;
+ struct dt_file *reg;
+};
+
+struct dt_file {
+ struct dt_node node;
+ struct dt_dir *parent;
+ ssize_t len;
+ uint64_t buf[OF_FILE_BUF_MAX >> 3];
+};
+
+__rte_internal
+const struct device_node *of_find_compatible_node(
+ const struct device_node *from,
+ const char *type __rte_unused,
+ const char *compatible)
+ __attribute__((nonnull(3)));
+
+#define for_each_compatible_node(dev_node, type, compatible) \
+ for (dev_node = of_find_compatible_node(NULL, type, compatible); \
+ dev_node != NULL; \
+ dev_node = of_find_compatible_node(dev_node, type, compatible))
+
+__rte_internal
+const void *of_get_property(const struct device_node *from, const char *name,
+ size_t *lenp) __attribute__((nonnull(2)));
+__rte_internal
+bool of_device_is_available(const struct device_node *dev_node);
+
+__rte_internal
+const struct device_node *of_find_node_by_phandle(uint64_t ph);
+
+__rte_internal
+const struct device_node *of_get_parent(const struct device_node *dev_node);
+
+__rte_internal
+const struct device_node *of_get_next_child(const struct device_node *dev_node,
+ const struct device_node *prev);
+
+__rte_internal
+const void *of_get_mac_address(const struct device_node *np);
+
+#define for_each_child_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
+
+__rte_internal
+uint32_t of_n_addr_cells(const struct device_node *dev_node);
+uint32_t of_n_size_cells(const struct device_node *dev_node);
+
+__rte_internal
+const uint32_t *of_get_address(const struct device_node *dev_node, size_t idx,
+ uint64_t *size, uint32_t *flags);
+
+__rte_internal
+uint64_t of_translate_address(const struct device_node *dev_node,
+ const uint32_t *addr) __attribute__((nonnull));
+
+__rte_internal
+bool of_device_is_compatible(const struct device_node *dev_node,
+ const char *compatible);
+
+/* of_init() must be called prior to initialisation or use of any driver
+ * subsystem that is device-tree-dependent. Eg. Qman/Bman, config layers, etc.
+ * The path should usually be "/proc/device-tree".
+ */
+__rte_internal
+int of_init_path(const char *dt_path);
+
+/* of_finish() allows a controlled tear-down of the device-tree layer, eg. if a
+ * full reload is desired without a process exit.
+ */
+void of_finish(void);
+
+/* Use of this wrapper is recommended. */
+static inline int of_init(void)
+{
+ return of_init_path(OF_INIT_DEFAULT_PATH);
+}
+
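+/* Editor's sketch, not part of the original header: the typical call
+ * sequence is of_init() once, then node iteration plus property lookups.
+ * The function name, guard macro and compatible string are hypothetical.
+ */
+#ifdef DPAA_OF_EXAMPLES
+static inline int dpaa_of_example_probe(void)
+{
+	const struct device_node *np;
+	const void *prop;
+	size_t len;
+
+	if (of_init())
+		return -1;
+	for_each_compatible_node(np, NULL, "vendor,example-device") {
+		/* returns a pointer into the cached property buffer */
+		prop = of_get_property(np, "reg", &len);
+		if (prop != NULL)
+			return 0;
+	}
+	return -1;
+}
+#endif
+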
+/* Read a numeric property according to its size and return it as a 64-bit
+ * value.
+ */
+static inline uint64_t of_read_number(const uint32_t *cell, int size)
+{
+ uint64_t r = 0;
+
+ while (size--)
+ r = (r << 32) | be32toh(*(cell++));
+ return r;
+}
+
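+/* Editor's note: with size == 2 and the cells holding 0x00000001 and
+ * 0x00000002 (big-endian on disk), the result is 0x0000000100000002 -
+ * cells are concatenated most-significant first.
+ */
+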
+#endif /* __OF_H */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/src/spdk/dpdk/drivers/common/dpaax/dpaax_iova_table.c
new file mode 100644
index 000000000..98b076e09
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/dpaax_iova_table.c
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <rte_memory.h>
+
+#include "dpaax_iova_table.h"
+#include "dpaax_logs.h"
+
+/* Global dpaax logger identifier */
+int dpaax_logger;
+
+/* Global table reference */
+struct dpaax_iova_table *dpaax_iova_table_p;
+
+static int dpaax_handle_memevents(void);
+
+/* A structure representing one (address, length) pair from the 'reg'
+ * property of a device-tree memory node under /proc/device-tree.
+ */
+struct reg_node {
+ phys_addr_t addr;
+ size_t len;
+};
+
+/* An ntohll-equivalent routine
+ * XXX: This is only applicable in a 64-bit environment.
+ */
+static void
+rotate_8(unsigned char *arr)
+{
+ uint32_t temp;
+ uint32_t *first_half;
+ uint32_t *second_half;
+
+ first_half = (uint32_t *)(arr);
+ second_half = (uint32_t *)(arr + 4);
+
+ temp = *first_half;
+ *first_half = *second_half;
+ *second_half = temp;
+
+ *first_half = ntohl(*first_half);
+ *second_half = ntohl(*second_half);
+}
+
+/* read_memory_node
+ * The memory layout for DPAAx platforms (LS1043, LS1046, LS1088, LS2088,
+ * LX2160) is populated by U-Boot and available in the device tree:
+ * /proc/device-tree/memory@<address>/reg (the 'reg' property).
+ * Entries are of the form:
+ * (<8 byte start addr><8 byte length>)(..more similar blocks of start,len>)..
+ *
+ * @param count
+ * OUT populate number of entries found in memory node
+ * @return
+ * Pointer to array of reg_node elements, count size
+ */
+static struct reg_node *
+read_memory_node(unsigned int *count)
+{
+ int fd, ret, i;
+ unsigned int j;
+ glob_t result = {0};
+ struct stat statbuf = {0};
+ char file_data[MEM_NODE_FILE_LEN];
+ struct reg_node *nodes = NULL;
+
+ *count = 0;
+
+ ret = glob(MEM_NODE_PATH_GLOB, 0, NULL, &result);
+ if (ret != 0)
+ ret = glob(MEM_NODE_PATH_GLOB_VM, 0, NULL, &result);
+
+ if (ret != 0) {
+ DPAAX_DEBUG("Unable to glob device-tree memory node (err: %d)",
+ ret);
+ goto out;
+ }
+
+ if (result.gl_pathc != 1) {
+ /* Either more than one memory@<addr> node found, or none.
+ * In either case, cannot work ahead.
+		 * In either case, we cannot proceed.
+ DPAAX_DEBUG("Found (%zu) entries in device-tree. Not supported!",
+ result.gl_pathc);
+ goto out;
+ }
+
+ DPAAX_DEBUG("Opening and parsing device-tree node: (%s)",
+ result.gl_pathv[0]);
+ fd = open(result.gl_pathv[0], O_RDONLY);
+ if (fd < 0) {
+ DPAAX_DEBUG("Unable to open the device-tree node: (%s)(fd=%d)",
+ MEM_NODE_PATH_GLOB, fd);
+ goto cleanup;
+ }
+
+ /* Stat to get the file size */
+ ret = fstat(fd, &statbuf);
+ if (ret != 0) {
+ DPAAX_DEBUG("Unable to get device-tree memory node size.");
+ goto cleanup;
+ }
+
+ DPAAX_DEBUG("Size of device-tree mem node: %" PRIu64, statbuf.st_size);
+ if (statbuf.st_size > MEM_NODE_FILE_LEN) {
+ DPAAX_DEBUG("More memory nodes available than assumed.");
+ DPAAX_DEBUG("System may not work properly!");
+ }
+
+ ret = read(fd, file_data, statbuf.st_size > MEM_NODE_FILE_LEN ?
+ MEM_NODE_FILE_LEN : statbuf.st_size);
+ if (ret <= 0) {
+ DPAAX_DEBUG("Unable to read device-tree memory node: (%d)",
+ ret);
+ goto cleanup;
+ }
+
+ /* The reg node should be multiple of 16 bytes, 8 bytes each for addr
+ * and len.
+ */
+ *count = (statbuf.st_size / 16);
+ if ((*count) <= 0 || (statbuf.st_size % 16 != 0)) {
+ DPAAX_DEBUG("Invalid memory node values or count. (size=%" PRIu64 ")",
+ statbuf.st_size);
+ goto cleanup;
+ }
+
+ /* each entry is of 16 bytes, and size/16 is total count of entries */
+ nodes = malloc(sizeof(struct reg_node) * (*count));
+ if (!nodes) {
+ DPAAX_DEBUG("Failure in allocating working memory.");
+ goto cleanup;
+ }
+ memset(nodes, 0, sizeof(struct reg_node) * (*count));
+
+ for (i = 0, j = 0; i < (statbuf.st_size) && j < (*count); i += 16, j++) {
+ memcpy(&nodes[j], file_data + i, 16);
+ /* Rotate (ntohl) each 8 byte entry */
+ rotate_8((unsigned char *)(&(nodes[j].addr)));
+ rotate_8((unsigned char *)(&(nodes[j].len)));
+ }
+
+ DPAAX_DEBUG("Device-tree memory node data:");
+	/* j now equals *count, so step back before each access */
+	while (j--)
+		DPAAX_DEBUG("    %08" PRIx64 " %08zu",
+			    nodes[j].addr, nodes[j].len);
+
+cleanup:
+ close(fd);
+ globfree(&result);
+out:
+ return nodes;
+}
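+
+/* Editor's note: as an illustration, a 32-byte reg property containing the
+ * big-endian pairs (0x80000000, 0x80000000) and (0x880000000, 0x780000000)
+ * yields count = 2 and two reg_node entries with those addr/len values
+ * after rotate_8() has byte-reversed each 8-byte field.
+ */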
+
+int
+dpaax_iova_table_populate(void)
+{
+ int ret;
+ unsigned int i, node_count;
+ size_t tot_memory_size, total_table_size;
+ struct reg_node *nodes;
+ struct dpaax_iovat_element *entry;
+
+ /* dpaax_iova_table_p is a singleton - only one instance should be
+ * created.
+ */
+ if (dpaax_iova_table_p) {
+ DPAAX_DEBUG("Multiple allocation attempt for IOVA Table (%p)",
+ dpaax_iova_table_p);
+ /* This can be an error case as well - some path not cleaning
+ * up table - but, for now, it is assumed that if IOVA Table
+ * pointer is valid, table is allocated.
+ */
+ return 0;
+ }
+
+ nodes = read_memory_node(&node_count);
+ if (nodes == NULL) {
+ DPAAX_WARN("PA->VA translation not available;");
+ DPAAX_WARN("Expect performance impact.");
+ return -1;
+ }
+
+ tot_memory_size = 0;
+ for (i = 0; i < node_count; i++)
+ tot_memory_size += nodes[i].len;
+
+ DPAAX_DEBUG("Total available PA memory size: %zu", tot_memory_size);
+
+ /* Total table size = meta data + tot_memory_size/8 */
+ total_table_size = sizeof(struct dpaax_iova_table) +
+ (sizeof(struct dpaax_iovat_element) * node_count) +
+ ((tot_memory_size / DPAAX_MEM_SPLIT) * sizeof(uint64_t));
+
+	/* TODO: This memory doesn't need to be shared but must always be
+	 * pinned in RAM (no swap out) - use hugepages rather than malloc
+ */
+ dpaax_iova_table_p = rte_zmalloc(NULL, total_table_size, 0);
+ if (dpaax_iova_table_p == NULL) {
+ DPAAX_WARN("Unable to allocate memory for PA->VA Table;");
+ DPAAX_WARN("PA->VA translation not available;");
+ DPAAX_WARN("Expect performance impact.");
+ free(nodes);
+ return -1;
+ }
+
+ /* Initialize table */
+ dpaax_iova_table_p->count = node_count;
+ entry = dpaax_iova_table_p->entries;
+
+ DPAAX_DEBUG("IOVA Table entries: (entry start = %p)", (void *)entry);
+ DPAAX_DEBUG("\t(entry),(start),(len),(next)");
+
+ for (i = 0; i < node_count; i++) {
+ /* dpaax_iova_table_p
+ * | dpaax_iova_table_p->entries
+ * | |
+ * | |
+ * V V
+ * +------+------+-------+---+----------+---------+---
+ * |iova_ |entry | entry | | pages | pages |
+ * |table | 1 | 2 |...| entry 1 | entry2 |
+ * +-----'+.-----+-------+---+;---------+;--------+---
+ * \ \ / /
+ * `~~~~~~|~~~~~>pages /
+ * \ /
+ * `~~~~~~~~~~~>pages
+ */
+ entry[i].start = nodes[i].addr;
+ entry[i].len = nodes[i].len;
+ if (i > 0)
+ entry[i].pages = entry[i-1].pages +
+ ((entry[i-1].len/DPAAX_MEM_SPLIT));
+ else
+ entry[i].pages = (uint64_t *)((unsigned char *)entry +
+ (sizeof(struct dpaax_iovat_element) *
+ node_count));
+
+ DPAAX_DEBUG("\t(%u),(%8"PRIx64"),(%8zu),(%8p)",
+ i, entry[i].start, entry[i].len, entry[i].pages);
+ }
+
+ /* Release memory associated with nodes array - not required now */
+ free(nodes);
+
+ DPAAX_DEBUG("Adding mem-event handler");
+ ret = dpaax_handle_memevents();
+ if (ret) {
+ DPAAX_ERR("Unable to add mem-event handler");
+ DPAAX_WARN("Cases with non-buffer pool mem won't work!");
+ }
+
+ return 0;
+}
+
+void
+dpaax_iova_table_depopulate(void)
+{
+ if (dpaax_iova_table_p == NULL)
+ return;
+
+	/* entries[] is a flexible member of the single table allocation, so
+	 * the table pointer itself is what must be freed.
+	 */
+	rte_free(dpaax_iova_table_p);
+ dpaax_iova_table_p = NULL;
+
+	DPAAX_DEBUG("IOVA Table cleaned up");
+}
+
+int
+dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length)
+{
+ int found = 0;
+ unsigned int i;
+ size_t req_length = length, e_offset;
+ struct dpaax_iovat_element *entry;
+ uintptr_t align_vaddr;
+ phys_addr_t align_paddr;
+
+ if (unlikely(dpaax_iova_table_p == NULL))
+ return -1;
+
+ align_paddr = paddr & DPAAX_MEM_SPLIT_MASK;
+ align_vaddr = ((uintptr_t)vaddr & DPAAX_MEM_SPLIT_MASK);
+
+ /* Check if paddr is available in table */
+ entry = dpaax_iova_table_p->entries;
+ for (i = 0; i < dpaax_iova_table_p->count; i++) {
+ if (align_paddr < entry[i].start) {
+			/* An address below this entry's start that was not
+			 * matched by an earlier entry cannot exist in the
+			 * table.
+			 */
+			DPAAX_ERR("Add: Incorrect entry for PA->VA Table"
+				  " (%"PRIu64")", paddr);
+			DPAAX_ERR("Add: Lowest address: %"PRIu64"",
+				  entry[i].start);
+ return -1;
+ }
+
+		if (align_paddr >= (entry[i].start + entry[i].len))
+ continue;
+
+ /* align_paddr >= start && align_paddr < (start + len) */
+ found = 1;
+
+ do {
+ e_offset = ((align_paddr - entry[i].start) / DPAAX_MEM_SPLIT);
+ /* TODO: Whatif something already exists at this
+ * location - is that an error? For now, ignoring the
+ * case.
+ */
+ entry[i].pages[e_offset] = align_vaddr;
+#ifdef RTE_COMMON_DPAAX_DEBUG
+ DPAAX_DEBUG("Added: vaddr=%zu for Phy:%"PRIu64" at %zu"
+ " remaining len %zu", align_vaddr,
+ align_paddr, e_offset, req_length);
+#endif
+ /* Incoming request can be larger than the
+ * DPAAX_MEM_SPLIT size - in which case, multiple
+ * entries in entry->pages[] are filled up.
+ */
+ if (req_length <= DPAAX_MEM_SPLIT)
+ break;
+ align_paddr += DPAAX_MEM_SPLIT;
+ align_vaddr += DPAAX_MEM_SPLIT;
+ req_length -= DPAAX_MEM_SPLIT;
+ } while (1);
+
+ break;
+ }
+
+ if (!found) {
+		/* There might be a case where the incoming physical address
+		 * is beyond the addresses discovered in the device-tree
+		 * memory node - especially if some malloc'd area is used by
+		 * EAL and the memevent handler passes that across. But this
+		 * is not necessarily an error.
+		 */
+ DPAAX_DEBUG("Add: Unable to find slot for vaddr:(%p),"
+ " phy(%"PRIu64")",
+ vaddr, paddr);
+ return -1;
+ }
+#ifdef RTE_COMMON_DPAAX_DEBUG
+ DPAAX_DEBUG("Add: Found slot at (%"PRIu64")[(%zu)] for vaddr:(%p),"
+ " phy(%"PRIu64"), len(%zu)", entry[i].start, e_offset,
+ vaddr, paddr, length);
+#endif
+ return 0;
+}
+
+/* dpaax_iova_table_dump
+ * Dump the table, with its entries, on screen. Only works in debug mode.
+ * Not for the faint-hearted - the tables can get quite large.
+ */
+void
+dpaax_iova_table_dump(void)
+{
+ unsigned int i, j;
+ struct dpaax_iovat_element *entry;
+
+	/* In case DEBUG is not enabled, some 'if' conditions might misbehave
+	 * as they have nothing else in them except a DPAAX_DEBUG() which, if
+	 * compiled out, would leave the 'if' body empty.
+	 */
+ if (rte_log_get_global_level() < RTE_LOG_DEBUG) {
+ DPAAX_ERR("Set log level to Debug for PA->Table dump!");
+ return;
+ }
+
+ DPAAX_DEBUG(" === Start of PA->VA Translation Table ===");
+ if (dpaax_iova_table_p == NULL)
+ DPAAX_DEBUG("\tNULL");
+
+ entry = dpaax_iova_table_p->entries;
+ for (i = 0; i < dpaax_iova_table_p->count; i++) {
+ DPAAX_DEBUG("\t(%16i),(%16"PRIu64"),(%16zu),(%16p)",
+ i, entry[i].start, entry[i].len, entry[i].pages);
+ DPAAX_DEBUG("\t\t (PA), (VA)");
+		for (j = 0; j < (entry[i].len/DPAAX_MEM_SPLIT); j++) {
+ if (entry[i].pages[j] == 0)
+ continue;
+ DPAAX_DEBUG("\t\t(%16"PRIx64"),(%16"PRIx64")",
+ (entry[i].start + (j * sizeof(uint64_t))),
+ entry[i].pages[j]);
+ }
+ }
+ DPAAX_DEBUG(" === End of PA->VA Translation Table ===");
+}
+
+static void
+dpaax_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
+ void *arg __rte_unused)
+{
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0, map_len = 0;
+ phys_addr_t phys_addr;
+ void *virt_addr;
+ int ret;
+
+ DPAAX_DEBUG("Called with addr=%p, len=%zu", addr, len);
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ while (cur_len < len) {
+ const void *va = RTE_PTR_ADD(addr, cur_len);
+
+ ms = rte_mem_virt2memseg(va, msl);
+ phys_addr = rte_mem_virt2phy(ms->addr);
+ virt_addr = ms->addr;
+ map_len = ms->len;
+#ifdef RTE_COMMON_DPAAX_DEBUG
+ DPAAX_DEBUG("Request for %s, va=%p, virt_addr=%p,"
+ "iova=%"PRIu64", map_len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ?
+ "alloc" : "dealloc",
+ va, virt_addr, phys_addr, map_len);
+#endif
+ if (type == RTE_MEM_EVENT_ALLOC)
+ ret = dpaax_iova_table_update(phys_addr, virt_addr,
+ map_len);
+ else
+			/* For RTE_MEM_EVENT_FREE the complete hugepage is
+			 * released and its PA entry is set to 0.
+			 */
+ ret = dpaax_iova_table_update(phys_addr, 0, map_len);
+
+ if (ret != 0) {
+ DPAAX_DEBUG("PA-Table entry update failed. "
+ "Map=%d, addr=%p, len=%zu, err:(%d)",
+ type, va, map_len, ret);
+ return;
+ }
+
+ cur_len += map_len;
+ }
+}
+
+static int
+dpaax_memevent_walk_memsegs(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, size_t len,
+ void *arg __rte_unused)
+{
+ DPAAX_DEBUG("Walking for %p (pa=%"PRIu64") and len %zu",
+ ms->addr, ms->phys_addr, len);
+ dpaax_iova_table_update(rte_mem_virt2phy(ms->addr), ms->addr, len);
+ return 0;
+}
+
+static int
+dpaax_handle_memevents(void)
+{
+	/* First, walk through all memsegs and pin them, before installing
+	 * the handler. This ensures that all memsegs already identified or
+	 * allocated by EAL are part of the PA->VA Table before events start
+	 * arriving. This matters especially when the application allocates
+	 * memory before EAL, or when externally allocated memory is passed
+	 * to EAL.
+	 */
+ rte_memseg_contig_walk_thread_unsafe(dpaax_memevent_walk_memsegs, NULL);
+
+ return rte_mem_event_callback_register("dpaax_memevents_cb",
+ dpaax_memevent_cb, NULL);
+}
+
+RTE_INIT(dpaax_log)
+{
+ dpaax_logger = rte_log_register("pmd.common.dpaax");
+ if (dpaax_logger >= 0)
+ rte_log_set_level(dpaax_logger, RTE_LOG_ERR);
+}
diff --git a/src/spdk/dpdk/drivers/common/dpaax/dpaax_iova_table.h b/src/spdk/dpdk/drivers/common/dpaax/dpaax_iova_table.h
new file mode 100644
index 000000000..230fba8ba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/dpaax_iova_table.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _DPAAX_IOVA_TABLE_H_
+#define _DPAAX_IOVA_TABLE_H_
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <glob.h>
+#include <errno.h>
+#include <arpa/inet.h>
+
+#include <rte_eal.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+
+struct dpaax_iovat_element {
+ phys_addr_t start; /**< Start address of block of physical pages */
+ size_t len; /**< Difference of end-start for quick access */
+ uint64_t *pages; /**< VA for each physical page in this block */
+};
+
+struct dpaax_iova_table {
+ unsigned int count; /**< No. of blocks of contiguous physical pages */
+ struct dpaax_iovat_element entries[0];
+};
+
+/* Pointer to the table, which is common for DPAA/DPAA2 and only a single
+ * instance is required across net/crypto/event drivers. This table is
+ * populated iff devices are found on the bus.
+ */
+extern struct dpaax_iova_table *dpaax_iova_table_p;
+
+/* Device tree file for memory layout is named 'memory@<addr>' where the 'addr'
+ * is SoC dependent, or even Uboot fixup dependent.
+ */
+#define MEM_NODE_PATH_GLOB "/proc/device-tree/memory[@0-9]*/reg"
+/* For virtual machines, the memory node is at a different path (below) */
+#define MEM_NODE_PATH_GLOB_VM "/proc/device-tree/memory/reg"
+/* The device file should be a multiple of 16 bytes: 8 bytes of address
+ * followed by 8 bytes of length per entry. Assuming a max of 5 entries.
+ */
+#define MEM_NODE_FILE_LEN ((16 * 5) + 1)
+
+/* The table splits each contiguous zone into DPAAX_MEM_SPLIT-sized elements.
+ * This helps avoid separate handling for cases where more than one size of hugepage
+ * is supported.
+ */
+#define DPAAX_MEM_SPLIT (1<<21)
+#define DPAAX_MEM_SPLIT_MASK ~(DPAAX_MEM_SPLIT - 1) /**< Floor aligned */
+#define DPAAX_MEM_SPLIT_MASK_OFF (DPAAX_MEM_SPLIT - 1) /**< Offset */
+
+/* APIs exposed */
+__rte_internal
+int dpaax_iova_table_populate(void);
+__rte_internal
+void dpaax_iova_table_depopulate(void);
+__rte_internal
+int dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length);
+__rte_internal
+void dpaax_iova_table_dump(void);
+
+static inline void *dpaax_iova_table_get_va(phys_addr_t paddr) __rte_hot;
+
+static inline void *
+dpaax_iova_table_get_va(phys_addr_t paddr) {
+ unsigned int i = 0, index;
+ void *vaddr = 0;
+ phys_addr_t paddr_align = paddr & DPAAX_MEM_SPLIT_MASK;
+ size_t offset = paddr & DPAAX_MEM_SPLIT_MASK_OFF;
+ struct dpaax_iovat_element *entry;
+
+ if (unlikely(dpaax_iova_table_p == NULL))
+ return NULL;
+
+ entry = dpaax_iova_table_p->entries;
+
+ do {
+		if (unlikely(i >= dpaax_iova_table_p->count))
+ break;
+
+ if (paddr_align < entry[i].start) {
+ /* Incorrect paddr; Not in memory range */
+ return NULL;
+ }
+
+		if (paddr_align >= (entry[i].start + entry[i].len)) {
+			i++;
+			continue;
+		}
+
+		/* entry->start <= paddr_align < entry->(start + len) */
+ index = (paddr_align - entry[i].start)/DPAAX_MEM_SPLIT;
+ vaddr = (void *)((uintptr_t)entry[i].pages[index] + offset);
+ break;
+ } while (1);
+
+ return vaddr;
+}
+
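+/* Editor's sketch, not part of the original header: expected life cycle is
+ * dpaax_iova_table_populate() once, implicit updates via the memory-event
+ * callback, then fast-path lookups. The function name is hypothetical.
+ */
+#ifdef DPAAX_IOVA_TABLE_EXAMPLES
+static inline void *dpaax_example_lookup(phys_addr_t pa)
+{
+	/* One-time, process-wide setup; returns 0 if already populated */
+	if (dpaax_iova_table_populate() != 0)
+		return NULL;
+
+	/* O(number of blocks) scan plus one indexed page lookup */
+	return dpaax_iova_table_get_va(pa);
+}
+#endif
+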
+#endif /* _DPAAX_IOVA_TABLE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/dpaax_logs.h b/src/spdk/dpdk/drivers/common/dpaax/dpaax_logs.h
new file mode 100644
index 000000000..180476f67
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/dpaax_logs.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _DPAAX_LOGS_H_
+#define _DPAAX_LOGS_H_
+
+#include <rte_log.h>
+
+extern int dpaax_logger;
+
+#ifdef RTE_LIBRTE_DPAAX_DEBUG
+#define DPAAX_HWWARN(cond, fmt, args...) \
+ do {\
+ if (cond) \
+ DPAAX_LOG(DEBUG, "WARN: " fmt, ##args); \
+ } while (0)
+#else
+#define DPAAX_HWWARN(cond, fmt, args...) do { } while (0)
+#endif
+
+#define DPAAX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaax_logger, "dpaax: " fmt "\n", \
+ ##args)
+
+/* Debug logs are with Function names */
+#define DPAAX_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaax_logger, "dpaax: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define DPAAX_INFO(fmt, args...) \
+ DPAAX_LOG(INFO, fmt, ## args)
+#define DPAAX_ERR(fmt, args...) \
+ DPAAX_LOG(ERR, fmt, ## args)
+#define DPAAX_WARN(fmt, args...) \
+ DPAAX_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAAX_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAAX_DP_DEBUG(fmt, args...) \
+ DPAAX_DP_LOG(DEBUG, fmt, ## args)
+#define DPAAX_DP_INFO(fmt, args...) \
+ DPAAX_DP_LOG(INFO, fmt, ## args)
+#define DPAAX_DP_WARN(fmt, args...) \
+ DPAAX_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAAX_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/dpaax/meson.build b/src/spdk/dpdk/drivers/common/dpaax/meson.build
new file mode 100644
index 000000000..0b8bf7bd5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 NXP
+
+if not is_linux
+ build = false
+ reason = 'only supported on linux'
+endif
+
+sources = files('dpaax_iova_table.c', 'dpaa_of.c', 'caamflib.c')
+
+includes += include_directories('caamflib')
+
+cflags += ['-D_GNU_SOURCE']
+if cc.has_argument('-Wno-cast-qual')
+ cflags += '-Wno-cast-qual'
+endif
+if cc.has_argument('-Wno-pointer-arith')
+ cflags += '-Wno-pointer-arith'
+endif
diff --git a/src/spdk/dpdk/drivers/common/dpaax/rte_common_dpaax_version.map b/src/spdk/dpdk/drivers/common/dpaax/rte_common_dpaax_version.map
new file mode 100644
index 000000000..49c775c07
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/dpaax/rte_common_dpaax_version.map
@@ -0,0 +1,26 @@
+DPDK_20.0 {
+ local: *;
+};
+
+INTERNAL {
+ global:
+
+ dpaax_iova_table_depopulate;
+ dpaax_iova_table_dump;
+ dpaax_iova_table_p;
+ dpaax_iova_table_populate;
+ dpaax_iova_table_update;
+ of_device_is_available;
+ of_device_is_compatible;
+ of_find_compatible_node;
+ of_find_node_by_phandle;
+ of_get_address;
+ of_get_mac_address;
+ of_get_next_child;
+ of_get_parent;
+ of_get_property;
+ of_init_path;
+ of_n_addr_cells;
+ of_translate_address;
+ rta_sec_era;
+};
diff --git a/src/spdk/dpdk/drivers/common/iavf/Makefile b/src/spdk/dpdk/drivers/common/iavf/Makefile
new file mode 100644
index 000000000..f06dafd40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_iavf.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -Wno-cast-qual
+
+EXPORT_MAP := rte_common_iavf_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += iavf_adminq.c
+SRCS-y += iavf_common.c
+SRCS-y += iavf_impl.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/iavf/README b/src/spdk/dpdk/drivers/common/iavf/README
new file mode 100644
index 000000000..f301ff665
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/README
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Intel Corporation
+ */
+
+Intel® IAVF driver
+==================
+
+This directory contains the source code of the FreeBSD IAVF driver,
+version cid-avf.2020.04.09.tar.gz, released by the team which develops
+the base drivers for IAVF NICs. The base/ directory contains the
+original source package.
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ iavf_osdep.h
+ iavf_impl.c
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.c b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.c
new file mode 100644
index 000000000..8bae51a46
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.c
@@ -0,0 +1,962 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#include "iavf_status.h"
+#include "iavf_type.h"
+#include "iavf_register.h"
+#include "iavf_adminq.h"
+#include "iavf_prototype.h"
+
+/**
+ * iavf_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ hw->aq.asq.tail = IAVF_VF_ATQT1;
+ hw->aq.asq.head = IAVF_VF_ATQH1;
+ hw->aq.asq.len = IAVF_VF_ATQLEN1;
+ hw->aq.asq.bal = IAVF_VF_ATQBAL1;
+ hw->aq.asq.bah = IAVF_VF_ATQBAH1;
+ hw->aq.arq.tail = IAVF_VF_ARQT1;
+ hw->aq.arq.head = IAVF_VF_ARQH1;
+ hw->aq.arq.len = IAVF_VF_ARQLEN1;
+ hw->aq.arq.bal = IAVF_VF_ARQBAL1;
+ hw->aq.arq.bah = IAVF_VF_ARQBAH1;
+}
+
+/**
+ * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ iavf_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct iavf_aq_desc)),
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct iavf_asq_cmd_details)));
+ if (ret_code) {
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ iavf_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct iavf_aq_desc)),
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * iavf_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void iavf_free_adminq_asq(struct iavf_hw *hw)
+{
+ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * iavf_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void iavf_free_adminq_arq(struct iavf_hw *hw)
+{
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+ struct iavf_aq_desc *desc;
+ struct iavf_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = iavf_allocate_dma_mem(hw, bi,
+ iavf_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
+ desc->opcode = 0;
+		/* This is in accordance with the Admin queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+ struct iavf_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = iavf_allocate_dma_mem(hw, bi,
+ iavf_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * iavf_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * iavf_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * iavf_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK));
+ wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * iavf_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event) queue
+ **/
+STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ IAVF_VF_ARQLEN1_ARQENABLE_MASK));
+ wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * iavf_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum iavf_status iavf_init_asq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = IAVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = iavf_alloc_adminq_asq_ring(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = iavf_alloc_asq_bufs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = iavf_config_asq_regs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_config_regs;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ iavf_free_adminq_asq(hw);
+ return ret_code;
+
+init_config_regs:
+ iavf_free_asq_bufs(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * iavf_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum iavf_status iavf_init_arq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = IAVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = iavf_alloc_adminq_arq_ring(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = iavf_alloc_arq_bufs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = iavf_config_arq_regs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ iavf_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * iavf_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ iavf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = IAVF_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ iavf_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ iavf_release_spinlock(&hw->aq.asq_spinlock);
+ return ret_code;
+}
+
+/**
+ * iavf_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ iavf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = IAVF_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ iavf_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ iavf_release_spinlock(&hw->aq.arq_spinlock);
+ return ret_code;
+}
+
+/**
+ * iavf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+ iavf_init_spinlock(&hw->aq.asq_spinlock);
+ iavf_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ iavf_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = iavf_init_asq(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = iavf_init_arq(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ iavf_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ iavf_destroy_spinlock(&hw->aq.asq_spinlock);
+ iavf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
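+
+/* Usage sketch (illustrative only; the sizes are example values, not
+ * mandated ones): a driver fills the four required hw->aq fields, calls
+ * iavf_init_adminq(), and pairs a successful init with
+ * iavf_shutdown_adminq() on teardown.
+ *
+ *	hw->aq.num_asq_entries = 32;
+ *	hw->aq.num_arq_entries = 32;
+ *	hw->aq.asq_buf_size = 4096;
+ *	hw->aq.arq_buf_size = 4096;
+ *	if (iavf_init_adminq(hw) != IAVF_SUCCESS)
+ *		return;
+ *	...
+ *	iavf_shutdown_adminq(hw);
+ */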
+
+/**
+ * iavf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (iavf_check_asq_alive(hw))
+ iavf_aq_queue_shutdown(hw, true);
+
+ iavf_shutdown_asq(hw);
+ iavf_shutdown_arq(hw);
+ iavf_destroy_spinlock(&hw->aq.asq_spinlock);
+ iavf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * iavf_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+u16 iavf_clean_asq(struct iavf_hw *hw)
+{
+ struct iavf_adminq_ring *asq = &(hw->aq.asq);
+ struct iavf_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct iavf_aq_desc desc_cb;
+ struct iavf_aq_desc *desc;
+
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ IAVF_ADMINQ_CALLBACK cb_func =
+ (IAVF_ADMINQ_CALLBACK)details->callback;
+ iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
+ iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return IAVF_DESC_UNUSED(asq);
+}
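+
+/* Callback sketch (illustrative; my_cb is a hypothetical name): a caller
+ * that sends with details->async set can have iavf_clean_asq() hand it a
+ * copy of the completed descriptor instead of blocking for writeback.
+ *
+ *	static void my_cb(struct iavf_hw *hw, struct iavf_aq_desc *desc)
+ *	{
+ *		... inspect LE16_TO_CPU(desc->retval) ...
+ *	}
+ *
+ *	struct iavf_asq_cmd_details details = { 0 };
+ *
+ *	details.callback = (void *)my_cb;
+ *	details.async = true;
+ *	status = iavf_asq_send_command(hw, &desc, NULL, 0, &details);
+ */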
+
+/**
+ * iavf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool iavf_asq_done(struct iavf_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * iavf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc.
+ **/
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+ struct iavf_dma_mem *dma_buff = NULL;
+ struct iavf_asq_cmd_details *details;
+ struct iavf_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ u32 val = 0;
+
+ iavf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq_last_status = IAVF_AQ_RC_OK;
+
+ if (hw->aq.asq.count == 0) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = IAVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = IAVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ iavf_memcpy(details,
+ cmd_details,
+ sizeof(struct iavf_asq_cmd_details),
+ IAVF_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
+ }
+ } else {
+ iavf_memset(details, 0,
+ sizeof(struct iavf_asq_cmd_details),
+ IAVF_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = IAVF_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = IAVF_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (iavf_clean_asq(hw) == 0) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
+ IAVF_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ iavf_memcpy(dma_buff->va, buff, buff_size,
+ IAVF_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (iavf_asq_done(hw))
+ break;
+ iavf_usec_delay(50);
+ total_delay += 50;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (iavf_asq_done(hw)) {
+ iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_NONDMA);
+ if (buff != NULL)
+ iavf_memcpy(buff, dma_buff->va, buff_size,
+ IAVF_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
+ status = IAVF_SUCCESS;
+ else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ status = IAVF_ERR_NOT_READY;
+ else
+ status = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
+ }
+
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save the writeback descriptor if requested */
+ if (details->wb_desc)
+ iavf_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);
+
+ /* update the error if a timeout occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+ }
+
+asq_send_command_error:
+ iavf_release_spinlock(&hw->aq.asq_spinlock);
+ return status;
+}
+
+/**
+ * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
+ IAVF_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
+}
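+
+/* Example (illustrative sketch): a direct command is sent by filling a
+ * descriptor with iavf_fill_default_direct_cmd_desc() and passing it to
+ * iavf_asq_send_command(); iavf_aq_queue_shutdown() in iavf_common.c
+ * follows exactly this pattern.
+ *
+ *	struct iavf_aq_desc desc;
+ *
+ *	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_get_version);
+ *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
+ *	if (status == IAVF_SUCCESS)
+ *		... read the writeback from desc.params ...
+ */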
+
+/**
+ * iavf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'.
+ **/
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *pending)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct iavf_aq_desc *desc;
+ struct iavf_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ iavf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = IAVF_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ hw->aq.arq_last_status =
+ (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & IAVF_AQ_FLAG_ERR) {
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ iavf_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, IAVF_DMA_TO_NONDMA);
+
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ iavf_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
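+
+/* Polling sketch (illustrative; the buffer size is an example and rx_buf is
+ * a placeholder): a typical receive path drains the ARQ by calling this
+ * function until it returns a non-zero status, which includes
+ * IAVF_ERR_ADMIN_QUEUE_NO_WORK once the queue is empty.
+ *
+ *	struct iavf_arq_event_info event;
+ *	u16 pending;
+ *
+ *	event.buf_len = 512;
+ *	event.msg_buf = rx_buf;
+ *	do {
+ *		if (iavf_clean_arq_element(hw, &event, &pending))
+ *			break;
+ *		... handle event.desc and event.msg_len bytes of msg_buf ...
+ *	} while (pending);
+ */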
+
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.h b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.h
new file mode 100644
index 000000000..93214162e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_ADMINQ_H_
+#define _IAVF_ADMINQ_H_
+
+#include "iavf_osdep.h"
+#include "iavf_status.h"
+#include "iavf_adminq_cmd.h"
+
+#define IAVF_ADMINQ_DESC(R, i) \
+ (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
+
+#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
+
+struct iavf_adminq_ring {
+ struct iavf_virt_mem dma_head; /* space for dma structures */
+ struct iavf_dma_mem desc_buf; /* descriptor ring memory */
+ struct iavf_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct iavf_dma_mem *asq_bi;
+ struct iavf_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct iavf_asq_cmd_details {
+ void *callback; /* cast from type IAVF_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct iavf_aq_desc *wb_desc;
+};
+
+#define IAVF_ADMINQ_DETAILS(R, i) \
+ (&(((struct iavf_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct iavf_arq_event_info {
+ struct iavf_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct iavf_adminq_info {
+ struct iavf_adminq_ring arq; /* receive queue */
+ struct iavf_adminq_ring asq; /* send queue */
+ u32 asq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct iavf_spinlock asq_spinlock; /* Send queue spinlock */
+ struct iavf_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum iavf_admin_queue_err asq_last_status;
+ enum iavf_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define IAVF_AQ_LARGE_BUF 512
+#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
+
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _IAVF_ADMINQ_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_adminq_cmd.h b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq_cmd.h
new file mode 100644
index 000000000..5b748426a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq_cmd.h
@@ -0,0 +1,652 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_ADMINQ_CMD_H_
+#define _IAVF_ADMINQ_CMD_H_
+
+/* This header file defines the iavf Admin Queue commands and is shared between
+ * iavf Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define IAVF_FW_API_VERSION_MAJOR 0x0001
+#define IAVF_FW_API_VERSION_MINOR_X722 0x0006
+#define IAVF_FW_API_VERSION_MINOR_X710 0x0007
+
+#define IAVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == IAVF_MAC_XL710 ? \
+ IAVF_FW_API_VERSION_MINOR_X710 : \
+ IAVF_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define IAVF_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
+
+struct iavf_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IAVF_AQ_FLAG_DD_SHIFT 0
+#define IAVF_AQ_FLAG_CMP_SHIFT 1
+#define IAVF_AQ_FLAG_ERR_SHIFT 2
+#define IAVF_AQ_FLAG_VFE_SHIFT 3
+#define IAVF_AQ_FLAG_LB_SHIFT 9
+#define IAVF_AQ_FLAG_RD_SHIFT 10
+#define IAVF_AQ_FLAG_VFC_SHIFT 11
+#define IAVF_AQ_FLAG_BUF_SHIFT 12
+#define IAVF_AQ_FLAG_SI_SHIFT 13
+#define IAVF_AQ_FLAG_EI_SHIFT 14
+#define IAVF_AQ_FLAG_FE_SHIFT 15
+
+#define IAVF_AQ_FLAG_DD (1 << IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define IAVF_AQ_FLAG_CMP (1 << IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define IAVF_AQ_FLAG_ERR (1 << IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define IAVF_AQ_FLAG_VFE (1 << IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define IAVF_AQ_FLAG_LB (1 << IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define IAVF_AQ_FLAG_RD (1 << IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define IAVF_AQ_FLAG_VFC (1 << IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define IAVF_AQ_FLAG_BUF (1 << IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define IAVF_AQ_FLAG_SI (1 << IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define IAVF_AQ_FLAG_EI (1 << IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define IAVF_AQ_FLAG_FE (1 << IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
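+
+/* Example (illustrative): an indirect command sets IAVF_AQ_FLAG_BUF to
+ * attach a data buffer, adds IAVF_AQ_FLAG_RD when that buffer carries data
+ * toward the FW, and IAVF_AQ_FLAG_LB when the buffer is larger than
+ * IAVF_AQ_LARGE_BUF:
+ *
+ *	desc.flags |= CPU_TO_LE16((u16)(IAVF_AQ_FLAG_BUF | IAVF_AQ_FLAG_RD));
+ *	if (buff_size > IAVF_AQ_LARGE_BUF)
+ *		desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_LB);
+ */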
+
+/* error codes */
+enum iavf_admin_queue_err {
+ IAVF_AQ_RC_OK = 0, /* success */
+ IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
+ IAVF_AQ_RC_ENOENT = 2, /* No such element */
+ IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
+ IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
+ IAVF_AQ_RC_EIO = 5, /* I/O error */
+ IAVF_AQ_RC_ENXIO = 6, /* No such resource */
+ IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
+ IAVF_AQ_RC_EAGAIN = 8, /* Try again */
+ IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
+ IAVF_AQ_RC_EACCES = 10, /* Permission denied */
+ IAVF_AQ_RC_EFAULT = 11, /* Bad address */
+ IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ IAVF_AQ_RC_EEXIST = 13, /* object already exists */
+ IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
+ IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IAVF_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum iavf_admin_queue_opc {
+ /* aq commands */
+ iavf_aqc_opc_get_version = 0x0001,
+ iavf_aqc_opc_driver_version = 0x0002,
+ iavf_aqc_opc_queue_shutdown = 0x0003,
+ iavf_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ iavf_aqc_opc_request_resource = 0x0008,
+ iavf_aqc_opc_release_resource = 0x0009,
+
+ iavf_aqc_opc_list_func_capabilities = 0x000A,
+ iavf_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ iavf_aqc_opc_set_proxy_config = 0x0104,
+ iavf_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ iavf_aqc_opc_mac_address_read = 0x0107,
+ iavf_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ iavf_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ iavf_aqc_opc_set_wol_filter = 0x0120,
+ iavf_aqc_opc_get_wake_reason = 0x0121,
+ iavf_aqc_opc_clear_all_wol_filters = 0x025E,
+
+ /* internal switch commands */
+ iavf_aqc_opc_get_switch_config = 0x0200,
+ iavf_aqc_opc_add_statistics = 0x0201,
+ iavf_aqc_opc_remove_statistics = 0x0202,
+ iavf_aqc_opc_set_port_parameters = 0x0203,
+ iavf_aqc_opc_get_switch_resource_alloc = 0x0204,
+ iavf_aqc_opc_set_switch_config = 0x0205,
+ iavf_aqc_opc_rx_ctl_reg_read = 0x0206,
+ iavf_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ iavf_aqc_opc_add_vsi = 0x0210,
+ iavf_aqc_opc_update_vsi_parameters = 0x0211,
+ iavf_aqc_opc_get_vsi_parameters = 0x0212,
+
+ iavf_aqc_opc_add_pv = 0x0220,
+ iavf_aqc_opc_update_pv_parameters = 0x0221,
+ iavf_aqc_opc_get_pv_parameters = 0x0222,
+
+ iavf_aqc_opc_add_veb = 0x0230,
+ iavf_aqc_opc_update_veb_parameters = 0x0231,
+ iavf_aqc_opc_get_veb_parameters = 0x0232,
+
+ iavf_aqc_opc_delete_element = 0x0243,
+
+ iavf_aqc_opc_add_macvlan = 0x0250,
+ iavf_aqc_opc_remove_macvlan = 0x0251,
+ iavf_aqc_opc_add_vlan = 0x0252,
+ iavf_aqc_opc_remove_vlan = 0x0253,
+ iavf_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ iavf_aqc_opc_add_tag = 0x0255,
+ iavf_aqc_opc_remove_tag = 0x0256,
+ iavf_aqc_opc_add_multicast_etag = 0x0257,
+ iavf_aqc_opc_remove_multicast_etag = 0x0258,
+ iavf_aqc_opc_update_tag = 0x0259,
+ iavf_aqc_opc_add_control_packet_filter = 0x025A,
+ iavf_aqc_opc_remove_control_packet_filter = 0x025B,
+ iavf_aqc_opc_add_cloud_filters = 0x025C,
+ iavf_aqc_opc_remove_cloud_filters = 0x025D,
+ iavf_aqc_opc_clear_wol_switch_filters = 0x025E,
+ iavf_aqc_opc_replace_cloud_filters = 0x025F,
+
+ iavf_aqc_opc_add_mirror_rule = 0x0260,
+ iavf_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ iavf_aqc_opc_write_personalization_profile = 0x0270,
+ iavf_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ iavf_aqc_opc_dcb_ignore_pfc = 0x0301,
+ iavf_aqc_opc_dcb_updated = 0x0302,
+ iavf_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ iavf_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ iavf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ iavf_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ iavf_aqc_opc_query_vsi_bw_config = 0x0408,
+ iavf_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ iavf_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ iavf_aqc_opc_enable_switching_comp_ets = 0x0413,
+ iavf_aqc_opc_modify_switching_comp_ets = 0x0414,
+ iavf_aqc_opc_disable_switching_comp_ets = 0x0415,
+ iavf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ iavf_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ iavf_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ iavf_aqc_opc_query_port_ets_config = 0x0419,
+ iavf_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ iavf_aqc_opc_suspend_port_tx = 0x041B,
+ iavf_aqc_opc_resume_port_tx = 0x041C,
+ iavf_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ iavf_aqc_opc_query_hmc_resource_profile = 0x0500,
+ iavf_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands */
+ iavf_aqc_opc_get_phy_abilities = 0x0600,
+ iavf_aqc_opc_set_phy_config = 0x0601,
+ iavf_aqc_opc_set_mac_config = 0x0603,
+ iavf_aqc_opc_set_link_restart_an = 0x0605,
+ iavf_aqc_opc_get_link_status = 0x0607,
+ iavf_aqc_opc_set_phy_int_mask = 0x0613,
+ iavf_aqc_opc_get_local_advt_reg = 0x0614,
+ iavf_aqc_opc_set_local_advt_reg = 0x0615,
+ iavf_aqc_opc_get_partner_advt = 0x0616,
+ iavf_aqc_opc_set_lb_modes = 0x0618,
+ iavf_aqc_opc_get_phy_wol_caps = 0x0621,
+ iavf_aqc_opc_set_phy_debug = 0x0622,
+ iavf_aqc_opc_upload_ext_phy_fm = 0x0625,
+ iavf_aqc_opc_run_phy_activity = 0x0626,
+ iavf_aqc_opc_set_phy_register = 0x0628,
+ iavf_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ iavf_aqc_opc_nvm_read = 0x0701,
+ iavf_aqc_opc_nvm_erase = 0x0702,
+ iavf_aqc_opc_nvm_update = 0x0703,
+ iavf_aqc_opc_nvm_config_read = 0x0704,
+ iavf_aqc_opc_nvm_config_write = 0x0705,
+ iavf_aqc_opc_nvm_progress = 0x0706,
+ iavf_aqc_opc_oem_post_update = 0x0720,
+ iavf_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ iavf_aqc_opc_send_msg_to_pf = 0x0801,
+ iavf_aqc_opc_send_msg_to_vf = 0x0802,
+ iavf_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ iavf_aqc_opc_alternate_write = 0x0900,
+ iavf_aqc_opc_alternate_write_indirect = 0x0901,
+ iavf_aqc_opc_alternate_read = 0x0902,
+ iavf_aqc_opc_alternate_read_indirect = 0x0903,
+ iavf_aqc_opc_alternate_write_done = 0x0904,
+ iavf_aqc_opc_alternate_set_mode = 0x0905,
+ iavf_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ iavf_aqc_opc_lldp_get_mib = 0x0A00,
+ iavf_aqc_opc_lldp_update_mib = 0x0A01,
+ iavf_aqc_opc_lldp_add_tlv = 0x0A02,
+ iavf_aqc_opc_lldp_update_tlv = 0x0A03,
+ iavf_aqc_opc_lldp_delete_tlv = 0x0A04,
+ iavf_aqc_opc_lldp_stop = 0x0A05,
+ iavf_aqc_opc_lldp_start = 0x0A06,
+ iavf_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ iavf_aqc_opc_lldp_set_local_mib = 0x0A08,
+ iavf_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+ /* Tunnel commands */
+ iavf_aqc_opc_add_udp_tunnel = 0x0B00,
+ iavf_aqc_opc_del_udp_tunnel = 0x0B01,
+ iavf_aqc_opc_set_rss_key = 0x0B02,
+ iavf_aqc_opc_set_rss_lut = 0x0B03,
+ iavf_aqc_opc_get_rss_key = 0x0B04,
+ iavf_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ iavf_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ iavf_aqc_opc_oem_parameter_change = 0xFE00,
+ iavf_aqc_opc_oem_device_status_change = 0xFE01,
+ iavf_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ iavf_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ iavf_aqc_opc_debug_read_reg = 0xFF03,
+ iavf_aqc_opc_debug_write_reg = 0xFF04,
+ iavf_aqc_opc_debug_modify_reg = 0xFF07,
+ iavf_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \
+ { iavf_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define IAVF_CHECK_CMD_LENGTH(X) IAVF_CHECK_STRUCT_LEN(16, X)
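+
+/* Worked example: IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message) expands to
+ *
+ *	enum iavf_static_assert_enum_iavf_aqc_pf_vf_message {
+ *		iavf_static_assert_iavf_aqc_pf_vf_message =
+ *			16 / ((sizeof(struct iavf_aqc_pf_vf_message) == 16) ? 1 : 0)
+ *	};
+ *
+ * so a struct of the wrong size makes the divisor 0 and the constant
+ * expression fails to compile.
+ */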
+
+/* Queue Shutdown (direct 0x0003) */
+struct iavf_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define IAVF_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown);
+
+#define IAVF_AQC_WOL_PRESERVE_STATUS 0x200
+#define IAVF_AQC_MC_MAG_EN 0x0100
+#define IAVF_AQC_WOL_PRESERVE_ON_PFR 0x0200
+
+struct iavf_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define IAVF_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define IAVF_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define IAVF_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define IAVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define IAVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define IAVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define IAVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define IAVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define IAVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define IAVF_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define IAVF_AQ_VSI_SW_ID_SHIFT 0x0000
+#define IAVF_AQ_VSI_SW_ID_MASK (0xFFF << IAVF_AQ_VSI_SW_ID_SHIFT)
+#define IAVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define IAVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define IAVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define IAVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define IAVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define IAVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ IAVF_AQ_VSI_PVLAN_MODE_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define IAVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define IAVF_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define IAVF_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define IAVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define IAVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ IAVF_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define IAVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define IAVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define IAVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define IAVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define IAVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define IAVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define IAVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define IAVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define IAVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define IAVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define IAVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define IAVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define IAVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define IAVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define IAVF_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define IAVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define IAVF_AQ_VSI_QUEUE_SHIFT 0x0
+#define IAVF_AQ_VSI_QUEUE_MASK (0x7FF << IAVF_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define IAVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define IAVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define IAVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define IAVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define IAVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+IAVF_CHECK_STRUCT_LEN(128, iavf_aqc_vsi_properties_data);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses iavf_aqc_switch_seid for the descriptor
+ */
+struct iavf_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_veb_parameters_completion);
+
+#define IAVF_LINK_SPEED_2_5GB_SHIFT 0x0
+#define IAVF_LINK_SPEED_100MB_SHIFT 0x1
+#define IAVF_LINK_SPEED_1000MB_SHIFT 0x2
+#define IAVF_LINK_SPEED_10GB_SHIFT 0x3
+#define IAVF_LINK_SPEED_40GB_SHIFT 0x4
+#define IAVF_LINK_SPEED_20GB_SHIFT 0x5
+#define IAVF_LINK_SPEED_25GB_SHIFT 0x6
+#define IAVF_LINK_SPEED_5GB_SHIFT 0x7
+
+enum iavf_aq_link_speed {
+ IAVF_LINK_SPEED_UNKNOWN = 0,
+ IAVF_LINK_SPEED_100MB = (1 << IAVF_LINK_SPEED_100MB_SHIFT),
+ IAVF_LINK_SPEED_1GB = (1 << IAVF_LINK_SPEED_1000MB_SHIFT),
+ IAVF_LINK_SPEED_2_5GB = (1 << IAVF_LINK_SPEED_2_5GB_SHIFT),
+ IAVF_LINK_SPEED_5GB = (1 << IAVF_LINK_SPEED_5GB_SHIFT),
+ IAVF_LINK_SPEED_10GB = (1 << IAVF_LINK_SPEED_10GB_SHIFT),
+ IAVF_LINK_SPEED_40GB = (1 << IAVF_LINK_SPEED_40GB_SHIFT),
+ IAVF_LINK_SPEED_20GB = (1 << IAVF_LINK_SPEED_20GB_SHIFT),
+ IAVF_LINK_SPEED_25GB = (1 << IAVF_LINK_SPEED_25GB_SHIFT),
+};
+
+#define IAVF_AQ_LINK_UP_FUNCTION 0x01
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct iavf_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define IAVF_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define IAVF_AQC_CEE_APP_FCOE_MASK (0x7 << IAVF_AQC_CEE_APP_FCOE_SHIFT)
+#define IAVF_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define IAVF_AQC_CEE_APP_ISCSI_MASK (0x7 << IAVF_AQC_CEE_APP_ISCSI_SHIFT)
+#define IAVF_AQC_CEE_APP_FIP_SHIFT 0x8
+#define IAVF_AQC_CEE_APP_FIP_MASK (0x7 << IAVF_AQC_CEE_APP_FIP_SHIFT)
+
+#define IAVF_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define IAVF_AQC_CEE_PG_STATUS_MASK (0x7 << IAVF_AQC_CEE_PG_STATUS_SHIFT)
+#define IAVF_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define IAVF_AQC_CEE_PFC_STATUS_MASK (0x7 << IAVF_AQC_CEE_PFC_STATUS_SHIFT)
+#define IAVF_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define IAVF_AQC_CEE_APP_STATUS_MASK (0x7 << IAVF_AQC_CEE_APP_STATUS_SHIFT)
+#define IAVF_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define IAVF_AQC_CEE_FCOE_STATUS_MASK (0x7 << IAVF_AQC_CEE_FCOE_STATUS_SHIFT)
+#define IAVF_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define IAVF_AQC_CEE_ISCSI_STATUS_MASK (0x7 << IAVF_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define IAVF_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define IAVF_AQC_CEE_FIP_STATUS_MASK (0x7 << IAVF_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct iavf_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct iavf_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+IAVF_CHECK_STRUCT_LEN(0x18, iavf_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct iavf_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x20, iavf_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBX
+ */
+struct iavf_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_set_local_mib);
+
+struct iavf_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
+ u8 status;
+ u8 reserved[15];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x10, iavf_aqc_lldp_set_local_mib_resp);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting a specific LLDP agent, e.g. DCBX
+ */
+struct iavf_aqc_lldp_stop_start_specific_agent {
+#define IAVF_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define IAVF_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << IAVF_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_stop_start_specific_agent);
+
+struct iavf_aqc_get_set_rss_key {
+#define IAVF_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_key);
+
+struct iavf_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x34, iavf_aqc_get_set_rss_key_data);
+
+struct iavf_aqc_get_set_rss_lut {
+#define IAVF_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_lut);
+#endif /* _IAVF_ADMINQ_CMD_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_alloc.h b/src/spdk/dpdk/drivers/common/iavf/iavf_alloc.h
new file mode 100644
index 000000000..7b7a205cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_alloc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_ALLOC_H_
+#define _IAVF_ALLOC_H_
+
+struct iavf_hw;
+
+/* Memory allocation types */
+enum iavf_memory_type {
+ iavf_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ iavf_mem_asq_buf = 1,
+ iavf_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ iavf_mem_arq_ring = 3, /* ARQ descriptor ring */
+ iavf_mem_atq_ring = 4, /* ATQ descriptor ring */
+ iavf_mem_pd = 5, /* Page Descriptor */
+ iavf_mem_bp = 6, /* Backing Page - 4KB */
+ iavf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ iavf_mem_reserved
+};
+
+/* prototypes for functions used for dynamic memory allocation */
+enum iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ enum iavf_memory_type type,
+ u64 size, u32 alignment);
+enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem);
+enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem,
+ u32 size);
+enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem);
+
+#endif /* _IAVF_ALLOC_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_common.c b/src/spdk/dpdk/drivers/common/iavf/iavf_common.c
new file mode 100644
index 000000000..57ec1ff39
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_common.c
@@ -0,0 +1,1025 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#include "iavf_type.h"
+#include "iavf_adminq.h"
+#include "iavf_prototype.h"
+#include "virtchnl.h"
+
+/**
+ * iavf_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+
+ DEBUGFUNC("iavf_set_mac_type\n");
+
+ if (hw->vendor_id == IAVF_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case IAVF_DEV_ID_X722_VF:
+ hw->mac.type = IAVF_MAC_X722_VF;
+ break;
+ case IAVF_DEV_ID_VF:
+ case IAVF_DEV_ID_VF_HV:
+ case IAVF_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = IAVF_MAC_VF;
+ break;
+ default:
+ hw->mac.type = IAVF_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = IAVF_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("iavf_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * iavf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case IAVF_AQ_RC_OK:
+ return "OK";
+ case IAVF_AQ_RC_EPERM:
+ return "IAVF_AQ_RC_EPERM";
+ case IAVF_AQ_RC_ENOENT:
+ return "IAVF_AQ_RC_ENOENT";
+ case IAVF_AQ_RC_ESRCH:
+ return "IAVF_AQ_RC_ESRCH";
+ case IAVF_AQ_RC_EINTR:
+ return "IAVF_AQ_RC_EINTR";
+ case IAVF_AQ_RC_EIO:
+ return "IAVF_AQ_RC_EIO";
+ case IAVF_AQ_RC_ENXIO:
+ return "IAVF_AQ_RC_ENXIO";
+ case IAVF_AQ_RC_E2BIG:
+ return "IAVF_AQ_RC_E2BIG";
+ case IAVF_AQ_RC_EAGAIN:
+ return "IAVF_AQ_RC_EAGAIN";
+ case IAVF_AQ_RC_ENOMEM:
+ return "IAVF_AQ_RC_ENOMEM";
+ case IAVF_AQ_RC_EACCES:
+ return "IAVF_AQ_RC_EACCES";
+ case IAVF_AQ_RC_EFAULT:
+ return "IAVF_AQ_RC_EFAULT";
+ case IAVF_AQ_RC_EBUSY:
+ return "IAVF_AQ_RC_EBUSY";
+ case IAVF_AQ_RC_EEXIST:
+ return "IAVF_AQ_RC_EEXIST";
+ case IAVF_AQ_RC_EINVAL:
+ return "IAVF_AQ_RC_EINVAL";
+ case IAVF_AQ_RC_ENOTTY:
+ return "IAVF_AQ_RC_ENOTTY";
+ case IAVF_AQ_RC_ENOSPC:
+ return "IAVF_AQ_RC_ENOSPC";
+ case IAVF_AQ_RC_ENOSYS:
+ return "IAVF_AQ_RC_ENOSYS";
+ case IAVF_AQ_RC_ERANGE:
+ return "IAVF_AQ_RC_ERANGE";
+ case IAVF_AQ_RC_EFLUSHED:
+ return "IAVF_AQ_RC_EFLUSHED";
+ case IAVF_AQ_RC_BAD_ADDR:
+ return "IAVF_AQ_RC_BAD_ADDR";
+ case IAVF_AQ_RC_EMODE:
+ return "IAVF_AQ_RC_EMODE";
+ case IAVF_AQ_RC_EFBIG:
+ return "IAVF_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * iavf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
+{
+ switch (stat_err) {
+ case IAVF_SUCCESS:
+ return "OK";
+ case IAVF_ERR_NVM:
+ return "IAVF_ERR_NVM";
+ case IAVF_ERR_NVM_CHECKSUM:
+ return "IAVF_ERR_NVM_CHECKSUM";
+ case IAVF_ERR_PHY:
+ return "IAVF_ERR_PHY";
+ case IAVF_ERR_CONFIG:
+ return "IAVF_ERR_CONFIG";
+ case IAVF_ERR_PARAM:
+ return "IAVF_ERR_PARAM";
+ case IAVF_ERR_MAC_TYPE:
+ return "IAVF_ERR_MAC_TYPE";
+ case IAVF_ERR_UNKNOWN_PHY:
+ return "IAVF_ERR_UNKNOWN_PHY";
+ case IAVF_ERR_LINK_SETUP:
+ return "IAVF_ERR_LINK_SETUP";
+ case IAVF_ERR_ADAPTER_STOPPED:
+ return "IAVF_ERR_ADAPTER_STOPPED";
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ return "IAVF_ERR_INVALID_MAC_ADDR";
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
+ case IAVF_ERR_MASTER_REQUESTS_PENDING:
+ return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ return "IAVF_ERR_INVALID_LINK_SETTINGS";
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
+ case IAVF_ERR_RESET_FAILED:
+ return "IAVF_ERR_RESET_FAILED";
+ case IAVF_ERR_SWFW_SYNC:
+ return "IAVF_ERR_SWFW_SYNC";
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ return "IAVF_ERR_NO_AVAILABLE_VSI";
+ case IAVF_ERR_NO_MEMORY:
+ return "IAVF_ERR_NO_MEMORY";
+ case IAVF_ERR_BAD_PTR:
+ return "IAVF_ERR_BAD_PTR";
+ case IAVF_ERR_RING_FULL:
+ return "IAVF_ERR_RING_FULL";
+ case IAVF_ERR_INVALID_PD_ID:
+ return "IAVF_ERR_INVALID_PD_ID";
+ case IAVF_ERR_INVALID_QP_ID:
+ return "IAVF_ERR_INVALID_QP_ID";
+ case IAVF_ERR_INVALID_CQ_ID:
+ return "IAVF_ERR_INVALID_CQ_ID";
+ case IAVF_ERR_INVALID_CEQ_ID:
+ return "IAVF_ERR_INVALID_CEQ_ID";
+ case IAVF_ERR_INVALID_AEQ_ID:
+ return "IAVF_ERR_INVALID_AEQ_ID";
+ case IAVF_ERR_INVALID_SIZE:
+ return "IAVF_ERR_INVALID_SIZE";
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ return "IAVF_ERR_INVALID_ARP_INDEX";
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ return "IAVF_ERR_INVALID_FPM_FUNC_ID";
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ return "IAVF_ERR_QP_INVALID_MSG_SIZE";
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ return "IAVF_ERR_QP_TOOMANY_WRS_POSTED";
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ return "IAVF_ERR_INVALID_FRAG_COUNT";
+ case IAVF_ERR_QUEUE_EMPTY:
+ return "IAVF_ERR_QUEUE_EMPTY";
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ return "IAVF_ERR_INVALID_ALIGNMENT";
+ case IAVF_ERR_FLUSHED_QUEUE:
+ return "IAVF_ERR_FLUSHED_QUEUE";
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX";
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ return "IAVF_ERR_INVALID_IMM_DATA_SIZE";
+ case IAVF_ERR_TIMEOUT:
+ return "IAVF_ERR_TIMEOUT";
+ case IAVF_ERR_OPCODE_MISMATCH:
+ return "IAVF_ERR_OPCODE_MISMATCH";
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ return "IAVF_ERR_CQP_COMPL_ERROR";
+ case IAVF_ERR_INVALID_VF_ID:
+ return "IAVF_ERR_INVALID_VF_ID";
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ return "IAVF_ERR_INVALID_HMCFN_ID";
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ return "IAVF_ERR_BACKING_PAGE_ERROR";
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ return "IAVF_ERR_INVALID_PBLE_INDEX";
+ case IAVF_ERR_INVALID_SD_INDEX:
+ return "IAVF_ERR_INVALID_SD_INDEX";
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ return "IAVF_ERR_INVALID_PAGE_DESC_INDEX";
+ case IAVF_ERR_INVALID_SD_TYPE:
+ return "IAVF_ERR_INVALID_SD_TYPE";
+ case IAVF_ERR_MEMCPY_FAILED:
+ return "IAVF_ERR_MEMCPY_FAILED";
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ return "IAVF_ERR_INVALID_HMC_OBJ_INDEX";
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ return "IAVF_ERR_INVALID_HMC_OBJ_COUNT";
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT";
+ case IAVF_ERR_SRQ_ENABLED:
+ return "IAVF_ERR_SRQ_ENABLED";
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_ERROR";
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT";
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return "IAVF_ERR_BUF_TOO_SHORT";
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ return "IAVF_ERR_ADMIN_QUEUE_FULL";
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
+ case IAVF_ERR_BAD_IWARP_CQE:
+ return "IAVF_ERR_BAD_IWARP_CQE";
+ case IAVF_ERR_NVM_BLANK_MODE:
+ return "IAVF_ERR_NVM_BLANK_MODE";
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ return "IAVF_ERR_NOT_IMPLEMENTED";
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED";
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ return "IAVF_ERR_DIAG_TEST_FAILED";
+ case IAVF_ERR_NOT_READY:
+ return "IAVF_ERR_NOT_READY";
+ case IAVF_NOT_SUPPORTED:
+ return "IAVF_NOT_SUPPORTED";
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ return "IAVF_ERR_FIRMWARE_API_VERSION";
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
+ * iavf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
+ u8 *buf = (u8 *)buffer;
+ u16 len;
+ u16 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ len = LE16_TO_CPU(aq_desc->datalen);
+
+ iavf_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
+ iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
+ iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
+ iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ iavf_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ iavf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* the most we could have left is 16 bytes, pad with zeros */
+ if (i < len) {
+ char d_buf[16];
+ int j, i_sav;
+
+ i_sav = i;
+ memset(d_buf, 0, sizeof(d_buf));
+ for (j = 0; i < len; j++, i++)
+ d_buf[j] = buf[i];
+ iavf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+ d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+ d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
+ }
+ }
+}
+
+/**
+ * iavf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the admin send queue is enabled, else false.
+ **/
+bool iavf_check_asq_alive(struct iavf_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK);
+ else
+ return false;
+}
+
+/**
+ * iavf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw,
+ bool unloading)
+{
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_queue_shutdown *cmd =
+ (struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(IAVF_AQ_DRIVER_UNLOADING);
+ status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+STATIC enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_lut *cmd_resp =
+ (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_set_rss_lut);
+ else
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * iavf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
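+
+/* Usage sketch (illustrative, not part of the driver): a caller that wants
+ * to spread flows evenly across "num_queues" Rx queues could fill the LUT
+ * with a round-robin pattern before programming it; "num_queues" and the
+ * 64-entry size are assumptions for the example.
+ *
+ *	u8 lut[64];
+ *	u16 i;
+ *
+ *	for (i = 0; i < sizeof(lut); i++)
+ *		lut[i] = i % num_queues;
+ *
+ *	iavf_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));
+ */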
+
+/**
+ * iavf_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+STATIC enum iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_key *cmd_resp =
+ (struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
+
+ if (set)
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_set_rss_key);
+ else
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
+{
+ return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * iavf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
+{
+ return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
+/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT iavf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum iavf_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
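+
+/* Illustrative sketch (not part of the driver) of the work flow above,
+ * using the decode_rx_desc_ptype() helper declared in iavf_prototype.h.
+ * The field names are assumed to mirror the IAVF_PTT() columns below.
+ *
+ *	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+ *
+ *	if (!decoded.known) {
+ *		// packet type is unknown; drop or just count it
+ *	} else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) {
+ *		// IP packet: inspect decoded.tunnel_type,
+ *		// decoded.inner_prot, decoded.payload_layer, etc.
+ *	} else {
+ *		// non-IP L2 frame: interpret via enum iavf_rx_l2_ptype
+ *	}
+ */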
+
+/* macro to make the table lines short */
+#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
+ IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ IAVF_RX_PTYPE_##OUTER_FRAG, \
+ IAVF_RX_PTYPE_TUNNEL_##T, \
+ IAVF_RX_PTYPE_TUNNEL_END_##TE, \
+ IAVF_RX_PTYPE_##TEF, \
+ IAVF_RX_PTYPE_INNER_PROT_##I, \
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
+#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
+#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
+ /* L2 Packet types */
+ IAVF_PTT_UNUSED_ENTRY(0),
+ IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT_UNUSED_ENTRY(4),
+ IAVF_PTT_UNUSED_ENTRY(5),
+ IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT_UNUSED_ENTRY(8),
+ IAVF_PTT_UNUSED_ENTRY(9),
+ IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(25),
+ IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(32),
+ IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(39),
+ IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(47),
+ IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(54),
+ IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(62),
+ IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(69),
+ IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(77),
+ IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(84),
+ IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(91),
+ IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(98),
+ IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(105),
+ IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(113),
+ IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(120),
+ IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(128),
+ IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(135),
+ IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(143),
+ IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(150),
+ IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ IAVF_PTT_UNUSED_ENTRY(154),
+ IAVF_PTT_UNUSED_ENTRY(155),
+ IAVF_PTT_UNUSED_ENTRY(156),
+ IAVF_PTT_UNUSED_ENTRY(157),
+ IAVF_PTT_UNUSED_ENTRY(158),
+ IAVF_PTT_UNUSED_ENTRY(159),
+
+ IAVF_PTT_UNUSED_ENTRY(160),
+ IAVF_PTT_UNUSED_ENTRY(161),
+ IAVF_PTT_UNUSED_ENTRY(162),
+ IAVF_PTT_UNUSED_ENTRY(163),
+ IAVF_PTT_UNUSED_ENTRY(164),
+ IAVF_PTT_UNUSED_ENTRY(165),
+ IAVF_PTT_UNUSED_ENTRY(166),
+ IAVF_PTT_UNUSED_ENTRY(167),
+ IAVF_PTT_UNUSED_ENTRY(168),
+ IAVF_PTT_UNUSED_ENTRY(169),
+
+ IAVF_PTT_UNUSED_ENTRY(170),
+ IAVF_PTT_UNUSED_ENTRY(171),
+ IAVF_PTT_UNUSED_ENTRY(172),
+ IAVF_PTT_UNUSED_ENTRY(173),
+ IAVF_PTT_UNUSED_ENTRY(174),
+ IAVF_PTT_UNUSED_ENTRY(175),
+ IAVF_PTT_UNUSED_ENTRY(176),
+ IAVF_PTT_UNUSED_ENTRY(177),
+ IAVF_PTT_UNUSED_ENTRY(178),
+ IAVF_PTT_UNUSED_ENTRY(179),
+
+ IAVF_PTT_UNUSED_ENTRY(180),
+ IAVF_PTT_UNUSED_ENTRY(181),
+ IAVF_PTT_UNUSED_ENTRY(182),
+ IAVF_PTT_UNUSED_ENTRY(183),
+ IAVF_PTT_UNUSED_ENTRY(184),
+ IAVF_PTT_UNUSED_ENTRY(185),
+ IAVF_PTT_UNUSED_ENTRY(186),
+ IAVF_PTT_UNUSED_ENTRY(187),
+ IAVF_PTT_UNUSED_ENTRY(188),
+ IAVF_PTT_UNUSED_ENTRY(189),
+
+ IAVF_PTT_UNUSED_ENTRY(190),
+ IAVF_PTT_UNUSED_ENTRY(191),
+ IAVF_PTT_UNUSED_ENTRY(192),
+ IAVF_PTT_UNUSED_ENTRY(193),
+ IAVF_PTT_UNUSED_ENTRY(194),
+ IAVF_PTT_UNUSED_ENTRY(195),
+ IAVF_PTT_UNUSED_ENTRY(196),
+ IAVF_PTT_UNUSED_ENTRY(197),
+ IAVF_PTT_UNUSED_ENTRY(198),
+ IAVF_PTT_UNUSED_ENTRY(199),
+
+ IAVF_PTT_UNUSED_ENTRY(200),
+ IAVF_PTT_UNUSED_ENTRY(201),
+ IAVF_PTT_UNUSED_ENTRY(202),
+ IAVF_PTT_UNUSED_ENTRY(203),
+ IAVF_PTT_UNUSED_ENTRY(204),
+ IAVF_PTT_UNUSED_ENTRY(205),
+ IAVF_PTT_UNUSED_ENTRY(206),
+ IAVF_PTT_UNUSED_ENTRY(207),
+ IAVF_PTT_UNUSED_ENTRY(208),
+ IAVF_PTT_UNUSED_ENTRY(209),
+
+ IAVF_PTT_UNUSED_ENTRY(210),
+ IAVF_PTT_UNUSED_ENTRY(211),
+ IAVF_PTT_UNUSED_ENTRY(212),
+ IAVF_PTT_UNUSED_ENTRY(213),
+ IAVF_PTT_UNUSED_ENTRY(214),
+ IAVF_PTT_UNUSED_ENTRY(215),
+ IAVF_PTT_UNUSED_ENTRY(216),
+ IAVF_PTT_UNUSED_ENTRY(217),
+ IAVF_PTT_UNUSED_ENTRY(218),
+ IAVF_PTT_UNUSED_ENTRY(219),
+
+ IAVF_PTT_UNUSED_ENTRY(220),
+ IAVF_PTT_UNUSED_ENTRY(221),
+ IAVF_PTT_UNUSED_ENTRY(222),
+ IAVF_PTT_UNUSED_ENTRY(223),
+ IAVF_PTT_UNUSED_ENTRY(224),
+ IAVF_PTT_UNUSED_ENTRY(225),
+ IAVF_PTT_UNUSED_ENTRY(226),
+ IAVF_PTT_UNUSED_ENTRY(227),
+ IAVF_PTT_UNUSED_ENTRY(228),
+ IAVF_PTT_UNUSED_ENTRY(229),
+
+ IAVF_PTT_UNUSED_ENTRY(230),
+ IAVF_PTT_UNUSED_ENTRY(231),
+ IAVF_PTT_UNUSED_ENTRY(232),
+ IAVF_PTT_UNUSED_ENTRY(233),
+ IAVF_PTT_UNUSED_ENTRY(234),
+ IAVF_PTT_UNUSED_ENTRY(235),
+ IAVF_PTT_UNUSED_ENTRY(236),
+ IAVF_PTT_UNUSED_ENTRY(237),
+ IAVF_PTT_UNUSED_ENTRY(238),
+ IAVF_PTT_UNUSED_ENTRY(239),
+
+ IAVF_PTT_UNUSED_ENTRY(240),
+ IAVF_PTT_UNUSED_ENTRY(241),
+ IAVF_PTT_UNUSED_ENTRY(242),
+ IAVF_PTT_UNUSED_ENTRY(243),
+ IAVF_PTT_UNUSED_ENTRY(244),
+ IAVF_PTT_UNUSED_ENTRY(245),
+ IAVF_PTT_UNUSED_ENTRY(246),
+ IAVF_PTT_UNUSED_ENTRY(247),
+ IAVF_PTT_UNUSED_ENTRY(248),
+ IAVF_PTT_UNUSED_ENTRY(249),
+
+ IAVF_PTT_UNUSED_ENTRY(250),
+ IAVF_PTT_UNUSED_ENTRY(251),
+ IAVF_PTT_UNUSED_ENTRY(252),
+ IAVF_PTT_UNUSED_ENTRY(253),
+ IAVF_PTT_UNUSED_ENTRY(254),
+ IAVF_PTT_UNUSED_ENTRY(255)
+};
+
+/**
+ * iavf_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum iavf_status iavf_validate_mac_addr(u8 *mac_addr)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+
+ DEBUGFUNC("iavf_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (IAVF_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = IAVF_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * iavf_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcode for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ struct iavf_aq_desc desc;
+ struct iavf_asq_cmd_details details;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(IAVF_AQ_FLAG_BUF
+ | IAVF_AQ_FLAG_RD));
+ if (msglen > IAVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ iavf_memset(&details, 0, sizeof(details), IAVF_NONDMA_MEM);
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = iavf_asq_send_command(hw, (struct iavf_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
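+
+/* Usage sketch (illustrative, not part of the driver): a virtchnl
+ * handshake step such as requesting the VF resource map from the PF. The
+ * reply arrives later as an ARQ event, so this call does not wait for it;
+ * the capability set shown is an assumption for the example.
+ *
+ *	u32 caps = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ *
+ *	iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
+ *			       IAVF_SUCCESS, (u8 *)&caps, sizeof(caps), NULL);
+ */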
+
+/**
+ * iavf_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void iavf_vf_parse_hw_config(struct iavf_hw *hw,
+ struct virtchnl_vf_resource *msg)
+{
+ struct virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_L2;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+ iavf_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ ETH_ALEN,
+ IAVF_NONDMA_TO_NONDMA);
+ iavf_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN,
+ IAVF_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * iavf_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
+{
+ return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+ IAVF_SUCCESS, NULL, 0, NULL);
+}
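+
+/* Usage sketch (illustrative, not part of the driver) of the sequence the
+ * comment above prescribes, built from the adminq helpers declared in
+ * iavf_prototype.h; the reset-poll step is only outlined:
+ *
+ *	iavf_vf_reset(hw);
+ *	iavf_shutdown_adminq(hw);
+ *	(poll IAVF_VFGEN_RSTAT until the VFR_STATE field reports completion)
+ *	iavf_init_adminq(hw);
+ */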
+
+/**
+ * iavf_aq_clear_all_wol_filters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Clear all Wake on LAN (WoL) filters configured on this device
+ **/
+enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ struct iavf_aq_desc desc;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_clear_all_wol_filters);
+
+ status = iavf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_devids.h b/src/spdk/dpdk/drivers/common/iavf/iavf_devids.h
new file mode 100644
index 000000000..2e63aac28
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_devids.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_DEVIDS_H_
+#define _IAVF_DEVIDS_H_
+
+/* Vendor ID */
+#define IAVF_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs for the VF driver */
+#define IAVF_DEV_ID_VF 0x154C
+#define IAVF_DEV_ID_VF_HV 0x1571
+#define IAVF_DEV_ID_ADAPTIVE_VF 0x1889
+#define IAVF_DEV_ID_X722_VF 0x37CD
+
+#endif /* _IAVF_DEVIDS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_impl.c b/src/spdk/dpdk/drivers/common/iavf/iavf_impl.c
new file mode 100644
index 000000000..6174a9144
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_impl.c
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_random.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+
+#include "iavf_type.h"
+#include "iavf_prototype.h"
+
+int iavf_common_logger;
+
+enum iavf_status
+iavf_allocate_dma_mem_d(__rte_unused struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (!mem)
+ return IAVF_ERR_PARAM;
+
+ snprintf(z_name, sizeof(z_name), "iavf_dma_%"PRIu64, rte_rand());
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment,
+ RTE_PGSIZE_2M);
+ if (!mz)
+ return IAVF_ERR_NO_MEMORY;
+
+ mem->size = size;
+ mem->va = mz->addr;
+ mem->pa = mz->iova;
+ mem->zone = (const void *)mz;
+
+ return IAVF_SUCCESS;
+}
+
+enum iavf_status
+iavf_free_dma_mem_d(__rte_unused struct iavf_hw *hw,
+ struct iavf_dma_mem *mem)
+{
+ if (!mem)
+ return IAVF_ERR_PARAM;
+
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
+ mem->va = NULL;
+ mem->pa = (u64)0;
+
+ return IAVF_SUCCESS;
+}
+
+enum iavf_status
+iavf_allocate_virt_mem_d(__rte_unused struct iavf_hw *hw,
+ struct iavf_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return IAVF_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = rte_zmalloc("iavf", size, 0);
+
+ if (mem->va)
+ return IAVF_SUCCESS;
+ else
+ return IAVF_ERR_NO_MEMORY;
+}
+
+enum iavf_status
+iavf_free_virt_mem_d(__rte_unused struct iavf_hw *hw,
+ struct iavf_virt_mem *mem)
+{
+ if (!mem)
+ return IAVF_ERR_PARAM;
+
+ rte_free(mem->va);
+ mem->va = NULL;
+
+ return IAVF_SUCCESS;
+}
+
+RTE_INIT(iavf_common_init_log)
+{
+ iavf_common_logger = rte_log_register("pmd.common.iavf");
+ if (iavf_common_logger >= 0)
+ rte_log_set_level(iavf_common_logger, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_osdep.h b/src/spdk/dpdk/drivers/common/iavf/iavf_osdep.h
new file mode 100644
index 000000000..7cba13ff7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_osdep.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_OSDEP_H_
+#define _IAVF_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#ifndef __INTEL_NET_BASE_OSDEP__
+#define __INTEL_NET_BASE_OSDEP__
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef int64_t s64;
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define FIELD_SIZEOF(t, f) RTE_SIZEOF_FIELD(t, f)
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
+#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
+#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
+
+#define NTOHS(a) rte_be_to_cpu_16(a)
+#define NTOHL(a) rte_be_to_cpu_32(a)
+#define HTONS(a) rte_cpu_to_be_16(a)
+#define HTONL(a) rte_cpu_to_be_32(a)
+
+static __rte_always_inline uint32_t
+readl(volatile void *addr)
+{
+ return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+static __rte_always_inline void
+writel(uint32_t value, volatile void *addr)
+{
+ rte_write32(rte_cpu_to_le_32(value), addr);
+}
+
+static __rte_always_inline void
+writel_relaxed(uint32_t value, volatile void *addr)
+{
+ rte_write32_relaxed(rte_cpu_to_le_32(value), addr);
+}
+
+static __rte_always_inline uint64_t
+readq(volatile void *addr)
+{
+ return rte_le_to_cpu_64(rte_read64(addr));
+}
+
+static __rte_always_inline void
+writeq(uint64_t value, volatile void *addr)
+{
+ rte_write64(rte_cpu_to_le_64(value), addr);
+}
+
+#define wr32(a, reg, value) writel((value), (a)->hw_addr + (reg))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+#define wr64(a, reg, value) writeq((value), (a)->hw_addr + (reg))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+
+#endif /* __INTEL_NET_BASE_OSDEP__ */
+
+#define iavf_memset(a, b, c, d) memset((a), (b), (c))
+#define iavf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+
+#define iavf_usec_delay(x) rte_delay_us_sleep(x)
+#define iavf_msec_delay(x) iavf_usec_delay(1000 * (x))
+
+#define IAVF_PCI_REG_WRITE(reg, value) writel(value, reg)
+#define IAVF_PCI_REG_WRITE_RELAXED(reg, value) writel_relaxed(value, reg)
+
+#define IAVF_READ_REG(hw, reg) rd32(hw, reg)
+#define IAVF_WRITE_REG(hw, reg, value) wr32(hw, reg, value)
+
+#define IAVF_WRITE_FLUSH(a) IAVF_READ_REG(a, IAVF_VFGEN_RSTAT)
+
+extern int iavf_common_logger;
+
+#define DEBUGOUT(S) rte_log(RTE_LOG_DEBUG, iavf_common_logger, S)
+#define DEBUGOUT2(S, A...) rte_log(RTE_LOG_DEBUG, iavf_common_logger, S, ##A)
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+
+#define iavf_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ rte_log(RTE_LOG_DEBUG, iavf_common_logger, \
+ "iavf %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* memory allocation tracking */
+struct iavf_dma_mem {
+ void *va;
+ u64 pa;
+ u32 size;
+ const void *zone;
+} __rte_packed;
+
+struct iavf_virt_mem {
+ void *va;
+ u32 size;
+} __rte_packed;
+
+/* SW spinlock */
+struct iavf_spinlock {
+ rte_spinlock_t spinlock;
+};
+
+#define iavf_allocate_dma_mem(h, m, unused, s, a) \
+ iavf_allocate_dma_mem_d(h, m, s, a)
+#define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)
+
+#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
+#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
+
+static inline void
+iavf_init_spinlock_d(struct iavf_spinlock *sp)
+{
+ rte_spinlock_init(&sp->spinlock);
+}
+
+static inline void
+iavf_acquire_spinlock_d(struct iavf_spinlock *sp)
+{
+ rte_spinlock_lock(&sp->spinlock);
+}
+
+static inline void
+iavf_release_spinlock_d(struct iavf_spinlock *sp)
+{
+ rte_spinlock_unlock(&sp->spinlock);
+}
+
+static inline void
+iavf_destroy_spinlock_d(__rte_unused struct iavf_spinlock *sp)
+{
+}
+
+#define iavf_init_spinlock(_sp) iavf_init_spinlock_d(_sp)
+#define iavf_acquire_spinlock(_sp) iavf_acquire_spinlock_d(_sp)
+#define iavf_release_spinlock(_sp) iavf_release_spinlock_d(_sp)
+#define iavf_destroy_spinlock(_sp) iavf_destroy_spinlock_d(_sp)
+
+#endif /* _IAVF_OSDEP_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_prototype.h b/src/spdk/dpdk/drivers/common/iavf/iavf_prototype.h
new file mode 100644
index 000000000..31ce2af49
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_prototype.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_PROTOTYPE_H_
+#define _IAVF_PROTOTYPE_H_
+
+#include "iavf_type.h"
+#include "iavf_alloc.h"
+#include "virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
+enum iavf_status iavf_init_asq(struct iavf_hw *hw);
+enum iavf_status iavf_init_arq(struct iavf_hw *hw);
+enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw);
+enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw);
+u16 iavf_clean_asq(struct iavf_hw *hw);
+void iavf_free_adminq_asq(struct iavf_hw *hw);
+void iavf_free_adminq_arq(struct iavf_hw *hw);
+enum iavf_status iavf_validate_mac_addr(u8 *mac_addr);
+void iavf_adminq_init_ring_data(struct iavf_hw *hw);
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *events_pending);
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details);
+bool iavf_asq_done(struct iavf_hw *hw);
+
+/* debug function for adminq */
+void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void iavf_idle_aq(struct iavf_hw *hw);
+bool iavf_check_asq_alive(struct iavf_hw *hw);
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
+
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
+ u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
+ u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
+
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
+
+extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
+
+STATIC INLINE struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return iavf_ptype_lookup[ptype];
+}
+
+/* prototype for functions used for SW spinlocks */
+void iavf_init_spinlock(struct iavf_spinlock *sp);
+void iavf_acquire_spinlock(struct iavf_spinlock *sp);
+void iavf_release_spinlock(struct iavf_spinlock *sp);
+void iavf_destroy_spinlock(struct iavf_spinlock *sp);
+
+void iavf_vf_parse_hw_config(struct iavf_hw *hw,
+ struct virtchnl_vf_resource *msg);
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details);
+enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct iavf_asq_cmd_details *cmd_details);
+enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
+ struct iavf_asq_cmd_details *cmd_details);
+#endif /* _IAVF_PROTOTYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_register.h b/src/spdk/dpdk/drivers/common/iavf/iavf_register.h
new file mode 100644
index 000000000..03d62a9da
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_register.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_REGISTER_H_
+#define _IAVF_REGISTER_H_
+
+#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define IAVF_VF_ARQH1_ARQH_SHIFT 0
+#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
+#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQVFE_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQVFE_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTL01_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define IAVF_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define IAVF_VFINT_ICR01_QUEUE_0_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_QUEUE_0_SHIFT)
+#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define IAVF_VFINT_ICR01_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR01_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_ADMINQ_SHIFT)
+#define IAVF_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY_MAX_INDEX 12
+#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_VFQF_HLUT_MAX_INDEX 15
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+
+#endif /* _IAVF_REGISTER_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_status.h b/src/spdk/dpdk/drivers/common/iavf/iavf_status.h
new file mode 100644
index 000000000..f42563806
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_status.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_STATUS_H_
+#define _IAVF_STATUS_H_
+
+/* Error Codes */
+enum iavf_status {
+ IAVF_SUCCESS = 0,
+ IAVF_ERR_NVM = -1,
+ IAVF_ERR_NVM_CHECKSUM = -2,
+ IAVF_ERR_PHY = -3,
+ IAVF_ERR_CONFIG = -4,
+ IAVF_ERR_PARAM = -5,
+ IAVF_ERR_MAC_TYPE = -6,
+ IAVF_ERR_UNKNOWN_PHY = -7,
+ IAVF_ERR_LINK_SETUP = -8,
+ IAVF_ERR_ADAPTER_STOPPED = -9,
+ IAVF_ERR_INVALID_MAC_ADDR = -10,
+ IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
+ IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_INVALID_LINK_SETTINGS = -13,
+ IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
+ IAVF_ERR_RESET_FAILED = -15,
+ IAVF_ERR_SWFW_SYNC = -16,
+ IAVF_ERR_NO_AVAILABLE_VSI = -17,
+ IAVF_ERR_NO_MEMORY = -18,
+ IAVF_ERR_BAD_PTR = -19,
+ IAVF_ERR_RING_FULL = -20,
+ IAVF_ERR_INVALID_PD_ID = -21,
+ IAVF_ERR_INVALID_QP_ID = -22,
+ IAVF_ERR_INVALID_CQ_ID = -23,
+ IAVF_ERR_INVALID_CEQ_ID = -24,
+ IAVF_ERR_INVALID_AEQ_ID = -25,
+ IAVF_ERR_INVALID_SIZE = -26,
+ IAVF_ERR_INVALID_ARP_INDEX = -27,
+ IAVF_ERR_INVALID_FPM_FUNC_ID = -28,
+ IAVF_ERR_QP_INVALID_MSG_SIZE = -29,
+ IAVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ IAVF_ERR_INVALID_FRAG_COUNT = -31,
+ IAVF_ERR_QUEUE_EMPTY = -32,
+ IAVF_ERR_INVALID_ALIGNMENT = -33,
+ IAVF_ERR_FLUSHED_QUEUE = -34,
+ IAVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ IAVF_ERR_INVALID_IMM_DATA_SIZE = -36,
+ IAVF_ERR_TIMEOUT = -37,
+ IAVF_ERR_OPCODE_MISMATCH = -38,
+ IAVF_ERR_CQP_COMPL_ERROR = -39,
+ IAVF_ERR_INVALID_VF_ID = -40,
+ IAVF_ERR_INVALID_HMCFN_ID = -41,
+ IAVF_ERR_BACKING_PAGE_ERROR = -42,
+ IAVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ IAVF_ERR_INVALID_PBLE_INDEX = -44,
+ IAVF_ERR_INVALID_SD_INDEX = -45,
+ IAVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ IAVF_ERR_INVALID_SD_TYPE = -47,
+ IAVF_ERR_MEMCPY_FAILED = -48,
+ IAVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ IAVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ IAVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ IAVF_ERR_SRQ_ENABLED = -52,
+ IAVF_ERR_ADMIN_QUEUE_ERROR = -53,
+ IAVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ IAVF_ERR_BUF_TOO_SHORT = -55,
+ IAVF_ERR_ADMIN_QUEUE_FULL = -56,
+ IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ IAVF_ERR_BAD_IWARP_CQE = -58,
+ IAVF_ERR_NVM_BLANK_MODE = -59,
+ IAVF_ERR_NOT_IMPLEMENTED = -60,
+ IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ IAVF_ERR_DIAG_TEST_FAILED = -62,
+ IAVF_ERR_NOT_READY = -63,
+ IAVF_NOT_SUPPORTED = -64,
+ IAVF_ERR_FIRMWARE_API_VERSION = -65,
+ IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+};
+
+#endif /* _IAVF_STATUS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_type.h b/src/spdk/dpdk/drivers/common/iavf/iavf_type.h
new file mode 100644
index 000000000..665b070a0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_type.h
@@ -0,0 +1,1014 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_TYPE_H_
+#define _IAVF_TYPE_H_
+
+#include "iavf_status.h"
+#include "iavf_osdep.h"
+#include "iavf_register.h"
+#include "iavf_adminq.h"
+#include "iavf_devids.h"
+
+#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p);
+#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q);
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r);
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t);
+
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
+/* IAVF_MASK is a macro used on 32 bit registers */
+#define IAVF_MASK(mask, shift) ((mask) << (shift))
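+/* e.g. IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT) yields 0x3FF, a 10-bit
+ * field at bit 0, while IAVF_MASK(1UL, 31) yields 0x80000000.
+ */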
+
+#define IAVF_MAX_PF 16
+#define IAVF_MAX_PF_VSI 64
+#define IAVF_MAX_PF_QP 128
+#define IAVF_MAX_VSI_QP 16
+#define IAVF_MAX_VF_VSI 4
+#define IAVF_MAX_CHAINED_RX_BUFFERS 5
+
+/* something less than 1 minute */
+#define IAVF_HEARTBEAT_TIMEOUT (HZ * 50)
+
+
+/* Check whether address is multicast. */
+#define IAVF_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IAVF_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+
+/* forward declaration */
+struct iavf_hw;
+typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *);
+
+#define ETH_ALEN 6
+/* Data type manipulation macros. */
+#define IAVF_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define IAVF_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define IAVF_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define IAVF_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define IAVF_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define IAVF_LO_BYTE(x) ((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define IAVF_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define IAVF_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define IAVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
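+/* Worked example: with count == 512, next_to_use == 10 and
+ * next_to_clean == 5, the macro yields (512 + 5 - 10 - 1) == 506 unused
+ * descriptors; one slot is always held back so a full ring can be told
+ * apart from an empty one.
+ */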
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define IAVF_QTX_CTL_VF_QUEUE 0x0
+#define IAVF_QTX_CTL_VM_QUEUE 0x1
+#define IAVF_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum iavf_debug_mask {
+ IAVF_DEBUG_INIT = 0x00000001,
+ IAVF_DEBUG_RELEASE = 0x00000002,
+
+ IAVF_DEBUG_LINK = 0x00000010,
+ IAVF_DEBUG_PHY = 0x00000020,
+ IAVF_DEBUG_HMC = 0x00000040,
+ IAVF_DEBUG_NVM = 0x00000080,
+ IAVF_DEBUG_LAN = 0x00000100,
+ IAVF_DEBUG_FLOW = 0x00000200,
+ IAVF_DEBUG_DCB = 0x00000400,
+ IAVF_DEBUG_DIAG = 0x00000800,
+ IAVF_DEBUG_FD = 0x00001000,
+ IAVF_DEBUG_PACKAGE = 0x00002000,
+
+ IAVF_DEBUG_AQ_MESSAGE = 0x01000000,
+ IAVF_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ IAVF_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ IAVF_DEBUG_AQ_COMMAND = 0x06000000,
+ IAVF_DEBUG_AQ = 0x0F000000,
+
+ IAVF_DEBUG_USER = 0xF0000000,
+
+ IAVF_DEBUG_ALL = 0xFFFFFFFF
+};
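+
+/* Usage sketch (illustrative): to trace only admin queue activity, set
+ *
+ *	hw->debug_mask = IAVF_DEBUG_AQ;
+ *
+ * before issuing commands; iavf_debug() and iavf_debug_aq() then suppress
+ * every other category.
+ */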
+
+/* PCI Bus Info */
+#define IAVF_PCI_LINK_STATUS 0xB2
+#define IAVF_PCI_LINK_WIDTH 0x3F0
+#define IAVF_PCI_LINK_WIDTH_1 0x10
+#define IAVF_PCI_LINK_WIDTH_2 0x20
+#define IAVF_PCI_LINK_WIDTH_4 0x40
+#define IAVF_PCI_LINK_WIDTH_8 0x80
+#define IAVF_PCI_LINK_SPEED 0xF
+#define IAVF_PCI_LINK_SPEED_2500 0x1
+#define IAVF_PCI_LINK_SPEED_5000 0x2
+#define IAVF_PCI_LINK_SPEED_8000 0x3
+
+#define IAVF_MDIO_CLAUSE22_STCODE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_STCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE22_OPCODE_WRITE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE22_OPCODE_READ_MASK IAVF_MASK(2, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define IAVF_MDIO_CLAUSE45_STCODE_MASK IAVF_MASK(0, \
+ IAVF_GLGEN_MSCA_STCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK IAVF_MASK(0, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_WRITE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK IAVF_MASK(2, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_READ_MASK IAVF_MASK(3, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define IAVF_PHY_COM_REG_PAGE 0x1E
+#define IAVF_PHY_LED_LINK_MODE_MASK 0xF0
+#define IAVF_PHY_LED_MANUAL_ON 0x100
+#define IAVF_PHY_LED_PROV_REG_1 0xC430
+#define IAVF_PHY_LED_MODE_MASK 0xFFFF
+#define IAVF_PHY_LED_MODE_ORIG 0x80000000
+
+/* Memory types */
+enum iavf_memset_type {
+ IAVF_NONDMA_MEM = 0,
+ IAVF_DMA_MEM
+};
+
+/* Memcpy types */
+enum iavf_memcpy_type {
+ IAVF_NONDMA_TO_NONDMA = 0,
+ IAVF_NONDMA_TO_DMA,
+ IAVF_DMA_TO_DMA,
+ IAVF_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum iavf_mac_type {
+ IAVF_MAC_UNKNOWN = 0,
+ IAVF_MAC_XL710,
+ IAVF_MAC_VF,
+ IAVF_MAC_X722,
+ IAVF_MAC_X722_VF,
+ IAVF_MAC_GENERIC,
+};
+
+enum iavf_vsi_type {
+ IAVF_VSI_MAIN = 0,
+ IAVF_VSI_VMDQ1 = 1,
+ IAVF_VSI_VMDQ2 = 2,
+ IAVF_VSI_CTRL = 3,
+ IAVF_VSI_FCOE = 4,
+ IAVF_VSI_MIRROR = 5,
+ IAVF_VSI_SRIOV = 6,
+ IAVF_VSI_FDIR = 7,
+ IAVF_VSI_TYPE_UNKNOWN
+};
+
+enum iavf_queue_type {
+ IAVF_QUEUE_TYPE_RX = 0,
+ IAVF_QUEUE_TYPE_TX,
+ IAVF_QUEUE_TYPE_PE_CEQ,
+ IAVF_QUEUE_TYPE_UNKNOWN
+};
+
+#define IAVF_HW_CAP_MAX_GPIO 30
+#define IAVF_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define IAVF_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+enum iavf_acpi_programming_method {
+ IAVF_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ IAVF_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define IAVF_WOL_SUPPORT_MASK 0x1
+#define IAVF_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define IAVF_PROXY_SUPPORT_MASK 0x4
+
+/* Capabilities of a PF or a VF or the whole device */
+struct iavf_hw_capabilities {
+ /* Cloud filter modes:
+ * Mode1: Filter on L4 port only
+ * Mode2: Filter for non-tunneled traffic
+ * Mode3: Filter for tunnel traffic
+ */
+#define IAVF_CLOUD_FILTER_MODE1 0x6
+#define IAVF_CLOUD_FILTER_MODE2 0x7
+#define IAVF_CLOUD_FILTER_MODE3 0x8
+#define IAVF_SWITCH_MODE_MASK 0xF
+
+ bool dcb;
+ bool fcoe;
+ bool iwarp;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors_vf;
+ bool apm_wol_support;
+ enum iavf_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
+};
+
+struct iavf_mac_info {
+ enum iavf_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+#define IAVF_NVM_EXEC_GET_AQ_RESULT 0x0
+#define IAVF_NVM_EXEC_FEATURES 0xe
+#define IAVF_NVM_EXEC_STATUS 0xf
+
+/* NVMUpdate features API */
+#define IAVF_NVMUPD_FEATURES_API_VER_MAJOR 0
+#define IAVF_NVMUPD_FEATURES_API_VER_MINOR 14
+#define IAVF_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12
+
+#define IAVF_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0)
+
+struct iavf_nvmupd_features {
+ u8 major;
+ u8 minor;
+ u16 size;
+ u8 features[IAVF_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];
+};
+
+#define IAVF_MODULE_SFF_DIAG_CAPAB 0x40
+/* PCI bus types */
+enum iavf_bus_type {
+ iavf_bus_type_unknown = 0,
+ iavf_bus_type_pci,
+ iavf_bus_type_pcix,
+ iavf_bus_type_pci_express,
+ iavf_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum iavf_bus_speed {
+ iavf_bus_speed_unknown = 0,
+ iavf_bus_speed_33 = 33,
+ iavf_bus_speed_66 = 66,
+ iavf_bus_speed_100 = 100,
+ iavf_bus_speed_120 = 120,
+ iavf_bus_speed_133 = 133,
+ iavf_bus_speed_2500 = 2500,
+ iavf_bus_speed_5000 = 5000,
+ iavf_bus_speed_8000 = 8000,
+ iavf_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum iavf_bus_width {
+ iavf_bus_width_unknown = 0,
+ iavf_bus_width_pcie_x1 = 1,
+ iavf_bus_width_pcie_x2 = 2,
+ iavf_bus_width_pcie_x4 = 4,
+ iavf_bus_width_pcie_x8 = 8,
+ iavf_bus_width_32 = 32,
+ iavf_bus_width_64 = 64,
+ iavf_bus_width_reserved
+};
+
+/* Bus parameters */
+struct iavf_bus_info {
+ enum iavf_bus_speed speed;
+ enum iavf_bus_width width;
+ enum iavf_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+ u16 bus_id;
+};
+
+#define IAVF_MAX_USER_PRIORITY 8
+#define IAVF_TLV_STATUS_OPER 0x1
+#define IAVF_TLV_STATUS_SYNC 0x2
+#define IAVF_TLV_STATUS_ERR 0x4
+#define IAVF_CEE_OPER_MAX_APPS 3
+#define IAVF_APP_PROTOID_FCOE 0x8906
+#define IAVF_APP_PROTOID_ISCSI 0x0cbc
+#define IAVF_APP_PROTOID_FIP 0x8914
+#define IAVF_APP_SEL_ETHTYPE 0x1
+#define IAVF_APP_SEL_TCPIP 0x2
+#define IAVF_CEE_APP_SEL_ETHTYPE 0x0
+#define IAVF_CEE_APP_SEL_TCPIP 0x1
+
+/* Port hardware description */
+struct iavf_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct iavf_mac_info mac;
+ struct iavf_bus_info bus;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+
+ /* capabilities for entire device and PCI func */
+ struct iavf_hw_capabilities dev_caps;
+
+ /* Admin Queue info */
+ struct iavf_adminq_info aq;
+
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
+#define IAVF_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define IAVF_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define IAVF_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define IAVF_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define IAVF_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+ u64 flags;
+
+ /* NVMUpdate features */
+ struct iavf_nvmupd_features nvmupd_features;
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+struct iavf_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union iavf_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union iavf_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define IAVF_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define IAVF_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ IAVF_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define IAVF_RXD_QW0_FCOEINDX_SHIFT 0
+#define IAVF_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ IAVF_RXD_QW0_FCOEINDX_SHIFT)
+
+enum iavf_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
+ IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
+ IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+
+ IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
+ IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+ IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define IAVF_RXD_QW1_STATUS_SHIFT 0
+#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
+ << IAVF_RXD_QW1_STATUS_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_UMBCAST_SHIFT IAVF_RX_DESC_STATUS_UMBCAST_SHIFT
+#define IAVF_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ IAVF_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum iavf_rx_desc_fltstat_values {
+ IAVF_RX_DESC_FLTSTAT_NO_DATA = 0,
+ IAVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ IAVF_RX_DESC_FLTSTAT_RSV = 2,
+ IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define IAVF_RXD_PACKET_TYPE_UNICAST 0
+#define IAVF_RXD_PACKET_TYPE_MULTICAST 1
+#define IAVF_RXD_PACKET_TYPE_BROADCAST 2
+#define IAVF_RXD_PACKET_TYPE_MIRRORED 3
+
+#define IAVF_RXD_QW1_ERROR_SHIFT 19
+#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
+
+enum iavf_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
+ IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
+ IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
+ IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
+ IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum iavf_rx_desc_error_l3l4e_fcoe_masks {
+ IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
+ IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
+ IAVF_RX_DESC_ERROR_L3L4E_FC = 2,
+ IAVF_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define IAVF_RXD_QW1_PTYPE_SHIFT 30
+#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum iavf_rx_l2_ptype {
+ IAVF_RX_PTYPE_L2_RESERVED = 0,
+ IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
+ IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
+ IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
+ IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
+ IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
+ IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
+ IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ IAVF_RX_PTYPE_L2_ARP = 11,
+ IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153,
+ IAVF_RX_PTYPE_PARSER_ABORTED = 255
+};
+
+struct iavf_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum iavf_rx_ptype_outer_ip {
+ IAVF_RX_PTYPE_OUTER_L2 = 0,
+ IAVF_RX_PTYPE_OUTER_IP = 1
+};
+
+enum iavf_rx_ptype_outer_ip_ver {
+ IAVF_RX_PTYPE_OUTER_NONE = 0,
+ IAVF_RX_PTYPE_OUTER_IPV4 = 0,
+ IAVF_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum iavf_rx_ptype_outer_fragmented {
+ IAVF_RX_PTYPE_NOT_FRAG = 0,
+ IAVF_RX_PTYPE_FRAG = 1
+};
+
+enum iavf_rx_ptype_tunnel_type {
+ IAVF_RX_PTYPE_TUNNEL_NONE = 0,
+ IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum iavf_rx_ptype_tunnel_end_prot {
+ IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
+ IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum iavf_rx_ptype_inner_prot {
+ IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
+ IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
+ IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
+ IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
+ IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
+ IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum iavf_rx_ptype_payload_layer {
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define IAVF_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define IAVF_RX_PTYPE_SHIFT 56
+
+#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define IAVF_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define IAVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(IAVF_RXD_QW1_LENGTH_SPH_SHIFT)
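+
+/* Illustrative sketch (not part of the original header): decoding the
+ * writeback qword1 with the shift/mask pairs above. The __le64 to host
+ * byte-order conversion is assumed to have been done by the caller.
+ */
+static inline u32 iavf_sketch_rx_qw1_pkt_len(u64 qword1)
+{
+	/* the buffer is owned by software only once DD (descriptor done)
+	 * is set
+	 */
+	if (!(qword1 & BIT_ULL(IAVF_RX_DESC_STATUS_DD_SHIFT)))
+		return 0;
+	/* reject the buffer on any reported receive error */
+	if (qword1 & IAVF_RXD_QW1_ERROR_MASK)
+		return 0;
+	return (u32)((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+		     IAVF_RXD_QW1_LENGTH_PBUF_SHIFT);
+}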
+
+#define IAVF_RXD_QW1_NEXTP_SHIFT 38
+#define IAVF_RXD_QW1_NEXTP_MASK (0x1FFFULL << IAVF_RXD_QW1_NEXTP_SHIFT)
+
+#define IAVF_RXD_QW2_EXT_STATUS_SHIFT 0
+#define IAVF_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ IAVF_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum iavf_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ IAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ IAVF_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ IAVF_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ IAVF_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define IAVF_RXD_QW2_L2TAG2_SHIFT 0
+#define IAVF_RXD_QW2_L2TAG2_MASK (0xFFFFUL << IAVF_RXD_QW2_L2TAG2_SHIFT)
+
+#define IAVF_RXD_QW2_L2TAG3_SHIFT 16
+#define IAVF_RXD_QW2_L2TAG3_MASK (0xFFFFUL << IAVF_RXD_QW2_L2TAG3_SHIFT)
+
+enum iavf_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ IAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ IAVF_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ IAVF_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ IAVF_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ IAVF_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ IAVF_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ IAVF_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ IAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define IAVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define IAVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum iavf_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ IAVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum iavf_rx_prog_status_desc_prog_id_masks {
+ IAVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum iavf_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ IAVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define IAVF_TWO_BIT_MASK 0x3
+#define IAVF_THREE_BIT_MASK 0x7
+#define IAVF_FOUR_BIT_MASK 0xF
+#define IAVF_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct iavf_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define IAVF_TXD_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+enum iavf_tx_desc_dtype_value {
+ IAVF_TX_DESC_DTYPE_DATA = 0x0,
+ IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ IAVF_TX_DESC_DTYPE_CONTEXT = 0x1,
+ IAVF_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ IAVF_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ IAVF_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ IAVF_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ IAVF_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ IAVF_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ IAVF_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define IAVF_TXD_QW1_CMD_SHIFT 4
+#define IAVF_TXD_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
+
+enum iavf_tx_desc_cmd_bits {
+ IAVF_TX_DESC_CMD_EOP = 0x0001,
+ IAVF_TX_DESC_CMD_RS = 0x0002,
+ IAVF_TX_DESC_CMD_ICRC = 0x0004,
+ IAVF_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ IAVF_TX_DESC_CMD_DUMMY = 0x0010,
+ IAVF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ IAVF_TX_DESC_CMD_FCOET = 0x0080,
+ IAVF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define IAVF_TXD_QW1_OFFSET_SHIFT 16
+#define IAVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ IAVF_TXD_QW1_OFFSET_SHIFT)
+
+enum iavf_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ IAVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define IAVF_TXD_QW1_MACLEN_MASK (0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_QW1_IPLEN_MASK (0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_QW1_L4LEN_MASK (0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_QW1_FCLEN_MASK (0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define IAVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_QW1_L2TAG1_SHIFT 48
+#define IAVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
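+
+/* Illustrative sketch (not part of the original header): packing the
+ * DTYPE/CMD/OFFSET/BUF_SZ/L2TAG1 fields above into cmd_type_offset_bsz
+ * of one data descriptor. The CPU to little-endian conversion required
+ * before writing the __le64 field is intentionally elided.
+ */
+static inline u64 iavf_sketch_build_tx_qw1(u32 td_cmd, u32 td_offset,
+					   u16 size, u16 l2tag1)
+{
+	return IAVF_TX_DESC_DTYPE_DATA |
+	       ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
+	       ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
+	       ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
+	       ((u64)l2tag1 << IAVF_TXD_QW1_L2TAG1_SHIFT);
+}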
+
+/* Context descriptors */
+struct iavf_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_CTX_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT 4
+#define IAVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
+
+enum iavf_tx_ctx_desc_cmd_bits {
+ IAVF_TX_CTX_DESC_TSO = 0x01,
+ IAVF_TX_CTX_DESC_TSYN = 0x02,
+ IAVF_TX_CTX_DESC_IL2TAG2 = 0x04,
+ IAVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ IAVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ IAVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ IAVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ IAVF_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ IAVF_TX_CTX_DESC_SWPE = 0x40
+};
+
+struct iavf_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define IAVF_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_NOP_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_NOP_QW1_CMD_SHIFT 4
+#define IAVF_TXD_NOP_QW1_CMD_MASK (0x7FUL << IAVF_TXD_NOP_QW1_CMD_SHIFT)
+
+enum iavf_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_TX_NOP_DESC_EOP_SHIFT = 0,
+ IAVF_TX_NOP_DESC_RS_SHIFT = 1,
+ IAVF_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+/* Packet Classifier Types for filters */
+enum iavf_filter_pctype {
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+	 */
+ IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+	 */
+ IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ IAVF_FILTER_PCTYPE_FCOE_OX = 48,
+ IAVF_FILTER_PCTYPE_FCOE_RX = 49,
+ IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+#define IAVF_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_FLTR_QW1_CMD_SHIFT 4
+#define IAVF_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ IAVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define IAVF_TXD_FLTR_QW1_ATR_MASK BIT_ULL(IAVF_TXD_FLTR_QW1_ATR_SHIFT)
+
+
+#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_MSS_SHIFT 50
+#define IAVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ IAVF_TXD_CTX_QW1_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_VSI_SHIFT 50
+#define IAVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << IAVF_TXD_CTX_QW1_VSI_SHIFT)
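+
+/* Illustrative sketch (not part of the original header): filling
+ * type_cmd_tso_mss of a context descriptor for a TSO request, using the
+ * CMD, TSO_LEN and MSS fields above (endianness conversion again elided).
+ */
+static inline u64 iavf_sketch_build_ctx_qw1(u32 tso_len, u16 mss)
+{
+	return IAVF_TX_DESC_DTYPE_CONTEXT |
+	       ((u64)IAVF_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
+	       ((u64)tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+	       ((u64)mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+}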
+
+#define IAVF_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define IAVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ IAVF_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum iavf_tx_ctx_desc_eipt_offload {
+ IAVF_TX_CTX_EXT_IP_NONE = 0x0,
+ IAVF_TX_CTX_EXT_IP_IPV6 = 0x1,
+ IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ IAVF_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_NATT_SHIFT 9
+#define IAVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define IAVF_TXD_CTX_UDP_TUNNELING BIT_ULL(IAVF_TXD_CTX_QW0_NATT_SHIFT)
+#define IAVF_TXD_CTX_GRE_TUNNELING (0x2ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define IAVF_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define IAVF_TXD_CTX_EIP_NOINC_IPID_CONST IAVF_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define IAVF_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define IAVF_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ IAVF_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define IAVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ IAVF_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define IAVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct iavf_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+#define IAVF_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define IAVF_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define IAVF_SR_OPTION_ROM_PTR 0x05
+#define IAVF_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define IAVF_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define IAVF_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define IAVF_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define IAVF_SR_RO_PCIE_LCB_PTR 0x0A
+#define IAVF_SR_EMP_IMAGE_PTR 0x0B
+#define IAVF_SR_PE_IMAGE_PTR 0x0C
+#define IAVF_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define IAVF_SR_MNG_CONFIG_PTR 0x0E
+#define IAVF_SR_PBA_FLAGS 0x15
+#define IAVF_SR_PBA_BLOCK_PTR 0x16
+#define IAVF_SR_BOOT_CONFIG_PTR 0x17
+#define IAVF_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define IAVF_SR_NVM_MAP_VERSION 0x29
+#define IAVF_SR_NVM_IMAGE_VERSION 0x2A
+#define IAVF_SR_NVM_STRUCTURE_VERSION 0x2B
+#define IAVF_SR_PXE_SETUP_PTR 0x30
+#define IAVF_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define IAVF_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define IAVF_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define IAVF_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define IAVF_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define IAVF_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define IAVF_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define IAVF_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define IAVF_SR_PHY_ACTIVITY_LIST_PTR 0x3D
+#define IAVF_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define IAVF_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define IAVF_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define IAVF_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define IAVF_SR_EMP_SR_SETTINGS_PTR 0x48
+#define IAVF_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define IAVF_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define IAVF_SR_IMMEDIATE_VALUES_PTR 0x4E
+#define IAVF_SR_OCP_CFG_WORD0 0x2B
+#define IAVF_SR_OCP_ENABLED BIT(15)
+#define IAVF_SR_BUF_ALIGNMENT 4096
+
+
+struct iavf_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate Ram */
+#define IAVF_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define IAVF_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define IAVF_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define IAVF_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define IAVF_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define IAVF_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define IAVF_ALT_BW_VALUE_MASK 0xFF
+#define IAVF_ALT_BW_RELATIVE_MASK 0x40000000
+#define IAVF_ALT_BW_VALID_MASK 0x80000000
+
+#define IAVF_DDP_TRACKID_RDONLY 0
+#define IAVF_DDP_TRACKID_INVALID 0xFFFFFFFF
+#define SECTION_TYPE_RB_MMIO 0x00001800
+#define SECTION_TYPE_RB_AQ 0x00001801
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
+struct iavf_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct iavf_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
+#endif /* _IAVF_TYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/meson.build b/src/spdk/dpdk/drivers/common/iavf/meson.build
new file mode 100644
index 000000000..1f4d8b898
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019-2020 Intel Corporation
+
+sources = files('iavf_adminq.c', 'iavf_common.c', 'iavf_impl.c')
+
+if cc.has_argument('-Wno-pointer-to-int-cast')
+ cflags += '-Wno-pointer-to-int-cast'
+endif
diff --git a/src/spdk/dpdk/drivers/common/iavf/rte_common_iavf_version.map b/src/spdk/dpdk/drivers/common/iavf/rte_common_iavf_version.map
new file mode 100644
index 000000000..92ceac108
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/rte_common_iavf_version.map
@@ -0,0 +1,12 @@
+DPDK_21 {
+ global:
+
+ iavf_init_adminq;
+ iavf_shutdown_adminq;
+ iavf_aq_send_msg_to_pf;
+ iavf_clean_arq_element;
+ iavf_set_mac_type;
+ iavf_vf_parse_hw_config;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/common/iavf/virtchnl.h b/src/spdk/dpdk/drivers/common/iavf/virtchnl.h
new file mode 100644
index 000000000..79515ee8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/virtchnl.h
@@ -0,0 +1,1311 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
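+
+/* Illustrative sketch (not part of this header) of the initialization order
+ * described above, with a hypothetical vf_send(opcode, buf, len) helper:
+ *
+ *	vf_send(VIRTCHNL_OP_VERSION, &my_ver, sizeof(my_ver));
+ *	vf_send(VIRTCHNL_OP_RESET_VF, NULL, 0);	(then poll VFGEN_RSTAT)
+ *	vf_send(VIRTCHNL_OP_GET_VF_RESOURCES, &caps, sizeof(u32));
+ *	vf_send(VIRTCHNL_OP_CONFIG_VSI_QUEUES, qcfg, qcfg_len);
+ *	vf_send(VIRTCHNL_OP_CONFIG_IRQ_MAP, imap, imap_len);
+ *	vf_send(VIRTCHNL_OP_ENABLE_QUEUES, &qsel, sizeof(qsel));
+ */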
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
+};
+
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with IAVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ /* opcode 19 is reserved */
+ /* opcodes 20, 21, and 22 are reserved */
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+ /* opcodes 34, 35, 36, 37 and 38 are reserved */
+ VIRTCHNL_OP_DCF_CMD_DESC = 39,
+ VIRTCHNL_OP_DCF_CMD_BUFF = 40,
+ VIRTCHNL_OP_DCF_DISABLE = 41,
+ VIRTCHNL_OP_DCF_GET_VSI_MAP = 42,
+ VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
+ VIRTCHNL_OP_ADD_RSS_CFG = 45,
+ VIRTCHNL_OP_DEL_RSS_CFG = 46,
+ VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
+ VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
+ VIRTCHNL_OP_QUERY_FDIR_FILTER = 49,
+};
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. Each gives a divide-by-zero error if
+ * the structure/union is not of the correct size; otherwise it creates an
+ * enum that is never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
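+
+/* Illustrative sketch (not part of the original header): acting on the PF's
+ * VIRTCHNL_OP_VERSION reply per the rules above; a major mismatch is fatal,
+ * a minor mismatch only warrants a warning in the system log.
+ */
+static inline int virtchnl_sketch_version_usable(struct virtchnl_version_info *pf_ver)
+{
+	if (pf_ver->major != VIRTCHNL_VERSION_MAJOR)
+		return 0;	/* VF cannot operate */
+	/* on a minor mismatch the caller should log a warning but continue */
+	return 1;
+}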
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000
+#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
+#define VIRTCHNL_VF_CAP_DCF 0X40000000
+#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC 0X04000000
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF 0X08000000
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF 0X10000000
+ /* 0X80000000 is reserved */
+
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF. Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u8 crc_disable;
+ /* only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported */
+ u8 rxdid;
+ u8 pad1[2];
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
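+
+/* Illustrative sketch (not part of the original header): sizing the indirect
+ * buffer for VIRTCHNL_OP_CONFIG_VSI_QUEUES. This mirrors the length check in
+ * virtchnl_vc_validate_vf_msg() at the end of this file: the base struct
+ * plus one virtchnl_queue_pair_info per queue pair.
+ */
+static inline u16 virtchnl_sketch_vsi_queue_cfg_len(u16 num_queue_pairs)
+{
+	return (u16)(sizeof(struct virtchnl_vsi_queue_config_info) +
+		     num_queue_pairs *
+		     sizeof(struct virtchnl_queue_pair_info));
+}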
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
+ * PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
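+
+/* Illustrative sketch (not part of the original header): mapping RX/TX
+ * queue 0 to vector 1. Vector 0 is left for the "other" causes, per the
+ * note above.
+ */
+static inline void virtchnl_sketch_fill_vecmap(struct virtchnl_vector_map *vm,
+					       u16 vsi_id)
+{
+	vm->vsi_id = vsi_id;
+	vm->vector_id = 1;
+	vm->rxq_map = BIT(0);	/* per-queue bitmaps */
+	vm->txq_map = BIT(0);
+	vm->rxitr_idx = 0;
+	vm->txitr_idx = 0;
+}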
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these message to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
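+
+/* Illustrative sketch (not part of the original header): selecting queue
+ * pairs 0 and 1 via the queue bitmaps for VIRTCHNL_OP_ENABLE_QUEUES or
+ * VIRTCHNL_OP_DISABLE_QUEUES.
+ */
+static inline void virtchnl_sketch_fill_qsel(struct virtchnl_queue_select *qs,
+					     u16 vsi_id)
+{
+	qs->vsi_id = vsi_id;
+	qs->pad = 0;
+	qs->rx_queues = BIT(0) | BIT(1);
+	qs->tx_queues = BIT(0) | BIT(1);
+}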
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
+ */
+
+struct virtchnl_eth_stats {
+ u64 rx_bytes; /* received bytes */
+ u64 rx_unicast; /* received unicast pkts */
+ u64 rx_multicast; /* received multicast pkts */
+ u64 rx_broadcast; /* received broadcast pkts */
+ u64 rx_discards;
+ u64 rx_unknown_protocol;
+ u64 tx_bytes; /* transmitted bytes */
+ u64 tx_unicast; /* transmitted unicast pkts */
+ u64 tx_multicast; /* transmitted multicast pkts */
+ u64 tx_broadcast; /* transmitted broadcast pkts */
+ u64 tx_discards;
+ u64 tx_errors;
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
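+
+/* Illustrative sketch (not part of the original header): buffer length for a
+ * VIRTCHNL_OP_CONFIG_RSS_KEY message carrying key_len packed key bytes; the
+ * key[1] tail already accounts for the first byte, matching the length
+ * check in virtchnl_vc_validate_vf_msg() at the end of this file.
+ */
+static inline u16 virtchnl_sketch_rss_key_msg_len(u16 key_len)
+{
+	return (u16)(sizeof(struct virtchnl_rss_key) + key_len - 1);
+}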
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_XOR_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* This is used by the PF driver to enforce how many channels can be
+ * supported. When the ADQ_V2 capability is negotiated, it allows 16
+ * channels; otherwise the PF driver allows only a maximum of 4 channels.
+ */
+#define VIRTCHNL_MAX_ADQ_CHANNELS 4
+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
+
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+	/* vlan_prio is part of this 16-bit field, even from the OS
+	 * perspective: bits 11..0 (vlan_id:12) carry the actual vlan_id and
+	 * bits 14..12 carry vlan_prio. In the future, if vlan_prio offload is
+	 * added, that information will be passed as part of the "vlan_id"
+	 * field, in bits 14..12.
+	 */
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
+ VIRTCHNL_UDP_V4_FLOW,
+ VIRTCHNL_UDP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+ enum virtchnl_flow_type flow_type;
+ enum virtchnl_action action;
+ u32 action_meta;
+ u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
+/* VIRTCHNL_OP_DCF_GET_VSI_MAP
+ * VF sends this message to get the VSI mapping table.
+ * PF responds with an indirect message containing the VF's
+ * HW VSI IDs.
+ * The index into the vf_vsi array is the logical VF ID; the value of each
+ * vf_vsi entry is the VF's HW VSI ID together with its validity bit.
+ */
+struct virtchnl_dcf_vsi_map {
+ u16 pf_vsi; /* PF's HW VSI ID */
+ u16 num_vfs; /* The actual number of VFs allocated */
+#define VIRTCHNL_DCF_VF_VSI_ID_S 0
+#define VIRTCHNL_DCF_VF_VSI_ID_M (0xFFF << VIRTCHNL_DCF_VF_VSI_ID_S)
+#define VIRTCHNL_DCF_VF_VSI_VALID (1 << 15)
+ u16 vf_vsi[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_dcf_vsi_map);
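+
+/* Illustrative sketch (not part of the original header): decoding one vf_vsi
+ * entry with the VSI_ID/VALID fields above. The caller must ensure
+ * vf_id < num_vfs.
+ */
+static inline int virtchnl_sketch_vf_hw_vsi(const struct virtchnl_dcf_vsi_map *map,
+					    u16 vf_id, u16 *hw_vsi_id)
+{
+	u16 entry = map->vf_vsi[vf_id];	/* index is the logical VF ID */
+
+	if (!(entry & VIRTCHNL_DCF_VF_VSI_VALID))
+		return -1;
+	*hw_vsi_id = (entry & VIRTCHNL_DCF_VF_VSI_ID_M) >>
+		     VIRTCHNL_DCF_VF_VSI_ID_S;
+	return 0;
+}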
+
+#define PKG_NAME_SIZE 32
+#define DSN_SIZE 8
+
+struct pkg_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, pkg_version);
+
+struct virtchnl_pkg_info {
+ struct pkg_version pkg_ver;
+ u32 track_id;
+ char pkg_name[PKG_NAME_SIZE];
+ u8 dsn[DSN_SIZE];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_pkg_info);
+
+struct virtchnl_supported_rxdids {
+ u64 supported_rxdids;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_supported_rxdids);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+ VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities, use link_event; otherwise use link_event_adv
+		 * to get the speed and link information. The ability to understand
+ * new speeds is indicated by setting the capability flag
+ * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
+ * in virtchnl_vf_resource struct and can be used to determine
+ * which link event struct to use below.
+ */
+ struct {
+ enum virtchnl_link_speed link_speed;
+ u8 link_status;
+ } link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ } link_event_adv;
+ struct {
+ u16 vf_id;
+ u16 vsi_id;
+ } vf_vsi_map;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+
+/* Since VF messages are limited to a u16 size, precalculate the maximum
+ * number of nested elements in the virtchnl structures that the virtual
+ * channel can handle in a single message.
+ */
+enum virtchnl_vector_limits {
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+ sizeof(struct virtchnl_queue_pair_info),
+
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+ sizeof(struct virtchnl_vector_map),
+
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr),
+
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+ sizeof(u16),
+
+
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+ sizeof(struct virtchnl_channel_info),
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
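+
+/* Illustrative sketch (not part of the original header): polling for reset
+ * completion per the description above. vfgen_rstat_read() stands in for a
+ * driver-specific VFGEN_RSTAT register accessor and is hypothetical; a real
+ * driver would also bound this loop with a timeout.
+ */
+static inline enum virtchnl_vfr_states virtchnl_sketch_wait_reset(void)
+{
+	u32 rstat;
+
+	do {
+		/* mask down to the VFR state field; an in-reset read of
+		 * DEADBEEF masks to 3
+		 */
+		rstat = vfgen_rstat_read() & 0x3;	/* hypothetical accessor */
+	} while (rstat == VIRTCHNL_VFR_INPROGRESS || rstat == 3);
+	return (enum virtchnl_vfr_states)rstat;
+}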
+
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define PROTO_HDR_SHIFT 5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) \
+ (proto_hdr_type << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* VFs use these macros to configure each protocol header.
+ * Specify which protocol headers and protocol header fields to match on,
+ * based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field
+ * (see the usage sketch after struct virtchnl_proto_hdr below).
+ * @param hdr: a struct of virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc
+ * @param field: SRC/DST/TEID/SPI, etc
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+ ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+ ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
+ (((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+ ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+ (VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
+ VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates, or is encapsulated by,
+ * tunneling or encapsulation protocols used for network virtualization.
+ */
+enum virtchnl_proto_hdr_type {
+ VIRTCHNL_PROTO_HDR_NONE,
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_S_VLAN,
+ VIRTCHNL_PROTO_HDR_C_VLAN,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ VIRTCHNL_PROTO_HDR_PPPOE,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_PFCP,
+};
+
+/* Protocol header field within a protocol header. */
+enum virtchnl_proto_hdr_field {
+ /* ETHER */
+ VIRTCHNL_PROTO_HDR_ETH_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
+ VIRTCHNL_PROTO_HDR_ETH_DST,
+ VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
+ /* S-VLAN */
+ VIRTCHNL_PROTO_HDR_S_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
+ /* C-VLAN */
+ VIRTCHNL_PROTO_HDR_C_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
+ /* IPV4 */
+ VIRTCHNL_PROTO_HDR_IPV4_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
+ VIRTCHNL_PROTO_HDR_IPV4_DST,
+ VIRTCHNL_PROTO_HDR_IPV4_DSCP,
+ VIRTCHNL_PROTO_HDR_IPV4_TTL,
+ VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ /* IPV6 */
+ VIRTCHNL_PROTO_HDR_IPV6_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
+ VIRTCHNL_PROTO_HDR_IPV6_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_TC,
+ VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
+ VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* TCP */
+ VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
+ VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ /* UDP */
+ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
+ VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ /* SCTP */
+ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
+ VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ /* GTPU_IP */
+ VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
+ /* GTPU_EH */
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
+ VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
+ /* PPPOE */
+ VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
+ /* L2TPV3 */
+ VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
+ /* ESP */
+ VIRTCHNL_PROTO_HDR_ESP_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
+ /* AH */
+ VIRTCHNL_PROTO_HDR_AH_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
+ /* PFCP */
+ VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
+ VIRTCHNL_PROTO_HDR_PFCP_SEID,
+};
+
+struct virtchnl_proto_hdr {
+ enum virtchnl_proto_hdr_type type;
+ u32 field_selector; /* a bit mask to select field for header type */
+ u8 buffer[64];
+ /**
+	 * binary buffer in network order for the specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+ */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
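+
+/* Illustrative sketch (not part of the original header): using the proto-hdr
+ * macros defined earlier to select the IPv4 source/destination address
+ * fields of one header, e.g. while building an RSS or FDIR configuration.
+ */
+static inline void virtchnl_sketch_select_ipv4_addrs(struct virtchnl_proto_hdr *hdr)
+{
+	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+}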
+
+struct virtchnl_proto_hdrs {
+ u8 tunnel_level;
+ /**
+	 * specifies where the protocol headers start from:
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * ....
+ **/
+	int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
+
+struct virtchnl_rss_cfg {
+ struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
+ enum virtchnl_rss_algorithm rss_algorithm; /* rss algorithm type */
+ u8 reserved[128]; /* reserve for future */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
+
+/* action configuration for FDIR */
+struct virtchnl_filter_action {
+ enum virtchnl_action type;
+ union {
+ /* used for queue and qgroup action */
+ struct {
+ u16 index;
+ u8 region;
+ } queue;
+ /* used for count action */
+ struct {
+ /* share counter ID with other flow rules */
+ u8 shared;
+ u32 id; /* counter ID */
+ } count;
+ /* used for mark action */
+ u32 mark_id;
+ u8 reserve[32];
+ } act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
+
+#define VIRTCHNL_MAX_NUM_ACTIONS 8
+
+struct virtchnl_filter_action_set {
+	/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+ int count;
+ struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule {
+ struct virtchnl_proto_hdrs proto_hdrs;
+ struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
+
+/* Query information to retrieve fdir rule counters.
+ * The PF will fill out this structure to reset the counters.
+ */
+struct virtchnl_fdir_query_info {
+ u32 match_packets_valid:1;
+ u32 match_bytes_valid:1;
+ u32 reserved:30; /* Reserved, must be zero. */
+ u32 pad;
+ u64 matched_packets; /* Number of packets for this rule. */
+ u64 matched_bytes; /* Number of bytes through this rule. */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_fdir_query_info);
+
+/* Status returned to VF after VF requests FDIR commands
+ * VIRTCHNL_FDIR_SUCCESS
+ * VF FDIR related request was successfully done by PF.
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * OP_ADD_FDIR_FILTER request failed due to a lack of hardware resources.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * OP_ADD_FDIR_FILTER request failed due to parameter validation
+ * or missing HW support.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * OP_ADD/DEL_FDIR_FILTER request failed because programming timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed due to parameter validation,
+ * for example, a VF queried the counter of a rule that has no counter action.
+ */
+enum virtchnl_fdir_prgm_status {
+ VIRTCHNL_FDIR_SUCCESS = 0,
+ VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+ VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. PF will return flow_id
+ * if the request is successfully done and return add_status to VF.
+ */
+struct virtchnl_fdir_add {
+ u16 vsi_id; /* INPUT */
+ /*
+	 * 1 for validating an fdir rule, 0 for creating an fdir rule.
+	 * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
+ */
+ u16 validate_only; /* INPUT */
+ u32 flow_id; /* OUTPUT */
+ struct virtchnl_fdir_rule rule_cfg; /* INPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
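+
+/* Illustrative sketch (not part of the original header): preparing a
+ * validate-only VIRTCHNL_OP_ADD_FDIR_FILTER request as described above;
+ * building rule_cfg (proto_hdrs plus action_set) is left to the caller.
+ */
+static inline void virtchnl_sketch_prep_fdir_validate(struct virtchnl_fdir_add *msg,
+						      u16 vsi_id)
+{
+	msg->vsi_id = vsi_id;
+	msg->validate_only = 1;	/* dry run: PF checks but does not program */
+	/* msg->rule_cfg = ...; filled by the caller */
+}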
+
+/* VIRTCHNL_OP_DEL_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return del_status to VF.
+ */
+struct virtchnl_fdir_del {
+ u16 vsi_id; /* INPUT */
+ u16 pad;
+ u32 flow_id; /* INPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+
+/* VIRTCHNL_OP_QUERY_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * flow_id and reset_counter. PF will return query_info
+ * and query_status to VF.
+ */
+struct virtchnl_fdir_query {
+ u16 vsi_id; /* INPUT */
+ u16 pad1[3];
+ u32 flow_id; /* INPUT */
+ u32 reset_counter:1; /* INPUT */
+ struct virtchnl_fdir_query_info query_info; /* OUTPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_fdir_query);
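For illustration, a minimal sketch of how a VF driver might build the add/validate
request defined above; EXAMPLE_VSI_ID and vf_send_msg() are hypothetical
placeholders, not part of this header.

#include <string.h>

/* Hypothetical VSI ID handed out by the PF. */
#define EXAMPLE_VSI_ID 1

/* Stub standing in for the real VF mailbox transport (hypothetical). */
static int vf_send_msg(u32 opcode, u8 *buf, u16 len)
{
	(void)opcode; (void)buf; (void)len;
	return 0;
}

/* Ask the PF to validate (not program) an FDIR rule. */
static int example_fdir_validate(void)
{
	struct virtchnl_fdir_add req;

	memset(&req, 0, sizeof(req));
	req.vsi_id = EXAMPLE_VSI_ID;
	req.validate_only = 1; /* dry run; same opcode as rule creation */
	/* req.rule_cfg.proto_hdrs and req.rule_cfg.action_set go here. */
	return vf_send_msg(VIRTCHNL_OP_ADD_FDIR_FILTER,
			   (u8 *)&req, sizeof(req));
}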
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ u32 valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+
+ if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+
+ if (vimi->num_vectors == 0 || vimi->num_vectors >
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ if (veal->num_elements == 0 || veal->num_elements >
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+
+ if (vfl->num_elements == 0 || vfl->num_elements >
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += vfl->num_elements * sizeof(u16);
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len == 0) {
+ /* zero length is allowed as input */
+ break;
+ }
+
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries == 0) {
+ /* zero entries is allowed as input */
+ break;
+ }
+
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = sizeof(struct virtchnl_tc_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+
+ if (vti->num_tc == 0 || vti->num_tc >
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
+ case VIRTCHNL_OP_DCF_CMD_DESC:
+ case VIRTCHNL_OP_DCF_CMD_BUFF:
+ /* These two opcodes are specific to handle the AdminQ command,
+ * so the validation needs to be done in PF's context.
+ */
+ return 0;
+ case VIRTCHNL_OP_DCF_DISABLE:
+ case VIRTCHNL_OP_DCF_GET_VSI_MAP:
+		/* These two opcodes are used by DCF without a message buffer,
+		 * so the valid length keeps the default value 0.
+		 */
+ break;
+ case VIRTCHNL_OP_DCF_GET_PKG_INFO:
+ break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ valid_len = sizeof(struct virtchnl_rss_cfg);
+ break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_add);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_del);
+ break;
+ case VIRTCHNL_OP_QUERY_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_query);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ /* few more checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
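A hedged sketch of the PF-side use of the validator above: reject malformed VF
messages before dispatching the opcode. It assumes the VIRTCHNL_VERSION_MAJOR
and VIRTCHNL_VERSION_MINOR macros from earlier in this header; the dispatch
wrapper itself is hypothetical.

static int example_dispatch(u32 v_opcode, u8 *msg, u16 msglen)
{
	struct virtchnl_version_info ver = {
		.major = VIRTCHNL_VERSION_MAJOR,
		.minor = VIRTCHNL_VERSION_MINOR,
	};

	/* Length/format check first; malformed input never reaches handlers. */
	if (virtchnl_vc_validate_vf_msg(&ver, v_opcode, msg, msglen))
		return VIRTCHNL_STATUS_ERR_PARAM;
	/* ... dispatch to the per-opcode handler ... */
	return 0;
}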
diff --git a/src/spdk/dpdk/drivers/common/meson.build b/src/spdk/dpdk/drivers/common/meson.build
new file mode 100644
index 000000000..ffd06e2c3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+std_deps = ['eal']
+drivers = ['cpt', 'dpaax', 'iavf', 'mlx5', 'mvep', 'octeontx', 'octeontx2', 'qat']
+config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
+driver_name_fmt = 'rte_common_@0@'
diff --git a/src/spdk/dpdk/drivers/common/mlx5/Makefile b/src/spdk/dpdk/drivers/common/mlx5/Makefile
new file mode 100644
index 000000000..0d8cc1bf5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/Makefile
@@ -0,0 +1,362 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2019 Mellanox Technologies, Ltd
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Library name.
+LIB = librte_common_mlx5.a
+LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
+LIB_GLUE_BASE = librte_pmd_mlx5_glue.so
+LIB_GLUE_VERSION = 20.02.0
+
+# Sources.
+ifeq ($(findstring y,$(CONFIG_RTE_LIBRTE_MLX5_PMD)$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD)),y)
+ifneq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
+SRCS-y += mlx5_glue.c
+endif
+SRCS-y += mlx5_devx_cmds.c
+SRCS-y += mlx5_common.c
+SRCS-y += mlx5_nl.c
+SRCS-y += mlx5_common_mp.c
+SRCS-y += mlx5_common_mr.c
+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
+INSTALL-y-lib += $(LIB_GLUE)
+endif
+endif
+
+# Basic CFLAGS.
+CFLAGS += -O3
+CFLAGS += -std=c11 -Wall -Wextra
+CFLAGS += -g
+CFLAGS += -I.
+CFLAGS += -D_BSD_SOURCE
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += -D_XOPEN_SOURCE=600
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-strict-prototypes
+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
+CFLAGS += -DMLX5_GLUE='"$(LIB_GLUE)"'
+CFLAGS += -DMLX5_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
+CFLAGS_mlx5_glue.o += -fPIC
+LDLIBS += -ldl
+else ifeq ($(CONFIG_RTE_IBVERBS_LINK_STATIC),y)
+LDLIBS += $(shell $(RTE_SDK)/buildtools/options-ibverbs-static.sh)
+else
+LDLIBS += -libverbs -lmlx5
+endif
+
+LDLIBS += -lrte_eal -lrte_pci -lrte_kvargs -lrte_net
+
+# A few warnings cannot be avoided in external headers.
+CFLAGS += -Wno-error=cast-qual -UPEDANTIC
+
+EXPORT_MAP := rte_common_mlx5_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
+# Generate and clean-up mlx5_autoconf.h.
+
+export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
+export AUTO_CONFIG_CFLAGS = -Wno-error
+
+ifndef V
+AUTOCONF_OUTPUT := >/dev/null
+endif
+
+mlx5_autoconf.h.new: FORCE
+
+mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
+ $Q $(RM) -f -- '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_RELAXED_ORDERING \
+ infiniband/verbs.h \
+ enum IBV_ACCESS_RELAXED_ORDERING \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_TUNNEL_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_MPLS_SUPPORT \
+ infiniband/verbs.h \
+ enum IBV_FLOW_SPEC_MPLS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \
+ infiniband/verbs.h \
+ enum IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_WQ_FLAG_RX_END_PADDING \
+ infiniband/verbs.h \
+ enum IBV_WQ_FLAG_RX_END_PADDING \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_SWP \
+ infiniband/mlx5dv.h \
+ type 'struct mlx5dv_sw_parsing_caps' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_MPW \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_CQE_128B_PAD \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_FLOW_DV_SUPPORT \
+ infiniband/mlx5dv.h \
+ func mlx5dv_create_flow_action_packet_reformat \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_DR_DOMAIN_TYPE_NIC_RX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR_ESWITCH \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_DR_DOMAIN_TYPE_FDB \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR_VLAN \
+ infiniband/mlx5dv.h \
+ func mlx5dv_dr_action_create_push_vlan \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR_DEVX_PORT \
+ infiniband/mlx5dv.h \
+ func mlx5dv_query_devx_port \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVX_OBJ \
+ infiniband/mlx5dv.h \
+ func mlx5dv_devx_obj_create \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_FLOW_DEVX_COUNTERS \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_FLOW_ACTION_COUNTERS_DEVX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVX_ASYNC \
+ infiniband/mlx5dv.h \
+ func mlx5dv_devx_obj_query_async \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVX_QP \
+ infiniband/mlx5dv.h \
+ func mlx5dv_devx_qp_query \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR \
+ infiniband/mlx5dv.h \
+ func mlx5dv_dr_action_create_dest_devx_tir \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVX_EVENT \
+ infiniband/mlx5dv.h \
+ func mlx5dv_devx_get_event \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER \
+ infiniband/mlx5dv.h \
+ func mlx5dv_dr_action_create_flow_meter \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5_DR_FLOW_DUMP \
+ infiniband/mlx5dv.h \
+ func mlx5dv_dump_dr_domain \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD \
+ infiniband/mlx5dv.h \
+ enum MLX5_MMAP_GET_NC_PAGES_CMD \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_VAR \
+ infiniband/mlx5dv.h \
+ func mlx5dv_alloc_var \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_25G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_25000baseCR_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_50G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_100G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_COUNTERS_SET_V42 \
+ infiniband/verbs.h \
+ type 'struct ibv_counter_set_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_COUNTERS_SET_V45 \
+ infiniband/verbs.h \
+ type 'struct ibv_counters_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NL_NLDEV \
+ rdma/rdma_netlink.h \
+ enum RDMA_NL_NLDEV \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_PORT_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_PORT_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_NAME \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_PORT_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_PORT_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_NDEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_NUM_VF \
+ linux/if_link.h \
+ enum IFLA_NUM_VF \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_EXT_MASK \
+ linux/if_link.h \
+ enum IFLA_EXT_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_SWITCH_ID \
+ linux/if_link.h \
+ enum IFLA_PHYS_SWITCH_ID \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_PORT_NAME \
+ linux/if_link.h \
+ enum IFLA_PHYS_PORT_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_DEVLINK \
+ linux/devlink.h \
+ define DEVLINK_GENL_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_STATIC_ASSERT \
+ /usr/include/assert.h \
+ define static_assert \
+ $(AUTOCONF_OUTPUT)
+
+# Create mlx5_autoconf.h or update it in case it differs from the new one.
+
+mlx5_autoconf.h: mlx5_autoconf.h.new
+ $Q [ -f '$@' ] && \
+ cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+ mv '$<' '$@'
+
+ifeq ($(findstring y,$(CONFIG_RTE_LIBRTE_MLX5_PMD)$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD)),y)
+$(SRCS-y:.c=.o): mlx5_autoconf.h
+endif
+
+# Generate dependency plug-in for rdma-core when the PMD must not be linked
+# directly, so that applications do not inherit this dependency.
+
+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
+
+$(LIB): $(LIB_GLUE)
+
+ifeq ($(LINK_USING_CC),1)
+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
+else
+GLUE_LDFLAGS := $(LDFLAGS)
+endif
+$(LIB_GLUE): mlx5_glue.o
+ $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
+ -Wl,-h,$(LIB_GLUE) \
+ -shared -o $@ $< -libverbs -lmlx5
+
+mlx5_glue.o: mlx5_autoconf.h
+
+endif
+
+clean_mlx5: FORCE
+ $Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new
+ $Q rm -f -- mlx5_glue.o $(LIB_GLUE_BASE)*
+
+clean: clean_mlx5
diff --git a/src/spdk/dpdk/drivers/common/mlx5/meson.build b/src/spdk/dpdk/drivers/common/mlx5/meson.build
new file mode 100644
index 000000000..5a802ba34
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/meson.build
@@ -0,0 +1,227 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2019 Mellanox Technologies, Ltd
+
+if not is_linux
+ build = false
+ reason = 'only supported on Linux'
+ subdir_done()
+endif
+
+static_ibverbs = (get_option('ibverbs_link') == 'static')
+dlopen_ibverbs = (get_option('ibverbs_link') == 'dlopen')
+LIB_GLUE_BASE = 'librte_pmd_mlx5_glue.so'
+LIB_GLUE_VERSION = '20.02.0'
+LIB_GLUE = LIB_GLUE_BASE + '.' + LIB_GLUE_VERSION
+if dlopen_ibverbs
+ dpdk_conf.set('RTE_IBVERBS_LINK_DLOPEN', 1)
+ cflags += [
+ '-DMLX5_GLUE="@0@"'.format(LIB_GLUE),
+ '-DMLX5_GLUE_VERSION="@0@"'.format(LIB_GLUE_VERSION),
+ ]
+endif
+
+libnames = [ 'mlx5', 'ibverbs' ]
+libs = []
+foreach libname:libnames
+ lib = dependency('lib' + libname, static:static_ibverbs, required:false)
+ if not lib.found() and not static_ibverbs
+ lib = cc.find_library(libname, required:false)
+ endif
+ if lib.found()
+ libs += lib
+ if not static_ibverbs and not dlopen_ibverbs
+ ext_deps += lib
+ endif
+ else
+ build = false
+ reason = 'missing dependency, "' + libname + '"'
+ subdir_done()
+ endif
+endforeach
+if static_ibverbs or dlopen_ibverbs
+ # Build without adding shared libs to Requires.private
+ ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs').stdout()
+ ext_deps += declare_dependency(compile_args: ibv_cflags.split())
+endif
+if static_ibverbs
+ # Add static deps ldflags to internal apps and Libs.private
+ ibv_ldflags = run_command(ldflags_ibverbs_static, check:true).stdout()
+ ext_deps += declare_dependency(link_args:ibv_ldflags.split())
+endif
+
+deps += ['hash', 'pci', 'net', 'eal', 'kvargs']
+sources = files(
+ 'mlx5_devx_cmds.c',
+ 'mlx5_common.c',
+ 'mlx5_nl.c',
+ 'mlx5_common_mp.c',
+ 'mlx5_common_mr.c',
+)
+if not dlopen_ibverbs
+ sources += files('mlx5_glue.c')
+endif
+cflags_options = [
+ '-std=c11',
+ '-Wno-strict-prototypes',
+ '-D_BSD_SOURCE',
+ '-D_DEFAULT_SOURCE',
+ '-D_XOPEN_SOURCE=600'
+]
+foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
+ endif
+endforeach
+if get_option('buildtype').contains('debug')
+ cflags += [ '-pedantic', '-DPEDANTIC' ]
+else
+ cflags += [ '-UPEDANTIC' ]
+endif
+# To maintain compatibility with the make build system,
+# the mlx5_autoconf.h file is still generated.
+# input array for meson member search:
+# [ "MACRO to define if found", "header for the search",
+# "symbol to search", "struct member to search" ]
+has_member_args = [
+ [ 'HAVE_IBV_MLX5_MOD_SWP', 'infiniband/mlx5dv.h',
+ 'struct mlx5dv_sw_parsing_caps', 'sw_parsing_offloads' ],
+ [ 'HAVE_IBV_DEVICE_COUNTERS_SET_V42', 'infiniband/verbs.h',
+ 'struct ibv_counter_set_init_attr', 'counter_set_id' ],
+ [ 'HAVE_IBV_DEVICE_COUNTERS_SET_V45', 'infiniband/verbs.h',
+ 'struct ibv_counters_init_attr', 'comp_mask' ],
+]
+# input array for meson symbol search:
+# [ "MACRO to define if found", "header for the search",
+# "symbol to search" ]
+has_sym_args = [
+ [ 'HAVE_IBV_RELAXED_ORDERING', 'infiniband/verbs.h',
+ 'IBV_ACCESS_RELAXED_ORDERING ' ],
+ [ 'HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX' ],
+ [ 'HAVE_IBV_DEVICE_TUNNEL_SUPPORT', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS' ],
+ [ 'HAVE_IBV_MLX5_MOD_MPW', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED' ],
+ [ 'HAVE_IBV_MLX5_MOD_CQE_128B_COMP', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP' ],
+ [ 'HAVE_IBV_MLX5_MOD_CQE_128B_PAD', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD' ],
+ [ 'HAVE_IBV_FLOW_DV_SUPPORT', 'infiniband/mlx5dv.h',
+ 'mlx5dv_create_flow_action_packet_reformat' ],
+ [ 'HAVE_IBV_DEVICE_MPLS_SUPPORT', 'infiniband/verbs.h',
+ 'IBV_FLOW_SPEC_MPLS' ],
+ [ 'HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING', 'infiniband/verbs.h',
+ 'IBV_WQ_FLAGS_PCI_WRITE_END_PADDING' ],
+ [ 'HAVE_IBV_WQ_FLAG_RX_END_PADDING', 'infiniband/verbs.h',
+ 'IBV_WQ_FLAG_RX_END_PADDING' ],
+ [ 'HAVE_MLX5DV_DR_DEVX_PORT', 'infiniband/mlx5dv.h',
+ 'mlx5dv_query_devx_port' ],
+ [ 'HAVE_IBV_DEVX_OBJ', 'infiniband/mlx5dv.h',
+ 'mlx5dv_devx_obj_create' ],
+ [ 'HAVE_IBV_FLOW_DEVX_COUNTERS', 'infiniband/mlx5dv.h',
+ 'MLX5DV_FLOW_ACTION_COUNTERS_DEVX' ],
+ [ 'HAVE_IBV_DEVX_ASYNC', 'infiniband/mlx5dv.h',
+ 'mlx5dv_devx_obj_query_async' ],
+ [ 'HAVE_IBV_DEVX_QP', 'infiniband/mlx5dv.h',
+ 'mlx5dv_devx_qp_query' ],
+ [ 'HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR', 'infiniband/mlx5dv.h',
+ 'mlx5dv_dr_action_create_dest_devx_tir' ],
+ [ 'HAVE_IBV_DEVX_EVENT', 'infiniband/mlx5dv.h',
+ 'mlx5dv_devx_get_event' ],
+ [ 'HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER', 'infiniband/mlx5dv.h',
+ 'mlx5dv_dr_action_create_flow_meter' ],
+ [ 'HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD', 'infiniband/mlx5dv.h',
+ 'MLX5_MMAP_GET_NC_PAGES_CMD' ],
+ [ 'HAVE_MLX5DV_DR', 'infiniband/mlx5dv.h',
+ 'MLX5DV_DR_DOMAIN_TYPE_NIC_RX' ],
+ [ 'HAVE_MLX5DV_DR_ESWITCH', 'infiniband/mlx5dv.h',
+ 'MLX5DV_DR_DOMAIN_TYPE_FDB' ],
+ [ 'HAVE_MLX5DV_DR_VLAN', 'infiniband/mlx5dv.h',
+ 'mlx5dv_dr_action_create_push_vlan' ],
+ [ 'HAVE_IBV_VAR', 'infiniband/mlx5dv.h', 'mlx5dv_alloc_var' ],
+ [ 'HAVE_SUPPORTED_40000baseKR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseKR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseCR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseCR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseSR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseSR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseLR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseLR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseKR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseKR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseCR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseCR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseSR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseSR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseLR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseLR4_Full' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_25G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_25000baseCR_Full_BIT' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_50G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_100G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT' ],
+ [ 'HAVE_IFLA_NUM_VF', 'linux/if_link.h',
+ 'IFLA_NUM_VF' ],
+ [ 'HAVE_IFLA_EXT_MASK', 'linux/if_link.h',
+ 'IFLA_EXT_MASK' ],
+ [ 'HAVE_IFLA_PHYS_SWITCH_ID', 'linux/if_link.h',
+ 'IFLA_PHYS_SWITCH_ID' ],
+ [ 'HAVE_IFLA_PHYS_PORT_NAME', 'linux/if_link.h',
+ 'IFLA_PHYS_PORT_NAME' ],
+ [ 'HAVE_RDMA_NL_NLDEV', 'rdma/rdma_netlink.h',
+ 'RDMA_NL_NLDEV' ],
+ [ 'HAVE_RDMA_NLDEV_CMD_GET', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_CMD_GET' ],
+ [ 'HAVE_RDMA_NLDEV_CMD_PORT_GET', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_CMD_PORT_GET' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_DEV_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_DEV_INDEX' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_DEV_NAME', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_DEV_NAME' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_PORT_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_PORT_INDEX' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_NDEV_INDEX' ],
+ [ 'HAVE_MLX5_DR_FLOW_DUMP', 'infiniband/mlx5dv.h',
+ 'mlx5dv_dump_dr_domain'],
+ [ 'HAVE_DEVLINK', 'linux/devlink.h', 'DEVLINK_GENL_NAME' ],
+]
+config = configuration_data()
+foreach arg:has_sym_args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2],
+ dependencies: libs))
+endforeach
+foreach arg:has_member_args
+ file_prefix = '#include <' + arg[1] + '>'
+ config.set(arg[0], cc.has_member(arg[2], arg[3],
+ prefix : file_prefix, dependencies: libs))
+endforeach
+configure_file(output : 'mlx5_autoconf.h', configuration : config)
+
+# Build Glue Library
+if dlopen_ibverbs
+ dlopen_name = 'mlx5_glue'
+ dlopen_lib_name = 'rte_pmd_@0@'.format(dlopen_name)
+ dlopen_so_version = LIB_GLUE_VERSION
+ dlopen_sources = files('mlx5_glue.c')
+ dlopen_install_dir = [ eal_pmd_path + '-glue' ]
+ dlopen_includes = [global_inc]
+ dlopen_includes += include_directories(
+ '../../../lib/librte_eal/include/generic',
+ )
+ shared_lib = shared_library(
+ dlopen_lib_name,
+ dlopen_sources,
+ include_directories: dlopen_includes,
+ c_args: cflags,
+ dependencies: libs,
+ link_args: [
+ '-Wl,-export-dynamic',
+ '-Wl,-h,@0@'.format(LIB_GLUE),
+ ],
+ soversion: dlopen_so_version,
+ install: true,
+ install_dir: dlopen_install_dir,
+ )
+endif
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_common.c b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common.c
new file mode 100644
index 000000000..1c77763da
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#include <unistd.h>
+#include <string.h>
+#include <stdio.h>
+#ifdef RTE_IBVERBS_LINK_DLOPEN
+#include <dlfcn.h>
+#endif
+
+#include <rte_errno.h>
+
+#include "mlx5_common.h"
+#include "mlx5_common_utils.h"
+#include "mlx5_glue.h"
+
+
+int mlx5_common_logtype;
+
+#ifdef MLX5_GLUE
+const struct mlx5_glue *mlx5_glue;
+#endif
+
+uint8_t haswell_broadwell_cpu;
+
+/**
+ * Get PCI information by sysfs device path.
+ *
+ * @param dev_path
+ * Pointer to device sysfs folder name.
+ * @param[out] pci_addr
+ * PCI bus address output buffer.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_to_pci_addr(const char *dev_path,
+ struct rte_pci_addr *pci_addr)
+{
+ FILE *file;
+ char line[32];
+ MKSTR(path, "%s/device/uevent", dev_path);
+
+ file = fopen(path, "rb");
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ while (fgets(line, sizeof(line), file) == line) {
+ size_t len = strlen(line);
+ int ret;
+
+ /* Truncate long lines. */
+ if (len == (sizeof(line) - 1))
+ while (line[(len - 1)] != '\n') {
+ ret = fgetc(file);
+ if (ret == EOF)
+ break;
+ line[(len - 1)] = ret;
+ }
+ /* Extract information. */
+ if (sscanf(line,
+ "PCI_SLOT_NAME="
+ "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+ &pci_addr->domain,
+ &pci_addr->bus,
+ &pci_addr->devid,
+ &pci_addr->function) == 4) {
+ ret = 0;
+ break;
+ }
+ }
+ fclose(file);
+ return 0;
+}
+
+static int
+mlx5_class_check_handler(__rte_unused const char *key, const char *value,
+ void *opaque)
+{
+ enum mlx5_class *ret = opaque;
+
+ if (strcmp(value, "vdpa") == 0) {
+ *ret = MLX5_CLASS_VDPA;
+ } else if (strcmp(value, "net") == 0) {
+ *ret = MLX5_CLASS_NET;
+ } else {
+ DRV_LOG(ERR, "Invalid mlx5 class %s. Maybe typo in device"
+ " class argument setting?", value);
+ *ret = MLX5_CLASS_INVALID;
+ }
+ return 0;
+}
+
+enum mlx5_class
+mlx5_class_get(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *key = MLX5_CLASS_ARG_NAME;
+ enum mlx5_class ret = MLX5_CLASS_NET;
+
+ if (devargs == NULL)
+ return ret;
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return ret;
+ if (rte_kvargs_count(kvlist, key))
+ rte_kvargs_process(kvlist, key, mlx5_class_check_handler, &ret);
+ rte_kvargs_free(kvlist);
+ return ret;
+}
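A hedged sketch of how a probe callback might use mlx5_class_get(): a device
passed with an EAL option such as -w 0000:03:00.0,class=vdpa yields
MLX5_CLASS_VDPA, and the key defaults to MLX5_CLASS_NET when absent. The probe
context below is an assumption for illustration.

#include <rte_bus_pci.h>

/* A net PMD declining devices requested as "class=vdpa" (sketch). */
static int example_probe_filter(struct rte_pci_device *pci_dev)
{
	enum mlx5_class cls = mlx5_class_get(pci_dev->device.devargs);

	return (cls == MLX5_CLASS_NET) ? 0 : -1;
}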
+
+/**
+ * Extract port name, as a number, from sysfs or netlink information.
+ *
+ * @param[in] port_name_in
+ * String representing the port name.
+ * @param[out] port_info_out
+ * Port information, including port name as a number and port name
+ * type if recognized
+ *
+ * @return
+ * port_name field set according to recognized name format.
+ */
+void
+mlx5_translate_port_name(const char *port_name_in,
+ struct mlx5_switch_info *port_info_out)
+{
+ char pf_c1, pf_c2, vf_c1, vf_c2;
+ char *end;
+ int sc_items;
+
+ /*
+ * Check for port-name as a string of the form pf0vf0
+ * (support kernel ver >= 5.0 or OFED ver >= 4.6).
+ */
+ sc_items = sscanf(port_name_in, "%c%c%d%c%c%d",
+ &pf_c1, &pf_c2, &port_info_out->pf_num,
+ &vf_c1, &vf_c2, &port_info_out->port_name);
+ if (sc_items == 6 &&
+ pf_c1 == 'p' && pf_c2 == 'f' &&
+ vf_c1 == 'v' && vf_c2 == 'f') {
+ port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_PFVF;
+ return;
+ }
+ /*
+ * Check for port-name as a string of the form p0
+ * (support kernel ver >= 5.0, or OFED ver >= 4.6).
+ */
+ sc_items = sscanf(port_name_in, "%c%d",
+ &pf_c1, &port_info_out->port_name);
+ if (sc_items == 2 && pf_c1 == 'p') {
+ port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
+ return;
+ }
+	/* Check for port-name as a number (support kernel ver < 5.0). */
+ errno = 0;
+ port_info_out->port_name = strtol(port_name_in, &end, 0);
+ if (!errno &&
+ (size_t)(end - port_name_in) == strlen(port_name_in)) {
+ port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_LEGACY;
+ return;
+ }
+ port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN;
+ return;
+}
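Illustrative outcomes of the three recognized formats, derived from the parsing
logic above (a minimal sketch; string.h is already included by this file):

/* Recognized port-name formats:
 *   "pf0vf2" -> name_type = MLX5_PHYS_PORT_NAME_TYPE_PFVF,
 *               pf_num = 0, port_name = 2
 *   "p1"     -> name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK, port_name = 1
 *   "3"      -> name_type = MLX5_PHYS_PORT_NAME_TYPE_LEGACY, port_name = 3
 *   "eth0"   -> name_type = MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN
 */
static void example_translate(void)
{
	struct mlx5_switch_info info;

	memset(&info, 0, sizeof(info));
	mlx5_translate_port_name("pf0vf2", &info);
	/* Here info.name_type == MLX5_PHYS_PORT_NAME_TYPE_PFVF,
	 * info.pf_num == 0 and info.port_name == 2.
	 */
}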
+
+#ifdef MLX5_GLUE
+
+/**
+ * Suffix RTE_EAL_PMD_PATH with "-glue".
+ *
+ * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * suffixing its last component.
+ *
+ * @param[out] buf
+ *   Output buffer, should be large enough otherwise NULL is returned.
+ * @param size
+ *   Size of @p buf.
+ *
+ * @return
+ * Pointer to @p buf or @p NULL in case suffix cannot be appended.
+ */
+static char *
+mlx5_glue_path(char *buf, size_t size)
+{
+ static const char *const bad[] = { "/", ".", "..", NULL };
+ const char *path = RTE_EAL_PMD_PATH;
+ size_t len = strlen(path);
+ size_t off;
+ int i;
+
+ while (len && path[len - 1] == '/')
+ --len;
+ for (off = len; off && path[off - 1] != '/'; --off)
+ ;
+ for (i = 0; bad[i]; ++i)
+ if (!strncmp(path + off, bad[i], (int)(len - off)))
+ goto error;
+ i = snprintf(buf, size, "%.*s-glue", (int)len, path);
+ if (i == -1 || (size_t)i >= size)
+ goto error;
+ return buf;
+error:
+ RTE_LOG(ERR, PMD, "unable to append \"-glue\" to last component of"
+ " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"), please"
+ " re-configure DPDK");
+ return NULL;
+}
+
+static int
+mlx5_glue_dlopen(void)
+{
+ char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
+ void *handle = NULL;
+
+ const char *path[] = {
+ /*
+ * A basic security check is necessary before trusting
+ * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
+ */
+ (geteuid() == getuid() && getegid() == getgid() ?
+ getenv("MLX5_GLUE_PATH") : NULL),
+ /*
+ * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
+ * variant, otherwise let dlopen() look up libraries on its
+ * own.
+ */
+ (*RTE_EAL_PMD_PATH ?
+ mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
+ };
+ unsigned int i = 0;
+ void **sym;
+ const char *dlmsg;
+
+ while (!handle && i != RTE_DIM(path)) {
+ const char *end;
+ size_t len;
+ int ret;
+
+ if (!path[i]) {
+ ++i;
+ continue;
+ }
+ end = strpbrk(path[i], ":;");
+ if (!end)
+ end = path[i] + strlen(path[i]);
+ len = end - path[i];
+ ret = 0;
+ do {
+ char name[ret + 1];
+
+ ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
+ (int)len, path[i],
+ (!len || *(end - 1) == '/') ? "" : "/");
+ if (ret == -1)
+ break;
+ if (sizeof(name) != (size_t)ret + 1)
+ continue;
+ DRV_LOG(DEBUG, "Looking for rdma-core glue as "
+ "\"%s\"", name);
+ handle = dlopen(name, RTLD_LAZY);
+ break;
+ } while (1);
+ path[i] = end + 1;
+ if (!*end)
+ ++i;
+ }
+ if (!handle) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ DRV_LOG(WARNING, "Cannot load glue library: %s", dlmsg);
+ goto glue_error;
+ }
+ sym = dlsym(handle, "mlx5_glue");
+ if (!sym || !*sym) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ DRV_LOG(ERR, "Cannot resolve glue symbol: %s", dlmsg);
+ goto glue_error;
+ }
+ mlx5_glue = *sym;
+ return 0;
+
+glue_error:
+ if (handle)
+ dlclose(handle);
+ return -1;
+}
+
+#endif
+
+/* On x86_64 Intel processors, check whether relaxed ordering
+ * should be used.
+ */
+#ifdef RTE_ARCH_X86_64
+/**
+ * This function returns processor identification and feature information
+ * into the registers.
+ *
+ * @param eax, ebx, ecx, edx
+ * Pointers to the registers that will hold cpu information.
+ * @param level
+ * The main category of information returned.
+ */
+static inline void mlx5_cpu_id(unsigned int level,
+ unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ __asm__("cpuid\n\t"
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "0" (level));
+}
+#endif
+
+RTE_INIT_PRIO(mlx5_log_init, LOG)
+{
+ mlx5_common_logtype = rte_log_register("pmd.common.mlx5");
+ if (mlx5_common_logtype >= 0)
+ rte_log_set_level(mlx5_common_logtype, RTE_LOG_NOTICE);
+}
+
+/**
+ * Initialization routine for run-time dependency on rdma-core.
+ */
+RTE_INIT_PRIO(mlx5_glue_init, CLASS)
+{
+ /*
+ * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
+ * huge pages. Calling ibv_fork_init() during init allows
+ * applications to use fork() safely for purposes other than
+ * using this PMD, which is not supported in forked processes.
+ */
+ setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
+ /* Match the size of Rx completion entry to the size of a cacheline. */
+ if (RTE_CACHE_LINE_SIZE == 128)
+ setenv("MLX5_CQE_SIZE", "128", 0);
+ /*
+ * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
+ * cleanup all the Verbs resources even when the device was removed.
+ */
+ setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
+
+#ifdef MLX5_GLUE
+ if (mlx5_glue_dlopen() != 0)
+ goto glue_error;
+#endif
+
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+ /* Glue structure must not contain any NULL pointers. */
+ {
+ unsigned int i;
+
+ for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
+ MLX5_ASSERT(((const void *const *)mlx5_glue)[i]);
+ }
+#endif
+ if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "rdma-core glue \"%s\" mismatch: \"%s\" is "
+ "required", mlx5_glue->version, MLX5_GLUE_VERSION);
+ goto glue_error;
+ }
+ mlx5_glue->fork_init();
+ return;
+
+glue_error:
+ DRV_LOG(WARNING, "Cannot initialize MLX5 common due to missing"
+ " run-time dependency on rdma-core libraries (libibverbs,"
+ " libmlx5)");
+ mlx5_glue = NULL;
+ return;
+}
+
+/**
+ * This function initializes the variable haswell_broadwell_cpu by checking
+ * whether the CPU is an Intel one and reading the data returned from
+ * mlx5_cpu_id().
+ * Since Haswell and Broadwell CPUs don't show improved performance with
+ * relaxed ordering, the CPU type is checked before deciding whether to
+ * enable RO or not.
+ * If the CPU is Haswell or Broadwell the variable is set to 1,
+ * otherwise it is 0.
+ */
+RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
+{
+#ifdef RTE_ARCH_X86_64
+ unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
+ unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
+ unsigned int i, model, family, brand_id, vendor;
+ unsigned int signature_intel_ebx = 0x756e6547;
+ unsigned int extended_model;
+ unsigned int eax = 0;
+ unsigned int ebx = 0;
+ unsigned int ecx = 0;
+ unsigned int edx = 0;
+ int max_level;
+
+ mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
+ vendor = ebx;
+ max_level = eax;
+ if (max_level < 1) {
+ haswell_broadwell_cpu = 0;
+ return;
+ }
+ mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
+ model = (eax >> 4) & 0x0f;
+ family = (eax >> 8) & 0x0f;
+ brand_id = ebx & 0xff;
+ extended_model = (eax >> 12) & 0xf0;
+ /* Check if the processor is Haswell or Broadwell */
+ if (vendor == signature_intel_ebx) {
+ if (family == 0x06)
+ model += extended_model;
+ if (brand_id == 0 && family == 0x6) {
+ for (i = 0; i < RTE_DIM(broadwell_models); i++)
+ if (model == broadwell_models[i]) {
+ haswell_broadwell_cpu = 1;
+ return;
+ }
+ for (i = 0; i < RTE_DIM(haswell_models); i++)
+ if (model == haswell_models[i]) {
+ haswell_broadwell_cpu = 1;
+ return;
+ }
+ }
+ }
+#endif
+ haswell_broadwell_cpu = 0;
+}
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_common.h b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common.h
new file mode 100644
index 000000000..8cd3ea590
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_COMMON_H_
+#define RTE_PMD_MLX5_COMMON_H_
+
+#include <stdio.h>
+
+#include <rte_pci.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_log.h>
+#include <rte_kvargs.h>
+#include <rte_devargs.h>
+
+#include "mlx5_prm.h"
+
+
+/* Bit-field manipulation. */
+#define BITFIELD_DECLARE(bf, type, size) \
+ type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
+ !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
+#define BITFIELD_DEFINE(bf, type, size) \
+ BITFIELD_DECLARE((bf), type, (size)) = { 0 }
+#define BITFIELD_SET(bf, b) \
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))
+#define BITFIELD_RESET(bf, b) \
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
+ ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))
+#define BITFIELD_ISSET(bf, b) \
+ !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
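A short usage sketch for the bit-field helpers above: one bit per slot in a
hypothetical 256-entry pool (uint64_t and CHAR_BIT come from <stdint.h> and
<limits.h>, pulled in via the headers this file already includes).

static void example_bitfield(void)
{
	/* 256 bits packed into an array of uint64_t words, zero-initialized. */
	BITFIELD_DEFINE(slot_used, uint64_t, 256);

	BITFIELD_SET(slot_used, 5);            /* mark slot 5 as allocated */
	if (BITFIELD_ISSET(slot_used, 5))
		BITFIELD_RESET(slot_used, 5);  /* release it again */
}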
+
+/*
+ * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant
+ * manner.
+ */
+#define PMD_DRV_LOG_STRIP(a, b) a
+#define PMD_DRV_LOG_OPAREN (
+#define PMD_DRV_LOG_CPAREN )
+#define PMD_DRV_LOG_COMMA ,
+
+/* Return the file name part of a path. */
+static inline const char *
+pmd_drv_log_basename(const char *s)
+{
+ const char *n = s;
+
+ while (*n)
+ if (*(n++) == '/')
+ s = n;
+ return s;
+}
+
+#define PMD_DRV_LOG___(level, type, name, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ type, \
+ RTE_FMT(name ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+/*
+ * When debugging is enabled (RTE_LIBRTE_MLX5_DEBUG defined), file, line and
+ * function information replace the driver name (MLX5_DRIVER_NAME) in log
+ * messages.
+ */
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+
+#define PMD_DRV_LOG__(level, type, name, ...) \
+ PMD_DRV_LOG___(level, type, name, "%s:%u: %s(): " __VA_ARGS__)
+#define PMD_DRV_LOG_(level, type, name, s, ...) \
+ PMD_DRV_LOG__(level, type, name,\
+ s "\n" PMD_DRV_LOG_COMMA \
+ pmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \
+ __LINE__ PMD_DRV_LOG_COMMA \
+ __func__, \
+ __VA_ARGS__)
+
+#else /* RTE_LIBRTE_MLX5_DEBUG */
+#define PMD_DRV_LOG__(level, type, name, ...) \
+ PMD_DRV_LOG___(level, type, name, __VA_ARGS__)
+#define PMD_DRV_LOG_(level, type, name, s, ...) \
+ PMD_DRV_LOG__(level, type, name, s "\n", __VA_ARGS__)
+
+#endif /* RTE_LIBRTE_MLX5_DEBUG */
+
+/* claim_zero() does not perform any check when debugging is disabled. */
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+
+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
+#define MLX5_ASSERT(exp) RTE_VERIFY(exp)
+#define claim_zero(...) MLX5_ASSERT((__VA_ARGS__) == 0)
+#define claim_nonzero(...) MLX5_ASSERT((__VA_ARGS__) != 0)
+
+#else /* RTE_LIBRTE_MLX5_DEBUG */
+
+#define DEBUG(...) (void)0
+#define MLX5_ASSERT(exp) RTE_ASSERT(exp)
+#define claim_zero(...) (__VA_ARGS__)
+#define claim_nonzero(...) (__VA_ARGS__)
+
+#endif /* RTE_LIBRTE_MLX5_DEBUG */
+
+/* Allocate a buffer on the stack and fill it with a printf format string. */
+#define MKSTR(name, ...) \
+ int mkstr_size_##name = snprintf(NULL, 0, "" __VA_ARGS__); \
+ char name[mkstr_size_##name + 1]; \
+ \
+ snprintf(name, sizeof(name), "" __VA_ARGS__)
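A usage sketch for MKSTR: the first snprintf() pass computes the required
length, then the string is formatted into a stack VLA of exactly that size.
The sysfs path below is only an example value.

static void example_mkstr(const char *ifname)
{
	/* Builds "/sys/class/net/<ifname>/mtu" on the stack. */
	MKSTR(path, "/sys/class/net/%s/mtu", ifname);

	FILE *file = fopen(path, "rb");

	if (file != NULL)
		fclose(file);
}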
+
+enum {
+ PCI_VENDOR_ID_MELLANOX = 0x15b3,
+};
+
+enum {
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4 = 0x1013,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF = 0xa2d3,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6 = 0x101b,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6VF = 0x101c,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6DX = 0x101d,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF = 0x101e,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF = 0xa2d6,
+};
+
+/* Maximum number of simultaneous unicast MAC addresses. */
+#define MLX5_MAX_UC_MAC_ADDRESSES 128
+/* Maximum number of simultaneous Multicast MAC addresses. */
+#define MLX5_MAX_MC_MAC_ADDRESSES 128
+/* Maximum number of simultaneous MAC addresses. */
+#define MLX5_MAX_MAC_ADDRESSES \
+ (MLX5_MAX_UC_MAC_ADDRESSES + MLX5_MAX_MC_MAC_ADDRESSES)
+
+/* Recognized Infiniband device physical port name types. */
+enum mlx5_nl_phys_port_name_type {
+ MLX5_PHYS_PORT_NAME_TYPE_NOTSET = 0, /* Not set. */
+	MLX5_PHYS_PORT_NAME_TYPE_LEGACY, /* kernel ver < 5.0 */
+ MLX5_PHYS_PORT_NAME_TYPE_UPLINK, /* p0, kernel ver >= 5.0 */
+ MLX5_PHYS_PORT_NAME_TYPE_PFVF, /* pf0vf0, kernel ver >= 5.0 */
+ MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN, /* Unrecognized. */
+};
+
+/** Switch information returned by mlx5_nl_switch_info(). */
+struct mlx5_switch_info {
+ uint32_t master:1; /**< Master device. */
+ uint32_t representor:1; /**< Representor device. */
+	enum mlx5_nl_phys_port_name_type name_type; /**< Port name type. */
+ int32_t pf_num; /**< PF number (valid for pfxvfx format only). */
+ int32_t port_name; /**< Representor port name. */
+ uint64_t switch_id; /**< Switch identifier. */
+};
+
+/* CQE status. */
+enum mlx5_cqe_status {
+ MLX5_CQE_STATUS_SW_OWN = -1,
+ MLX5_CQE_STATUS_HW_OWN = -2,
+ MLX5_CQE_STATUS_ERR = -3,
+};
+
+/**
+ * Check whether CQE is valid.
+ *
+ * @param cqe
+ * Pointer to CQE.
+ * @param cqes_n
+ * Size of completion queue.
+ * @param ci
+ * Consumer index.
+ *
+ * @return
+ * The CQE status.
+ */
+static __rte_always_inline enum mlx5_cqe_status
+check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,
+ const uint16_t ci)
+{
+ const uint16_t idx = ci & cqes_n;
+ const uint8_t op_own = cqe->op_own;
+ const uint8_t op_owner = MLX5_CQE_OWNER(op_own);
+ const uint8_t op_code = MLX5_CQE_OPCODE(op_own);
+
+ if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
+ return MLX5_CQE_STATUS_HW_OWN;
+ rte_cio_rmb();
+ if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
+ op_code == MLX5_CQE_REQ_ERR))
+ return MLX5_CQE_STATUS_ERR;
+ return MLX5_CQE_STATUS_SW_OWN;
+}
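A hedged polling sketch built on check_cqe(); the queue layout below is an
assumption for illustration, not the PMD's actual datapath structure. Note
that check_cqe() is passed the full queue size, which it uses to derive the
ownership-toggle bit, while array indexing masks with size minus one.

struct example_cq {
	volatile struct mlx5_cqe *cqes; /* array of cqe_n entries (power of 2) */
	uint16_t cqe_n;                 /* number of CQEs */
	uint16_t ci;                    /* consumer index */
};

static void example_poll(struct example_cq *cq)
{
	for (;;) {
		volatile struct mlx5_cqe *cqe =
			&cq->cqes[cq->ci & (cq->cqe_n - 1)];

		if (check_cqe(cqe, cq->cqe_n, cq->ci) !=
		    MLX5_CQE_STATUS_SW_OWN)
			break; /* still HW-owned, or an error CQE */
		/* ... consume the completion ... */
		++cq->ci;
	}
}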
+
+__rte_internal
+int mlx5_dev_to_pci_addr(const char *dev_path, struct rte_pci_addr *pci_addr);
+
+#define MLX5_CLASS_ARG_NAME "class"
+
+enum mlx5_class {
+ MLX5_CLASS_NET,
+ MLX5_CLASS_VDPA,
+ MLX5_CLASS_INVALID,
+};
+
+__rte_internal
+enum mlx5_class mlx5_class_get(struct rte_devargs *devargs);
+__rte_internal
+void mlx5_translate_port_name(const char *port_name_in,
+ struct mlx5_switch_info *port_info_out);
+
+extern uint8_t haswell_broadwell_cpu;
+
+#endif /* RTE_PMD_MLX5_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mp.c b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mp.c
new file mode 100644
index 000000000..da55143bc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mp.c
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 6WIND S.A.
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#include <stdio.h>
+#include <time.h>
+
+#include <rte_eal.h>
+#include <rte_errno.h>
+
+#include "mlx5_common_mp.h"
+#include "mlx5_common_utils.h"
+
+/**
+ * Request Memory Region creation to the primary process.
+ *
+ * @param[in] mp_id
+ * ID of the MP process.
+ * @param addr
+ * Target virtual address to register.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mp_req_mr_create(struct mlx5_mp_id *mp_id, uintptr_t addr)
+{
+ struct rte_mp_msg mp_req;
+ struct rte_mp_msg *mp_res;
+ struct rte_mp_reply mp_rep;
+ struct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;
+ struct mlx5_mp_param *res;
+ struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ mp_init_msg(mp_id, &mp_req, MLX5_MP_REQ_CREATE_MR);
+ req->args.addr = addr;
+ ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
+ if (ret) {
+ DRV_LOG(ERR, "port %u request to primary process failed",
+ mp_id->port_id);
+ return -rte_errno;
+ }
+ MLX5_ASSERT(mp_rep.nb_received == 1);
+ mp_res = &mp_rep.msgs[0];
+ res = (struct mlx5_mp_param *)mp_res->param;
+ ret = res->result;
+ if (ret)
+ rte_errno = -ret;
+ free(mp_rep.msgs);
+ return ret;
+}
+
+/**
+ * Request Verbs queue state modification to the primary process.
+ *
+ * @param[in] mp_id
+ * ID of the MP process.
+ * @param sm
+ * State modify parameters.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mp_req_queue_state_modify(struct mlx5_mp_id *mp_id,
+ struct mlx5_mp_arg_queue_state_modify *sm)
+{
+ struct rte_mp_msg mp_req;
+ struct rte_mp_msg *mp_res;
+ struct rte_mp_reply mp_rep;
+ struct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;
+ struct mlx5_mp_param *res;
+ struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ mp_init_msg(mp_id, &mp_req, MLX5_MP_REQ_QUEUE_STATE_MODIFY);
+ req->args.state_modify = *sm;
+ ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
+ if (ret) {
+ DRV_LOG(ERR, "port %u request to primary process failed",
+ mp_id->port_id);
+ return -rte_errno;
+ }
+ MLX5_ASSERT(mp_rep.nb_received == 1);
+ mp_res = &mp_rep.msgs[0];
+ res = (struct mlx5_mp_param *)mp_res->param;
+ ret = res->result;
+ free(mp_rep.msgs);
+ return ret;
+}
+
+/**
+ * Request Verbs command file descriptor for mmap to the primary process.
+ *
+ * @param[in] mp_id
+ * ID of the MP process.
+ *
+ * @return
+ * fd on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mp_req_verbs_cmd_fd(struct mlx5_mp_id *mp_id)
+{
+ struct rte_mp_msg mp_req;
+ struct rte_mp_msg *mp_res;
+ struct rte_mp_reply mp_rep;
+ struct mlx5_mp_param *res;
+ struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ mp_init_msg(mp_id, &mp_req, MLX5_MP_REQ_VERBS_CMD_FD);
+ ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
+ if (ret) {
+ DRV_LOG(ERR, "port %u request to primary process failed",
+ mp_id->port_id);
+ return -rte_errno;
+ }
+ MLX5_ASSERT(mp_rep.nb_received == 1);
+ mp_res = &mp_rep.msgs[0];
+ res = (struct mlx5_mp_param *)mp_res->param;
+ if (res->result) {
+ rte_errno = -res->result;
+ DRV_LOG(ERR,
+ "port %u failed to get command FD from primary process",
+ mp_id->port_id);
+ ret = -rte_errno;
+ goto exit;
+ }
+ MLX5_ASSERT(mp_res->num_fds == 1);
+ ret = mp_res->fds[0];
+ DRV_LOG(DEBUG, "port %u command FD from primary is %d",
+ mp_id->port_id, ret);
+exit:
+ free(mp_rep.msgs);
+ return ret;
+}
+
+/**
+ * Initialize by primary process.
+ */
+int
+mlx5_mp_init_primary(const char *name, const rte_mp_t primary_action)
+{
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+
+ /* primary is allowed to not support IPC */
+ ret = rte_mp_action_register(name, primary_action);
+ if (ret && rte_errno != ENOTSUP)
+ return -1;
+ return 0;
+}
+
+/**
+ * Un-initialize by primary process.
+ */
+void
+mlx5_mp_uninit_primary(const char *name)
+{
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ rte_mp_action_unregister(name);
+}
+
+/**
+ * Initialize by secondary process.
+ */
+int
+mlx5_mp_init_secondary(const char *name, const rte_mp_t secondary_action)
+{
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ return rte_mp_action_register(name, secondary_action);
+}
+
+/**
+ * Un-initialize by secondary process.
+ */
+void
+mlx5_mp_uninit_secondary(const char *name)
+{
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ rte_mp_action_unregister(name);
+}
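A hedged sketch of the secondary-process side of this IPC: registering a
memory region through the primary. The mp_id name and port below are
hypothetical; real callers use the name they registered with
mlx5_mp_init_secondary() (memset assumes <string.h>).

static int example_secondary_mr_register(uintptr_t addr)
{
	struct mlx5_mp_id mp_id;

	memset(&mp_id, 0, sizeof(mp_id));
	mp_id.port_id = 0; /* assumption: first port */
	strlcpy(mp_id.name, "example_mp", sizeof(mp_id.name)); /* hypothetical */
	return mlx5_mp_req_mr_create(&mp_id, addr);
}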
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mp.h b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mp.h
new file mode 100644
index 000000000..05466fdc9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mp.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_COMMON_MP_H_
+#define RTE_PMD_MLX5_COMMON_MP_H_
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+
+/* Request types for IPC. */
+enum mlx5_mp_req_type {
+ MLX5_MP_REQ_VERBS_CMD_FD = 1,
+ MLX5_MP_REQ_CREATE_MR,
+ MLX5_MP_REQ_START_RXTX,
+ MLX5_MP_REQ_STOP_RXTX,
+ MLX5_MP_REQ_QUEUE_STATE_MODIFY,
+};
+
+struct mlx5_mp_arg_queue_state_modify {
+ uint8_t is_wq; /* Set if WQ. */
+ uint16_t queue_id; /* DPDK queue ID. */
+ enum ibv_wq_state state; /* WQ requested state. */
+};
+
+/* Parameters for IPC. */
+struct mlx5_mp_param {
+ enum mlx5_mp_req_type type;
+ int port_id;
+ int result;
+ RTE_STD_C11
+ union {
+ uintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */
+ struct mlx5_mp_arg_queue_state_modify state_modify;
+ /* MLX5_MP_REQ_QUEUE_STATE_MODIFY */
+ } args;
+};
+
+/* Identifier of a MP process */
+struct mlx5_mp_id {
+ char name[RTE_MP_MAX_NAME_LEN];
+ uint16_t port_id;
+};
+
+/** Request timeout for IPC. */
+#define MLX5_MP_REQ_TIMEOUT_SEC 5
+
+/**
+ * Initialize IPC message.
+ *
+ * @param[in] mp_id
+ *   ID of the MP process.
+ * @param[out] msg
+ * Pointer to message to fill in.
+ * @param[in] type
+ * Message type.
+ */
+static inline void
+mp_init_msg(struct mlx5_mp_id *mp_id, struct rte_mp_msg *msg,
+ enum mlx5_mp_req_type type)
+{
+ struct mlx5_mp_param *param = (struct mlx5_mp_param *)msg->param;
+
+ memset(msg, 0, sizeof(*msg));
+ strlcpy(msg->name, mp_id->name, sizeof(msg->name));
+ msg->len_param = sizeof(*param);
+ param->type = type;
+ param->port_id = mp_id->port_id;
+}
+
+__rte_internal
+int mlx5_mp_init_primary(const char *name, const rte_mp_t primary_action);
+__rte_internal
+void mlx5_mp_uninit_primary(const char *name);
+__rte_internal
+int mlx5_mp_init_secondary(const char *name, const rte_mp_t secondary_action);
+__rte_internal
+void mlx5_mp_uninit_secondary(const char *name);
+__rte_internal
+int mlx5_mp_req_mr_create(struct mlx5_mp_id *mp_id, uintptr_t addr);
+__rte_internal
+int mlx5_mp_req_queue_state_modify(struct mlx5_mp_id *mp_id,
+ struct mlx5_mp_arg_queue_state_modify *sm);
+__rte_internal
+int mlx5_mp_req_verbs_cmd_fd(struct mlx5_mp_id *mp_id);
+
+#endif /* RTE_PMD_MLX5_COMMON_MP_H_ */
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mr.c b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mr.c
new file mode 100644
index 000000000..3b4644646
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mr.c
@@ -0,0 +1,1110 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2020 Mellanox Technologies, Ltd
+ */
+#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_rwlock.h>
+
+#include "mlx5_glue.h"
+#include "mlx5_common_mp.h"
+#include "mlx5_common_mr.h"
+#include "mlx5_common_utils.h"
+
+struct mr_find_contig_memsegs_data {
+ uintptr_t addr;
+ uintptr_t start;
+ uintptr_t end;
+ const struct rte_memseg_list *msl;
+};
+
+/**
+ * Expand B-tree table to a given size. Can't be called with holding
+ * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries for expansion.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_expand(struct mlx5_mr_btree *bt, int n)
+{
+ void *mem;
+ int ret = 0;
+
+ if (n <= bt->size)
+ return ret;
+ /*
+	 * The downside of directly using rte_realloc() is that SOCKET_ID_ANY
+	 * is used internally if there's no room to expand. Because this is
+	 * quite a rare case and part of a very slow path, it is acceptable.
+	 * Initially cache_bh[] is given practically enough space, and once
+	 * it is expanded, expansion won't be needed again.
+ */
+ mem = rte_realloc(bt->table, n * sizeof(struct mr_cache_entry), 0);
+ if (mem == NULL) {
+ /* Not an error, B-tree search will be skipped. */
+ DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
+ (void *)bt);
+ ret = -1;
+ } else {
+ DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
+ bt->table = mem;
+ bt->size = n;
+ }
+ return ret;
+}
+
+/**
+ * Look up LKey from given B-tree lookup table, store the last index and return
+ * searched LKey.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param[out] idx
+ * Pointer to index. Even on search failure, returns index where it stops
+ * searching so that index can be used when inserting a new entry.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
+{
+ struct mr_cache_entry *lkp_tbl;
+ uint16_t n;
+ uint16_t base = 0;
+
+ MLX5_ASSERT(bt != NULL);
+ lkp_tbl = *bt->table;
+ n = bt->len;
+ /* First entry must be NULL for comparison. */
+ MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
+ /* Binary search. */
+ do {
+ register uint16_t delta = n >> 1;
+
+ if (addr < lkp_tbl[base + delta].start) {
+ n = delta;
+ } else {
+ base += delta;
+ n -= delta;
+ }
+ } while (n > 1);
+ MLX5_ASSERT(addr >= lkp_tbl[base].start);
+ *idx = base;
+ if (addr < lkp_tbl[base].end)
+ return lkp_tbl[base].lkey;
+ /* Not found. */
+ return UINT32_MAX;
+}
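A worked illustration of the lookup invariant above, assuming a three-entry
table (the values are hypothetical; the sentinel entry always comes first):

/*
 * Assumed table:
 *
 *   idx  start    end      lkey
 *   [0]  0x0      0x0      UINT32_MAX   (sentinel)
 *   [1]  0x1000   0x3000   0xaa
 *   [2]  0x5000   0x6000   0xbb
 *
 * mr_btree_lookup(bt, &i, 0x2000) -> 0xaa,       i == 1 (inside entry [1])
 * mr_btree_lookup(bt, &i, 0x4000) -> UINT32_MAX, i == 1 (gap after [1];
 *                                    insertion would place a new entry at 2)
 * mr_btree_lookup(bt, &i, 0x5000) -> 0xbb,       i == 2
 */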
+
+/**
+ * Insert an entry to B-tree lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param entry
+ * Pointer to new entry to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
+{
+ struct mr_cache_entry *lkp_tbl;
+ uint16_t idx = 0;
+ size_t shift;
+
+ MLX5_ASSERT(bt != NULL);
+ MLX5_ASSERT(bt->len <= bt->size);
+ MLX5_ASSERT(bt->len > 0);
+ lkp_tbl = *bt->table;
+ /* Find out the slot for insertion. */
+ if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
+ DRV_LOG(DEBUG,
+ "abort insertion to B-tree(%p): already exist at"
+ " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ /* Already exist, return. */
+ return 0;
+ }
+ /* If table is full, return error. */
+ if (unlikely(bt->len == bt->size)) {
+ bt->overflow = 1;
+ return -1;
+ }
+ /* Insert entry. */
+ ++idx;
+ shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
+ if (shift)
+ memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
+ lkp_tbl[idx] = *entry;
+ bt->len++;
+ DRV_LOG(DEBUG,
+ "inserted B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ return 0;
+}
+
+/**
+ * Initialize B-tree and allocate memory for lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries to allocate.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
+{
+ if (bt == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ MLX5_ASSERT(!bt->table && !bt->size);
+ memset(bt, 0, sizeof(*bt));
+ bt->table = rte_calloc_socket("B-tree table",
+ n, sizeof(struct mr_cache_entry),
+ 0, socket);
+ if (bt->table == NULL) {
+ rte_errno = ENOMEM;
+ DEBUG("failed to allocate memory for btree cache on socket %d",
+ socket);
+ return -rte_errno;
+ }
+ bt->size = n;
+ /* First entry must be NULL for binary search. */
+ (*bt->table)[bt->len++] = (struct mr_cache_entry) {
+ .lkey = UINT32_MAX,
+ };
+ DEBUG("initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ return 0;
+}
+
+/**
+ * Free B-tree resources.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
+{
+ if (bt == NULL)
+ return;
+ DEBUG("freeing B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ rte_free(bt->table);
+ memset(bt, 0, sizeof(*bt));
+}
+
+/**
+ * Dump all the entries in a B-tree
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
+{
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+ int idx;
+ struct mr_cache_entry *lkp_tbl;
+
+ if (bt == NULL)
+ return;
+ lkp_tbl = *bt->table;
+ for (idx = 0; idx < bt->len; ++idx) {
+ struct mr_cache_entry *entry = &lkp_tbl[idx];
+
+ DEBUG("B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ }
+#endif
+}
+
+/**
+ * Find virtually contiguous memory chunk in a given MR.
+ *
+ * @param mr
+ *   Pointer to MR structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If not found, this will not be
+ * updated.
+ * @param base_idx
+ * Start index of the memseg bitmap.
+ *
+ * @return
+ * Next index to go on lookup.
+ */
+static int
+mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
+ int base_idx)
+{
+ uintptr_t start = 0;
+ uintptr_t end = 0;
+ uint32_t idx = 0;
+
+ /* MR for external memory doesn't have memseg list. */
+ if (mr->msl == NULL) {
+ struct ibv_mr *ibv_mr = mr->ibv_mr;
+
+ MLX5_ASSERT(mr->ms_bmp_n == 1);
+ MLX5_ASSERT(mr->ms_n == 1);
+ MLX5_ASSERT(base_idx == 0);
+ /*
+		 * Can't search the memseg list; get it directly from the
+		 * verbs MR as there's only one chunk.
+ */
+ entry->start = (uintptr_t)ibv_mr->addr;
+ entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ /* Returning 1 ends iteration. */
+ return 1;
+ }
+ for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+
+ msl = mr->msl;
+ ms = rte_fbarray_get(&msl->memseg_arr,
+ mr->ms_base_idx + idx);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
+ if (!start)
+ start = ms->addr_64;
+ end = ms->addr_64 + ms->hugepage_sz;
+ } else if (start) {
+ /* Passed the end of a fragment. */
+ break;
+ }
+ }
+ if (start) {
+ /* Found one chunk. */
+ entry->start = start;
+ entry->end = end;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ }
+ return idx;
+}
+
+/**
+ * Insert an MR into the global B-tree cache. It may fail when memory is low.
+ * Then, this entry will have to be found by mlx5_mr_lookup_list() in
+ * mlx5_mr_create() on a cache miss.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param mr
+ * Pointer to MR to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+int
+mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
+ struct mlx5_mr *mr)
+{
+ unsigned int n;
+
+ DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
+ (void *)mr, (void *)share_cache);
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mr_cache_entry entry;
+
+ memset(&entry, 0, sizeof(entry));
+ /* Find a contiguous chunk and advance the index. */
+ n = mr_find_next_chunk(mr, &entry, n);
+ if (!entry.end)
+ break;
+ if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
+ /*
+ * Overflowed, but the global table cannot be expanded
+			 * because doing so would risk a deadlock.
+ */
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Look up address in the original global MR list.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Found MR on match, NULL otherwise.
+ */
+struct mlx5_mr *
+mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr)
+{
+ struct mlx5_mr *mr;
+
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &share_cache->mr_list, mr) {
+ unsigned int n;
+
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mr_cache_entry ret;
+
+ memset(&ret, 0, sizeof(ret));
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (addr >= ret.start && addr < ret.end) {
+ /* Found. */
+ *entry = ret;
+ return mr;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Look up address in the global MR cache.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+uint32_t
+mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr)
+{
+ uint16_t idx;
+ uint32_t lkey = UINT32_MAX;
+ struct mlx5_mr *mr;
+
+ /*
+	 * If the global cache has overflowed because it failed to expand the
+	 * B-tree table, it can't hold all the existing MRs. Then, the address
+	 * has to be searched by traversing the original MR list instead, which
+	 * is a very slow path. Otherwise, the global cache is all-inclusive.
+ */
+ if (!unlikely(share_cache->cache.overflow)) {
+ lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
+ if (lkey != UINT32_MAX)
+ *entry = (*share_cache->cache.table)[idx];
+ } else {
+ /* Falling back to the slowest path. */
+ mr = mlx5_mr_lookup_list(share_cache, entry, addr);
+ if (mr != NULL)
+ lkey = entry->lkey;
+ }
+ MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
+ return lkey;
+}
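+
+/*
+ * A minimal sketch (hypothetical helper, compiled out via the never-defined
+ * MLX5_MR_DOC_EXAMPLE macro) of how callers in this file use
+ * mlx5_mr_lookup_cache(): the cache is shared across queues, so the rwlock
+ * must be taken around the call, as mlx5_mr_create_secondary() does below.
+ */
+#ifdef MLX5_MR_DOC_EXAMPLE
+static uint32_t
+mr_lookup_cache_example(struct mlx5_mr_share_cache *share_cache,
+			uintptr_t addr)
+{
+	struct mr_cache_entry entry;
+	uint32_t lkey;
+
+	rte_rwlock_read_lock(&share_cache->rwlock);
+	lkey = mlx5_mr_lookup_cache(share_cache, &entry, addr);
+	rte_rwlock_read_unlock(&share_cache->rwlock);
+	return lkey; /* UINT32_MAX when addr is not registered. */
+}
+#endif /* MLX5_MR_DOC_EXAMPLE */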
+
+/**
+ * Free MR resources. The MR lock must not be held, to avoid a deadlock:
+ * rte_free() can raise a memory free event and the callback function would
+ * spin on the lock.
+ *
+ * @param mr
+ * Pointer to MR to free.
+ */
+static void
+mr_free(struct mlx5_mr *mr)
+{
+ if (mr == NULL)
+ return;
+ DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
+ if (mr->ibv_mr != NULL)
+ claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr));
+ if (mr->ms_bmp != NULL)
+ rte_bitmap_free(mr->ms_bmp);
+ rte_free(mr);
+}
+
+void
+mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
+{
+ struct mlx5_mr *mr;
+
+ DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
+ /* Flush cache to rebuild. */
+ share_cache->cache.len = 1;
+ share_cache->cache.overflow = 0;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &share_cache->mr_list, mr)
+ if (mlx5_mr_insert_cache(share_cache, mr) < 0)
+ return;
+}
+
+/**
+ * Release resources of detached MR having no online entry.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ */
+static void
+mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
+{
+ struct mlx5_mr *mr_next;
+ struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
+
+ /* Must be called from the primary process. */
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ /*
+	 * An MR can't be freed while holding the lock because rte_free() could
+	 * call the memory free callback function; that would be a deadlock.
+ */
+ rte_rwlock_write_lock(&share_cache->rwlock);
+ /* Detach the whole free list and release it after unlocking. */
+ free_list = share_cache->mr_free_list;
+ LIST_INIT(&share_cache->mr_free_list);
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+ /* Release resources. */
+ mr_next = LIST_FIRST(&free_list);
+ while (mr_next != NULL) {
+ struct mlx5_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ mr_free(mr);
+ }
+}
+
+/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
+static int
+mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct mr_find_contig_memsegs_data *data = arg;
+
+ if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
+ return 0;
+ /* Found, save it and stop walking. */
+ data->start = ms->addr_64;
+ data->end = ms->addr_64 + len;
+ data->msl = msl;
+ return 1;
+}
+
+/**
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * This API should be called from a secondary process; a request is then sent
+ * to the primary process in order to create an MR for the address. As the
+ * global MR list is in shared memory, the following LKey lookup should succeed
+ * unless the request fails.
+ *
+ * @param pd
+ * Pointer to ibv_pd of a device (net, regex, vdpa,...).
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
+ * @param mr_ext_memseg_en
+ *   Configurable flag: when set, extend MR registration over the whole
+ *   virtually contiguous chunk of memory segments.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mlx5_mr_create_secondary(struct ibv_pd *pd __rte_unused,
+ struct mlx5_mp_id *mp_id,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr,
+ unsigned int mr_ext_memseg_en __rte_unused)
+{
+ int ret;
+
+ DEBUG("port %u requesting MR creation for address (%p)",
+ mp_id->port_id, (void *)addr);
+ ret = mlx5_mp_req_mr_create(mp_id, addr);
+ if (ret) {
+ DEBUG("Fail to request MR creation for address (%p)",
+ (void *)addr);
+ return UINT32_MAX;
+ }
+ rte_rwlock_read_lock(&share_cache->rwlock);
+ /* Fill in output data. */
+ mlx5_mr_lookup_cache(share_cache, entry, addr);
+ /* Lookup can't fail. */
+ MLX5_ASSERT(entry->lkey != UINT32_MAX);
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+ DEBUG("MR CREATED by primary process for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
+ (void *)addr, entry->start, entry->end, entry->lkey);
+ return entry->lkey;
+}
+
+/**
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * Register entire virtually contiguous memory chunk around the address.
+ *
+ * @param pd
+ * Pointer to ibv_pd of a device (net, regex, vdpa,...).
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
+ * @param mr_ext_memseg_en
+ *   Configurable flag: when set, extend MR registration over the whole
+ *   virtually contiguous chunk of memory segments.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+uint32_t
+mlx5_mr_create_primary(struct ibv_pd *pd,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr,
+ unsigned int mr_ext_memseg_en)
+{
+ struct mr_find_contig_memsegs_data data = {.addr = addr, };
+ struct mr_find_contig_memsegs_data data_re;
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+ struct mlx5_mr *mr = NULL;
+ int ms_idx_shift = -1;
+ uint32_t bmp_size;
+ void *bmp_mem;
+ uint32_t ms_n;
+ uint32_t n;
+ size_t len;
+
+ DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
+ /*
+ * Release detached MRs if any. This can't be called with holding either
+ * memory_hotplug_lock or share_cache->rwlock. MRs on the free list have
+ * been detached by the memory free event but it couldn't be released
+ * inside the callback due to deadlock. As a result, releasing resources
+ * is quite opportunistic.
+ */
+ mlx5_mr_garbage_collect(share_cache);
+ /*
+ * If enabled, find out a contiguous virtual address chunk in use, to
+ * which the given address belongs, in order to register maximum range.
+ * In the best case where mempools are not dynamically recreated and
+ * '--socket-mem' is specified as an EAL option, it is very likely to
+	 * have only one MR (LKey) per socket and per hugepage size even
+	 * though the system memory is highly fragmented. As the whole memory
+	 * chunk will be pinned by the kernel, it can't be reused unless the
+	 * entire chunk is freed from EAL.
+ *
+ * If disabled, just register one memseg (page). Then, memory
+ * consumption will be minimized but it may drop performance if there
+ * are many MRs to lookup on the datapath.
+ */
+ if (!mr_ext_memseg_en) {
+ data.msl = rte_mem_virt2memseg_list((void *)addr);
+ data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
+ data.end = data.start + data.msl->page_sz;
+ } else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
+ DRV_LOG(WARNING,
+ "Unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.", (void *)addr);
+ rte_errno = ENXIO;
+ goto err_nolock;
+ }
+alloc_resources:
+ /* Addresses must be page-aligned. */
+ MLX5_ASSERT(data.msl);
+ MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ msl = data.msl;
+ ms = rte_mem_virt2memseg((void *)data.start, msl);
+ len = data.end - data.start;
+ MLX5_ASSERT(ms);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
+ /* Number of memsegs in the range. */
+ ms_n = len / msl->page_sz;
+ DEBUG("Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ (void *)addr, data.start, data.end, msl->page_sz, ms_n);
+ /* Size of memory for bitmap. */
+ bmp_size = rte_bitmap_get_memory_footprint(ms_n);
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE) +
+ bmp_size,
+ RTE_CACHE_LINE_SIZE, msl->socket_id);
+ if (mr == NULL) {
+ DEBUG("Unable to allocate memory for a new MR of"
+ " address (%p).", (void *)addr);
+ rte_errno = ENOMEM;
+ goto err_nolock;
+ }
+ mr->msl = msl;
+ /*
+ * Save the index of the first memseg and initialize memseg bitmap. To
+ * see if a memseg of ms_idx in the memseg-list is still valid, check:
+ * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
+ */
+ mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
+ mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
+ if (mr->ms_bmp == NULL) {
+ DEBUG("Unable to initialize bitmap for a new MR of"
+ " address (%p).", (void *)addr);
+ rte_errno = EINVAL;
+ goto err_nolock;
+ }
+ /*
+	 * Recheck whether the extended contiguous chunk is still valid.
+	 * Because memory_hotplug_lock can't be held across memory-related
+	 * calls in a critical path, the resource allocation above couldn't be
+	 * locked. If the memory has changed at this point, try again with
+	 * just a single page. If not, go on with the big chunk atomically
+	 * from here.
+ */
+ rte_mcfg_mem_read_lock();
+ data_re = data;
+ if (len > msl->page_sz &&
+ !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
+ DEBUG("Unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.", (void *)addr);
+ rte_errno = ENXIO;
+ goto err_memlock;
+ }
+ if (data.start != data_re.start || data.end != data_re.end) {
+ /*
+ * The extended contiguous chunk has been changed. Try again
+ * with single memseg instead.
+ */
+ data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
+ data.end = data.start + msl->page_sz;
+ rte_mcfg_mem_read_unlock();
+ mr_free(mr);
+ goto alloc_resources;
+ }
+ MLX5_ASSERT(data.msl == data_re.msl);
+ rte_rwlock_write_lock(&share_cache->rwlock);
+ /*
+	 * Check that the address is really missing. If another thread has
+	 * already created one, or the earlier miss was due to cache overflow,
+	 * abort and return.
+ */
+ if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
+ /*
+		 * Insert into the global cache table. It may fail when memory
+		 * is low. Then, this entry will have to be searched here
+		 * again.
+ */
+ mr_btree_insert(&share_cache->cache, entry);
+ DEBUG("Found MR for %p on final lookup, abort", (void *)addr);
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+ rte_mcfg_mem_read_unlock();
+ /*
+ * Must be unlocked before calling rte_free() because
+ * mlx5_mr_mem_event_free_cb() can be called inside.
+ */
+ mr_free(mr);
+ return entry->lkey;
+ }
+ /*
+ * Trim start and end addresses for verbs MR. Set bits for registering
+ * memsegs but exclude already registered ones. Bitmap can be
+ * fragmented.
+ */
+ for (n = 0; n < ms_n; ++n) {
+ uintptr_t start;
+ struct mr_cache_entry ret;
+
+ memset(&ret, 0, sizeof(ret));
+ start = data_re.start + n * msl->page_sz;
+ /* Exclude memsegs already registered by other MRs. */
+ if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
+ UINT32_MAX) {
+ /*
+ * Start from the first unregistered memseg in the
+ * extended range.
+ */
+ if (ms_idx_shift == -1) {
+ mr->ms_base_idx += n;
+ data.start = start;
+ ms_idx_shift = n;
+ }
+ data.end = start + msl->page_sz;
+ rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
+ ++mr->ms_n;
+ }
+ }
+ len = data.end - data.start;
+ mr->ms_bmp_n = len / msl->page_sz;
+ MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ /*
+ * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
+ * called with holding the memory lock because it doesn't use
+ * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
+ * through mlx5_alloc_verbs_buf().
+ */
+ mr->ibv_mr = mlx5_glue->reg_mr(pd, (void *)data.start, len,
+ IBV_ACCESS_LOCAL_WRITE |
+ (haswell_broadwell_cpu ? 0 :
+ IBV_ACCESS_RELAXED_ORDERING));
+ if (mr->ibv_mr == NULL) {
+ DEBUG("Fail to create a verbs MR for address (%p)",
+ (void *)addr);
+ rte_errno = EINVAL;
+ goto err_mrlock;
+ }
+ MLX5_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start);
+ MLX5_ASSERT(mr->ibv_mr->length == len);
+ LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
+ DEBUG("MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ (void *)mr, (void *)addr, data.start, data.end,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mlx5_mr_insert_cache(share_cache, mr);
+ /* Fill in output data. */
+ mlx5_mr_lookup_cache(share_cache, entry, addr);
+ /* Lookup can't fail. */
+ MLX5_ASSERT(entry->lkey != UINT32_MAX);
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+ rte_mcfg_mem_read_unlock();
+ return entry->lkey;
+err_mrlock:
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+err_memlock:
+ rte_mcfg_mem_read_unlock();
+err_nolock:
+ /*
+	 * In case of error, as this can be called in a datapath, a warning
+	 * message per error is preferable. Must be unlocked before
+ * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
+ * inside.
+ */
+ mr_free(mr);
+ return UINT32_MAX;
+}
+
+/**
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * This can be called from primary and secondary process.
+ *
+ * @param pd
+ * Pointer to ibv_pd of a device (net, regex, vdpa,...).
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mlx5_mr_create(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr,
+ unsigned int mr_ext_memseg_en)
+{
+ uint32_t ret = 0;
+
+ switch (rte_eal_process_type()) {
+ case RTE_PROC_PRIMARY:
+ ret = mlx5_mr_create_primary(pd, share_cache, entry,
+ addr, mr_ext_memseg_en);
+ break;
+ case RTE_PROC_SECONDARY:
+ ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
+ addr, mr_ext_memseg_en);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * Look up address in the global MR cache table. If not found, create a new MR.
+ * Insert the found/created entry into the local bottom-half cache table.
+ *
+ * @param pd
+ * Pointer to ibv_pd of a device (net, regex, vdpa,...).
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this is not written.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mr_lookup_caches(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mlx5_mr_ctrl *mr_ctrl,
+ struct mr_cache_entry *entry, uintptr_t addr,
+ unsigned int mr_ext_memseg_en)
+{
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ uint32_t lkey;
+ uint16_t idx;
+
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in the global cache. */
+ rte_rwlock_read_lock(&share_cache->rwlock);
+ lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
+ if (lkey != UINT32_MAX) {
+ /* Found. */
+ *entry = (*share_cache->cache.table)[idx];
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+ /*
+ * Update local cache. Even if it fails, return the found entry
+ * to update top-half cache. Next time, this entry will be found
+ * in the global cache.
+ */
+ mr_btree_insert(bt, entry);
+ return lkey;
+ }
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+ /* First time to see the address? Create a new MR. */
+ lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
+ mr_ext_memseg_en);
+ /*
+	 * Update the local cache if a new global MR was successfully created.
+	 * Even if creation failed, there's no action to take in this datapath
+	 * code. As the returned LKey is invalid, this will eventually make
+	 * the HW fail.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
+}
+
+/**
+ * Bottom-half of LKey search on datapath. First search in cache_bh[] and if
+ * misses, search in the global MR cache table and update the new entry to
+ * per-queue local caches.
+ *
+ * @param pd
+ * Pointer to ibv_pd of a device (net, regex, vdpa,...).
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t mlx5_mr_addr2mr_bh(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mlx5_mr_ctrl *mr_ctrl,
+ uintptr_t addr, unsigned int mr_ext_memseg_en)
+{
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ /* Victim in top-half cache to replace with new entry. */
+ struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ /*
+ * If missed in local lookup table, search in the global cache
+ * and local cache_bh[] will be updated inside if possible.
+ * Top-half cache entry will also be updated.
+ */
+ lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
+ repl, addr, mr_ext_memseg_en);
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
+ }
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+ return lkey;
+}
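+
+/*
+ * Datapath sketch (hypothetical wrapper, compiled out): PMDs pair the linear
+ * top-half lookup over mr_ctrl->cache[] with this bottom-half. The top-half
+ * helper mlx5_mr_lookup_lkey() is defined in mlx5_common_mr.h; the trailing
+ * '1' assumes mr_ext_memseg_en is enabled.
+ */
+#ifdef MLX5_MR_DOC_EXAMPLE
+static __rte_always_inline uint32_t
+example_addr2mr(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
+		struct mlx5_mr_share_cache *share_cache,
+		struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
+{
+	uint32_t lkey;
+
+	/* Linear search in the per-queue top-half cache first. */
+	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
+				   MLX5_MR_CACHE_N, addr);
+	if (likely(lkey != UINT32_MAX))
+		return lkey;
+	/* Miss: fall back to the bottom-half (B-tree, then global cache). */
+	return mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl, addr, 1);
+}
+#endif /* MLX5_MR_DOC_EXAMPLE */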
+
+/**
+ * Release all the created MRs and resources in the global MR cache of a
+ * device.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ */
+void
+mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
+{
+ struct mlx5_mr *mr_next;
+
+ rte_rwlock_write_lock(&share_cache->rwlock);
+ /* Detach from MR list and move to free list. */
+ mr_next = LIST_FIRST(&share_cache->mr_list);
+ while (mr_next != NULL) {
+ struct mlx5_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
+ }
+ LIST_INIT(&share_cache->mr_list);
+ /* Free global cache. */
+ mlx5_mr_btree_free(&share_cache->cache);
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+ /* Free all remaining MRs. */
+ mlx5_mr_garbage_collect(share_cache);
+}
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ * Pointer to per-queue MR local cache.
+ */
+void
+mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
+{
+ /* Reset the most-recently-used index. */
+ mr_ctrl->mru = 0;
+ /* Reset the linear search array. */
+ mr_ctrl->head = 0;
+ memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ /* Reset the B-tree table. */
+ mr_ctrl->cache_bh.len = 1;
+ mr_ctrl->cache_bh.overflow = 0;
+ /* Update the generation number. */
+ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
+ (void *)mr_ctrl, mr_ctrl->cur_gen);
+}
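+
+/*
+ * Sketch (hypothetical, compiled out): how the datapath consumes the
+ * generation number. When the device generation advances (after an MR is
+ * destroyed and the global cache rebuilt), each queue flushes its local
+ * caches before the next lookup.
+ */
+#ifdef MLX5_MR_DOC_EXAMPLE
+static __rte_always_inline void
+example_mr_ctrl_sync(struct mlx5_mr_ctrl *mr_ctrl)
+{
+	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+		mlx5_mr_flush_local_cache(mr_ctrl);
+}
+#endif /* MLX5_MR_DOC_EXAMPLE */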
+
+/**
+ * Create a memory region for external memory, that is, memory which is not
+ * part of the DPDK memory segments.
+ *
+ * @param pd
+ * Pointer to ibv_pd of a device (net, regex, vdpa,...).
+ * @param addr
+ * Starting virtual address of memory.
+ * @param len
+ * Length of memory segment being mapped.
+ * @param socket_id
+ * Socket to allocate heap memory for the control structures.
+ *
+ * @return
+ * Pointer to MR structure on success, NULL otherwise.
+ */
+struct mlx5_mr *
+mlx5_create_mr_ext(struct ibv_pd *pd, uintptr_t addr, size_t len, int socket_id)
+{
+ struct mlx5_mr *mr = NULL;
+
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (mr == NULL)
+ return NULL;
+ mr->ibv_mr = mlx5_glue->reg_mr(pd, (void *)addr, len,
+ IBV_ACCESS_LOCAL_WRITE |
+ (haswell_broadwell_cpu ? 0 :
+ IBV_ACCESS_RELAXED_ORDERING));
+ if (mr->ibv_mr == NULL) {
+ DRV_LOG(WARNING,
+ "Fail to create a verbs MR for address (%p)",
+ (void *)addr);
+ rte_free(mr);
+ return NULL;
+ }
+	mr->msl = NULL; /* Mark it as external memory. */
+ mr->ms_bmp = NULL;
+ mr->ms_n = 1;
+ mr->ms_bmp_n = 1;
+ DRV_LOG(DEBUG,
+ "MR CREATED (%p) for external memory %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ (void *)mr, (void *)addr,
+ addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ return mr;
+}
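+
+/*
+ * Sketch (hypothetical, compiled out) of registering an externally allocated
+ * buffer and publishing it so the datapath can resolve its LKey like any
+ * other region; this mirrors the locking pattern used elsewhere in this file.
+ */
+#ifdef MLX5_MR_DOC_EXAMPLE
+static int
+example_register_ext(struct ibv_pd *pd,
+		     struct mlx5_mr_share_cache *share_cache,
+		     void *buf, size_t len, int socket_id)
+{
+	struct mlx5_mr *mr;
+
+	mr = mlx5_create_mr_ext(pd, (uintptr_t)buf, len, socket_id);
+	if (mr == NULL)
+		return -1;
+	rte_rwlock_write_lock(&share_cache->rwlock);
+	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
+	/* Make the new chunk visible to mlx5_mr_lookup_cache(). */
+	mlx5_mr_insert_cache(share_cache, mr);
+	rte_rwlock_write_unlock(&share_cache->rwlock);
+	return 0;
+}
+#endif /* MLX5_MR_DOC_EXAMPLE */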
+
+/**
+ * Dump all the created MRs and the global cache entries.
+ *
+ * @param share_cache
+ *   Pointer to a global shared MR cache.
+ */
+void
+mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
+{
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+ struct mlx5_mr *mr;
+ int mr_n = 0;
+ int chunk_n = 0;
+
+ rte_rwlock_read_lock(&share_cache->rwlock);
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &share_cache->mr_list, mr) {
+ unsigned int n;
+
+ DEBUG("MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ mr_n++, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mr_cache_entry ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (!ret.end)
+ break;
+ DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
+ }
+ }
+ DEBUG("Dumping global cache %p", (void *)share_cache);
+ mlx5_mr_btree_dump(&share_cache->cache);
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+#endif
+}
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mr.h b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mr.h
new file mode 100644
index 000000000..4ea47cd92
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_mr.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_COMMON_MR_H_
+#define RTE_PMD_MLX5_COMMON_MR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_rwlock.h>
+#include <rte_bitmap.h>
+#include <rte_memory.h>
+
+#include "mlx5_common_mp.h"
+
+/* Size of per-queue MR cache array for linear search. */
+#define MLX5_MR_CACHE_N 8
+#define MLX5_MR_BTREE_CACHE_N 256
+
+/* Memory Region object. */
+struct mlx5_mr {
+ LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
+ struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
+	/* Memseg list. NULL means the MR registers external memory. */
+	const struct rte_memseg_list *msl;
+ int ms_base_idx; /* Start index of msl->memseg_arr[]. */
+ int ms_n; /* Number of memsegs in use. */
+ uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
+ struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */
+};
+
+/* Cache entry for Memory Region. */
+struct mr_cache_entry {
+ uintptr_t start; /* Start address of MR. */
+ uintptr_t end; /* End address of MR. */
+ uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* MR Cache table for Binary search. */
+struct mlx5_mr_btree {
+ uint16_t len; /* Number of entries. */
+ uint16_t size; /* Total number of entries. */
+ int overflow; /* Mark failure of table expansion. */
+ struct mr_cache_entry (*table)[];
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx5_mr_ctrl {
+ uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
+ uint32_t cur_gen; /* Generation number saved to flush caches. */
+ uint16_t mru; /* Index of last hit entry in top-half cache. */
+ uint16_t head; /* Index of the oldest entry in top-half cache. */
+ struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
+ struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
+} __rte_packed;
+
+LIST_HEAD(mlx5_mr_list, mlx5_mr);
+
+/* Global per-device MR cache. */
+struct mlx5_mr_share_cache {
+ uint32_t dev_gen; /* Generation number to flush local caches. */
+ rte_rwlock_t rwlock; /* MR cache Lock. */
+ struct mlx5_mr_btree cache; /* Global MR cache table. */
+ struct mlx5_mr_list mr_list; /* Registered MR list. */
+ struct mlx5_mr_list mr_free_list; /* Freed MR list. */
+} __rte_packed;
+
+/**
+ * Look up an LKey in the given lookup table by linear search. First check the
+ * last-hit entry. On a miss, the entire array is searched. If found, update
+ * the last-hit index and return the LKey.
+ *
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param[in,out] cached_idx
+ * Pointer to last-hit index.
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
+ uint16_t n, uintptr_t addr)
+{
+ uint16_t idx;
+
+ if (likely(addr >= lkp_tbl[*cached_idx].start &&
+ addr < lkp_tbl[*cached_idx].end))
+ return lkp_tbl[*cached_idx].lkey;
+ for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+ if (addr >= lkp_tbl[idx].start &&
+ addr < lkp_tbl[idx].end) {
+ /* Found. */
+ *cached_idx = idx;
+ return lkp_tbl[idx].lkey;
+ }
+ }
+ return UINT32_MAX;
+}
+
+__rte_internal
+int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
+__rte_internal
+void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
+__rte_internal
+void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
+__rte_internal
+uint32_t mlx5_mr_addr2mr_bh(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mlx5_mr_ctrl *mr_ctrl,
+ uintptr_t addr, unsigned int mr_ext_memseg_en);
+__rte_internal
+void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
+__rte_internal
+void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
+__rte_internal
+void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
+__rte_internal
+void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
+__rte_internal
+int
+mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
+ struct mlx5_mr *mr);
+__rte_internal
+uint32_t
+mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr);
+__rte_internal
+struct mlx5_mr *
+mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr);
+__rte_internal
+struct mlx5_mr *
+mlx5_create_mr_ext(struct ibv_pd *pd, uintptr_t addr, size_t len,
+ int socket_id);
+__rte_internal
+uint32_t
+mlx5_mr_create_primary(struct ibv_pd *pd,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr,
+ unsigned int mr_ext_memseg_en);
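+
+/*
+ * Per-queue setup sketch (hypothetical helper, compiled out via the
+ * never-defined MLX5_MR_DOC_EXAMPLE macro): each queue embeds a struct
+ * mlx5_mr_ctrl, points dev_gen_ptr at the device generation counter and
+ * initializes the bottom-half B-tree before first use.
+ */
+#ifdef MLX5_MR_DOC_EXAMPLE
+static inline int
+example_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
+		     int socket)
+{
+	*mr_ctrl = (struct mlx5_mr_ctrl){ 0 };
+	/* Poll this counter to detect global cache flushes. */
+	mr_ctrl->dev_gen_ptr = dev_gen_ptr;
+	/* Bottom-half B-tree cache, MLX5_MR_BTREE_CACHE_N entries. */
+	return mlx5_mr_btree_init(&mr_ctrl->cache_bh,
+				  MLX5_MR_BTREE_CACHE_N, socket);
+}
+#endif /* MLX5_MR_DOC_EXAMPLE */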
+
+#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_utils.h b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_utils.h
new file mode 100644
index 000000000..32c3adfab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_common_utils.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_COMMON_UTILS_H_
+#define RTE_PMD_MLX5_COMMON_UTILS_H_
+
+#include "mlx5_common.h"
+
+
+extern int mlx5_common_logtype;
+
+#define MLX5_COMMON_LOG_PREFIX "common_mlx5"
+/* Generic printf()-like logging macro with automatic line feed. */
+#define DRV_LOG(level, ...) \
+ PMD_DRV_LOG_(level, mlx5_common_logtype, MLX5_COMMON_LOG_PREFIX, \
+ __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
+ PMD_DRV_LOG_CPAREN)
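+
+/*
+ * Usage note (added here for clarity): DRV_LOG() takes an rte_log level name
+ * and a printf()-style format string; the line feed is appended by the macro
+ * machinery. For example:
+ *
+ *	DRV_LOG(DEBUG, "created object %p with id %u", (void *)obj, id);
+ */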
+
+#endif /* RTE_PMD_MLX5_COMMON_UTILS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/src/spdk/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
new file mode 100644
index 000000000..fba485e72
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -0,0 +1,1534 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright 2018 Mellanox Technologies, Ltd */
+
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_malloc.h>
+
+#include "mlx5_prm.h"
+#include "mlx5_devx_cmds.h"
+#include "mlx5_common_utils.h"
+
+
+/**
+ * Allocate flow counters via devx interface.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param dcs
+ * Pointer to counters properties structure to be filled by the routine.
+ * @param bulk_n_128
+ * Bulk counter numbers in 128 counters units.
+ *
+ * @return
+ *   Pointer to counter object on success, NULL otherwise and
+ *   rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
+{
+ struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
+ uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
+
+ if (!dcs) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(alloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+ MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
+ dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
+ sizeof(in), out, sizeof(out));
+ if (!dcs->obj) {
+ DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
+ rte_errno = errno;
+ rte_free(dcs);
+ return NULL;
+ }
+ dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+ return dcs;
+}
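+
+/*
+ * Sketch (hypothetical, compiled out via the never-defined
+ * MLX5_DEVX_DOC_EXAMPLE macro): allocating a bulk of 4 * 128 flow counters
+ * and releasing them. ctx is the device context from the glue open_device().
+ */
+#ifdef MLX5_DEVX_DOC_EXAMPLE
+static int
+example_counter_bulk(void *ctx)
+{
+	struct mlx5_devx_obj *dcs;
+
+	dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 4);
+	if (dcs == NULL)
+		return -rte_errno;
+	/* dcs->id is the base counter ID of the bulk. */
+	return mlx5_devx_cmd_destroy(dcs);
+}
+#endif /* MLX5_DEVX_DOC_EXAMPLE */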
+
+/**
+ * Query flow counters values.
+ *
+ * @param[in] dcs
+ * devx object that was obtained from mlx5_devx_cmd_fc_alloc.
+ * @param[in] clear
+ * Whether hardware should clear the counters after the query or not.
+ * @param[in] n_counters
+ *   0 to read a single counter, otherwise the number of counters to read.
+ * @param pkts
+ * The number of packets that matched the flow.
+ * @param bytes
+ * The number of bytes that matched the flow.
+ * @param mkey
+ * The mkey key for batch query.
+ * @param addr
+ * The address in the mkey range for batch query.
+ * @param cmd_comp
+ * The completion object for asynchronous batch query.
+ * @param async_id
+ * The ID to be returned in the asynchronous batch query response.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
+ int clear, uint32_t n_counters,
+ uint64_t *pkts, uint64_t *bytes,
+ uint32_t mkey, void *addr,
+ void *cmd_comp,
+ uint64_t async_id)
+{
+ int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter);
+ uint32_t out[out_len];
+ uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+ void *stats;
+ int rc;
+
+ MLX5_SET(query_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+ MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
+ MLX5_SET(query_flow_counter_in, in, clear, !!clear);
+
+ if (n_counters) {
+ MLX5_SET(query_flow_counter_in, in, num_of_counters,
+ n_counters);
+ MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
+ MLX5_SET(query_flow_counter_in, in, mkey, mkey);
+ MLX5_SET64(query_flow_counter_in, in, address,
+ (uint64_t)(uintptr_t)addr);
+ }
+ if (!cmd_comp)
+ rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
+ out_len);
+ else
+ rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
+ out_len, async_id,
+ cmd_comp);
+ if (rc) {
+ DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
+ rte_errno = rc;
+ return -rc;
+ }
+ if (!n_counters) {
+ stats = MLX5_ADDR_OF(query_flow_counter_out,
+ out, flow_statistics);
+ *pkts = MLX5_GET64(traffic_counter, stats, packets);
+ *bytes = MLX5_GET64(traffic_counter, stats, octets);
+ }
+ return 0;
+}
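+
+/*
+ * Sketch (hypothetical, compiled out): a synchronous single-counter read.
+ * Passing n_counters == 0 and cmd_comp == NULL selects the blocking
+ * one-counter path, so pkts/bytes are filled in directly.
+ */
+#ifdef MLX5_DEVX_DOC_EXAMPLE
+static int
+example_counter_read(struct mlx5_devx_obj *dcs)
+{
+	uint64_t pkts = 0;
+	uint64_t bytes = 0;
+
+	/* clear=0 keeps the HW counter running across reads. */
+	if (mlx5_devx_cmd_flow_counter_query(dcs, 0, 0, &pkts, &bytes,
+					     0, NULL, NULL, 0))
+		return -1;
+	/* pkts/bytes now hold the matched packet and octet counts. */
+	return 0;
+}
+#endif /* MLX5_DEVX_DOC_EXAMPLE */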
+
+/**
+ * Create a new mkey.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param[in] attr
+ * Attributes of the requested mkey.
+ *
+ * @return
+ *   Pointer to DevX mkey on success, NULL otherwise and rte_errno
+ *   is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_mkey_create(void *ctx,
+ struct mlx5_devx_mkey_attr *attr)
+{
+ struct mlx5_klm *klm_array = attr->klm_array;
+ int klm_num = attr->klm_num;
+ int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
+ (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
+ uint32_t in[in_size_dw];
+ uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+ void *mkc;
+ struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
+ size_t pgsize;
+ uint32_t translation_size;
+
+ if (!mkey) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ memset(in, 0, in_size_dw * 4);
+ pgsize = sysconf(_SC_PAGESIZE);
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ if (klm_num > 0) {
+ int i;
+ uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
+ klm_pas_mtt);
+ translation_size = RTE_ALIGN(klm_num, 4);
+ for (i = 0; i < klm_num; i++) {
+ MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
+ MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
+ MLX5_SET64(klm, klm, address, klm_array[i].address);
+ klm += MLX5_ST_SZ_BYTES(klm);
+ }
+ for (; i < (int)translation_size; i++) {
+ MLX5_SET(klm, klm, mkey, 0x0);
+ MLX5_SET64(klm, klm, address, 0x0);
+ klm += MLX5_ST_SZ_BYTES(klm);
+ }
+ MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
+ MLX5_MKC_ACCESS_MODE_KLM_FBS :
+ MLX5_MKC_ACCESS_MODE_KLM);
+ MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
+ } else {
+ translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
+ }
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ translation_size);
+ MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
+ MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
+ MLX5_SET(mkc, mkc, lw, 0x1);
+ MLX5_SET(mkc, mkc, lr, 0x1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, attr->pd);
+ MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
+ MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
+ if (attr->relaxed_ordering == 1) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
+ }
+ MLX5_SET64(mkc, mkc, start_addr, attr->addr);
+ MLX5_SET64(mkc, mkc, len, attr->size);
+ mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
+ sizeof(out));
+ if (!mkey->obj) {
+ DRV_LOG(ERR, "Can't create %sdirect mkey - error %d\n",
+ klm_num ? "an in" : "a ", errno);
+ rte_errno = errno;
+ rte_free(mkey);
+ return NULL;
+ }
+ mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
+ mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
+ return mkey;
+}
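+
+/*
+ * Sketch (hypothetical, compiled out): creating a direct (MTT) mkey over a
+ * umem-registered buffer. All attribute fields used here appear in the
+ * routine above; umem registration itself is elided.
+ */
+#ifdef MLX5_DEVX_DOC_EXAMPLE
+static struct mlx5_devx_obj *
+example_mkey_create(void *ctx, uint32_t pdn, uint32_t umem_id,
+		    uintptr_t addr, size_t size)
+{
+	struct mlx5_devx_mkey_attr attr = { 0 };
+
+	attr.addr = addr;
+	attr.size = size;
+	attr.umem_id = umem_id;
+	attr.pd = pdn;
+	/* klm_num == 0 selects the direct (MTT) access mode. */
+	return mlx5_devx_cmd_mkey_create(ctx, &attr);
+}
+#endif /* MLX5_DEVX_DOC_EXAMPLE */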
+
+/**
+ * Get status of devx command response.
+ * Mainly used for asynchronous commands.
+ *
+ * @param[in] out
+ * The out response buffer.
+ *
+ * @return
+ * 0 on success, non-zero value otherwise.
+ */
+int
+mlx5_devx_get_out_command_status(void *out)
+{
+ int status;
+
+ if (!out)
+ return -EINVAL;
+ status = MLX5_GET(query_flow_counter_out, out, status);
+ if (status) {
+ int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
+
+ DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
+ syndrome);
+ }
+ return status;
+}
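+
+/*
+ * Sketch (hypothetical, compiled out): validating the response buffer of an
+ * asynchronous batch query before its counter data is consumed. Retrieval of
+ * the completed buffer from the completion channel is owned by the caller
+ * and elided here.
+ */
+#ifdef MLX5_DEVX_DOC_EXAMPLE
+static int
+example_async_check(void *out_buf)
+{
+	if (mlx5_devx_get_out_command_status(out_buf)) {
+		DRV_LOG(ERR, "Asynchronous flow counter query failed");
+		return -1;
+	}
+	return 0;
+}
+#endif /* MLX5_DEVX_DOC_EXAMPLE */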
+
+/**
+ * Destroy any object allocated by a Devx API.
+ *
+ * @param[in] obj
+ * Pointer to a general object.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
+{
+ int ret;
+
+ if (!obj)
+ return 0;
+ ret = mlx5_glue->devx_obj_destroy(obj->obj);
+ rte_free(obj);
+ return ret;
+}
+
+/**
+ * Query NIC vport context.
+ * Fills minimal inline attribute.
+ *
+ * @param[in] ctx
+ *   ibv context returned from mlx5dv_open_device().
+ * @param[in] vport
+ *   vport index.
+ * @param[out] attr
+ * Attributes device values.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+static int
+mlx5_devx_cmd_query_nic_vport_context(void *ctx,
+ unsigned int vport,
+ struct mlx5_hca_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+ void *vctx;
+ int status, syndrome, rc;
+
+ /* Query NIC vport context to determine inline mode. */
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+ MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+ rc = mlx5_glue->devx_general_cmd(ctx,
+ in, sizeof(in),
+ out, sizeof(out));
+ if (rc)
+ goto error;
+ status = MLX5_GET(query_nic_vport_context_out, out, status);
+ syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
+ if (status) {
+ DRV_LOG(DEBUG, "Failed to query NIC vport context, "
+ "status %x, syndrome = %x",
+ status, syndrome);
+ return -1;
+ }
+ vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context);
+ attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
+ min_wqe_inline_mode);
+ return 0;
+error:
+ rc = (rc > 0) ? -rc : rc;
+ return rc;
+}
+
+/**
+ * Query NIC vDPA attributes.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param[out] vdpa_attr
+ * vDPA Attributes structure to fill.
+ */
+static void
+mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
+ struct mlx5_hca_vdpa_attr *vdpa_attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
+ void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+ int status, syndrome, rc;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
+ MLX5_HCA_CAP_OPMOD_GET_CUR);
+ rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
+ status = MLX5_GET(query_hca_cap_out, out, status);
+ syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+ if (rc || status) {
+ RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities,"
+ " status %x, syndrome = %x", status, syndrome);
+ vdpa_attr->valid = 0;
+ } else {
+ vdpa_attr->valid = 1;
+ vdpa_attr->desc_tunnel_offload_type =
+ MLX5_GET(virtio_emulation_cap, hcattr,
+ desc_tunnel_offload_type);
+ vdpa_attr->eth_frame_offload_type =
+ MLX5_GET(virtio_emulation_cap, hcattr,
+ eth_frame_offload_type);
+ vdpa_attr->virtio_version_1_0 =
+ MLX5_GET(virtio_emulation_cap, hcattr,
+ virtio_version_1_0);
+ vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
+ tso_ipv4);
+ vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
+ tso_ipv6);
+ vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
+ tx_csum);
+ vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
+ rx_csum);
+ vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
+ event_mode);
+ vdpa_attr->virtio_queue_type =
+ MLX5_GET(virtio_emulation_cap, hcattr,
+ virtio_queue_type);
+ vdpa_attr->log_doorbell_stride =
+ MLX5_GET(virtio_emulation_cap, hcattr,
+ log_doorbell_stride);
+ vdpa_attr->log_doorbell_bar_size =
+ MLX5_GET(virtio_emulation_cap, hcattr,
+ log_doorbell_bar_size);
+ vdpa_attr->doorbell_bar_offset =
+ MLX5_GET64(virtio_emulation_cap, hcattr,
+ doorbell_bar_offset);
+ vdpa_attr->max_num_virtio_queues =
+ MLX5_GET(virtio_emulation_cap, hcattr,
+ max_num_virtio_queues);
+ vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
+ umem_1_buffer_param_a);
+ vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
+ umem_1_buffer_param_b);
+ vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
+ umem_2_buffer_param_a);
+ vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
+ umem_2_buffer_param_b);
+ vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
+ umem_3_buffer_param_a);
+ vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
+ umem_3_buffer_param_b);
+ }
+}
+
+/**
+ * Query HCA attributes.
+ * Using these attributes we can check at run time whether the device
+ * has the required capabilities.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param[out] attr
+ * Attributes device values.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_query_hca_attr(void *ctx,
+ struct mlx5_hca_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
+ void *hcattr;
+ int status, syndrome, rc;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
+ MLX5_HCA_CAP_OPMOD_GET_CUR);
+
+ rc = mlx5_glue->devx_general_cmd(ctx,
+ in, sizeof(in), out, sizeof(out));
+ if (rc)
+ goto error;
+ status = MLX5_GET(query_hca_cap_out, out, status);
+ syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+ if (status) {
+ DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
+ "status %x, syndrome = %x",
+ status, syndrome);
+ return -1;
+ }
+ hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+ attr->flow_counter_bulk_alloc_bitmap =
+ MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
+ attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
+ flow_counters_dump);
+ attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
+ log_max_rqt_size);
+ attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
+ attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
+ attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
+ log_max_hairpin_queues);
+ attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
+ log_max_hairpin_wq_data_sz);
+ attr->log_max_hairpin_num_packets = MLX5_GET
+ (cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
+ attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
+ attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
+ relaxed_ordering_write);
+ attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
+ relaxed_ordering_read);
+ attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
+ eth_net_offloads);
+ attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
+ attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
+ flex_parser_protocols);
+ attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
+ attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+ general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
+ if (attr->qos.sup) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
+ MLX5_HCA_CAP_OPMOD_GET_CUR);
+ rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (rc)
+ goto error;
+ if (status) {
+ DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
+ " status %x, syndrome = %x",
+ status, syndrome);
+ return -1;
+ }
+ hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+ attr->qos.srtcm_sup =
+ MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
+ attr->qos.log_max_flow_meter =
+ MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
+ attr->qos.flow_meter_reg_c_ids =
+ MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
+ attr->qos.flow_meter_reg_share =
+ MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+ }
+ if (attr->vdpa.valid)
+ mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
+ if (!attr->eth_net_offloads)
+ return 0;
+
+ /* Query HCA offloads for Ethernet protocol. */
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
+ MLX5_HCA_CAP_OPMOD_GET_CUR);
+
+ rc = mlx5_glue->devx_general_cmd(ctx,
+ in, sizeof(in),
+ out, sizeof(out));
+ if (rc) {
+ attr->eth_net_offloads = 0;
+ goto error;
+ }
+ status = MLX5_GET(query_hca_cap_out, out, status);
+ syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+ if (status) {
+ DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
+ "status %x, syndrome = %x",
+ status, syndrome);
+ attr->eth_net_offloads = 0;
+ return -1;
+ }
+ hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+ attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, wqe_vlan_insert);
+ attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
+ lro_cap);
+ attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, tunnel_lro_gre);
+ attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, tunnel_lro_vxlan);
+ attr->lro_max_msg_sz_mode = MLX5_GET
+ (per_protocol_networking_offload_caps,
+ hcattr, lro_max_msg_sz_mode);
+ for (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
+ attr->lro_timer_supported_periods[i] =
+ MLX5_GET(per_protocol_networking_offload_caps, hcattr,
+ lro_timer_supported_periods[i]);
+ }
+ attr->tunnel_stateless_geneve_rx =
+ MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, tunnel_stateless_geneve_rx);
+ attr->geneve_max_opt_len =
+ MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, max_geneve_opt_len);
+ attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, wqe_inline_mode);
+ attr->tunnel_stateless_gtp = MLX5_GET
+ (per_protocol_networking_offload_caps,
+ hcattr, tunnel_stateless_gtp);
+ if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+ return 0;
+ if (attr->eth_virt) {
+ rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
+ if (rc) {
+ attr->eth_virt = 0;
+ goto error;
+ }
+ }
+ return 0;
+error:
+ rc = (rc > 0) ? -rc : rc;
+ return rc;
+}
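+
+/*
+ * Sketch (hypothetical, compiled out): probing device capabilities once at
+ * startup and branching on them, which is how the PMDs consume this query.
+ */
+#ifdef MLX5_DEVX_DOC_EXAMPLE
+static int
+example_probe_caps(void *ctx)
+{
+	struct mlx5_hca_attr attr = { 0 };
+
+	if (mlx5_devx_cmd_query_hca_attr(ctx, &attr))
+		return -1;
+	if (!attr.eth_net_offloads || !attr.lro_cap)
+		DRV_LOG(DEBUG, "LRO is not supported on this device");
+	return 0;
+}
+#endif /* MLX5_DEVX_DOC_EXAMPLE */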
+
+/**
+ * Query TIS transport domain from QP verbs object using DevX API.
+ *
+ * @param[in] qp
+ *   Pointer to verbs QP returned by ibv_create_qp().
+ * @param[in] tis_num
+ * TIS number of TIS to query.
+ * @param[out] tis_td
+ * Pointer to TIS transport domain variable, to be set by the routine.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
+ uint32_t *tis_td)
+{
+ uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
+ int rc;
+ void *tis_ctx;
+
+ MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
+ MLX5_SET(query_tis_in, in, tisn, tis_num);
+ rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
+ if (rc) {
+ DRV_LOG(ERR, "Failed to query QP using DevX");
+ return -rc;
+	}
+ tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
+ *tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
+ return 0;
+}
+
+/**
+ * Fill WQ data for DevX API command.
+ * Utility function for use when creating DevX objects containing a WQ.
+ *
+ * @param[in] wq_ctx
+ * Pointer to WQ context to fill with data.
+ * @param [in] wq_attr
+ * Pointer to WQ attributes structure to fill in WQ context.
+ */
+static void
+devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
+{
+ MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
+ MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
+ MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
+ MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
+ MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
+ MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
+ MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
+ MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
+ MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
+ MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
+ MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
+ MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
+ MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
+ MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
+ MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
+ MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
+ MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
+ MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
+ MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
+ wq_attr->log_hairpin_num_packets);
+ MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
+ MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
+ wq_attr->single_wqe_log_num_of_strides);
+ MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
+ MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
+ wq_attr->single_stride_log_num_of_bytes);
+ MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
+ MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
+ MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
+}
+
+/**
+ * Create RQ using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] rq_attr
+ * Pointer to create RQ attributes structure.
+ * @param [in] socket
+ * CPU socket ID for allocations.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_rq(void *ctx,
+ struct mlx5_devx_create_rq_attr *rq_attr,
+ int socket)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
+ void *rq_ctx, *wq_ctx;
+ struct mlx5_devx_wq_attr *wq_attr;
+ struct mlx5_devx_obj *rq = NULL;
+
+ rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
+ if (!rq) {
+ DRV_LOG(ERR, "Failed to allocate RQ data");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+ rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
+ MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
+ MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
+ MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
+ MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
+ MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
+ MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
+ MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
+ MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
+ MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
+ MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
+ MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
+ wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
+ wq_attr = &rq_attr->wq_attr;
+ devx_cmd_fill_wq_data(wq_ctx, wq_attr);
+ rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (!rq->obj) {
+ DRV_LOG(ERR, "Failed to create RQ using DevX");
+ rte_errno = errno;
+ rte_free(rq);
+ return NULL;
+ }
+ rq->id = MLX5_GET(create_rq_out, out, rqn);
+ return rq;
+}
+
+/**
+ * Modify RQ using DevX API.
+ *
+ * @param[in] rq
+ * Pointer to RQ object structure.
+ * @param [in] rq_attr
+ * Pointer to modify RQ attributes structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
+ struct mlx5_devx_modify_rq_attr *rq_attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
+ void *rq_ctx, *wq_ctx;
+ int ret;
+
+ MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+ MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
+ MLX5_SET(modify_rq_in, in, rqn, rq->id);
+ MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
+ rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+ MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
+ if (rq_attr->modify_bitmask &
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
+ MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
+ if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
+ MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
+ if (rq_attr->modify_bitmask &
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
+ MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
+ MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
+ MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
+ if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
+ wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
+ MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
+ }
+ ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
+ out, sizeof(out));
+ if (ret) {
+ DRV_LOG(ERR, "Failed to modify RQ using DevX");
+ rte_errno = errno;
+ return -errno;
+ }
+ return ret;
+}
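+
+/*
+ * Usage sketch (illustrative only): moving a freshly created RQ from RST
+ * to RDY. Besides the state pair, only the fields selected by
+ * modify_bitmask are applied (state enum names assumed from mlx5_prm.h).
+ *
+ *	struct mlx5_devx_modify_rq_attr mod = {
+ *		.rq_state = MLX5_RQC_STATE_RST,
+ *		.state = MLX5_RQC_STATE_RDY,
+ *	};
+ *
+ *	if (mlx5_devx_cmd_modify_rq(rq, &mod))
+ *		return -rte_errno;
+ */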
+
+/**
+ * Create TIR using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] tir_attr
+ * Pointer to TIR attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_tir(void *ctx,
+ struct mlx5_devx_tir_attr *tir_attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
+ void *tir_ctx, *outer, *inner, *rss_key;
+ struct mlx5_devx_obj *tir = NULL;
+
+ tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
+ if (!tir) {
+ DRV_LOG(ERR, "Failed to allocate TIR data");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
+ MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
+ tir_attr->lro_timeout_period_usecs);
+ MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
+ MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
+ MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
+ MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
+ MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
+ tir_attr->tunneled_offload_en);
+ MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
+ MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
+ MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
+ MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
+ rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
+ memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN);
+ outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
+ MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
+ tir_attr->rx_hash_field_selector_outer.l3_prot_type);
+ MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
+ tir_attr->rx_hash_field_selector_outer.l4_prot_type);
+ MLX5_SET(rx_hash_field_select, outer, selected_fields,
+ tir_attr->rx_hash_field_selector_outer.selected_fields);
+ inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
+ MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
+ tir_attr->rx_hash_field_selector_inner.l3_prot_type);
+ MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
+ tir_attr->rx_hash_field_selector_inner.l4_prot_type);
+ MLX5_SET(rx_hash_field_select, inner, selected_fields,
+ tir_attr->rx_hash_field_selector_inner.selected_fields);
+ tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (!tir->obj) {
+ DRV_LOG(ERR, "Failed to create TIR using DevX");
+ rte_errno = errno;
+ rte_free(tir);
+ return NULL;
+ }
+ tir->id = MLX5_GET(create_tir_out, out, tirn);
+ return tir;
+}
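+
+/*
+ * Usage sketch (illustrative only): an RSS TIR dispatching to an RQT with
+ * a Toeplitz hash. The enum names are assumed from mlx5_prm.h; rqt, td and
+ * key are hypothetical objects prepared beforehand.
+ *
+ *	struct mlx5_devx_tir_attr tir_attr = {
+ *		.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,
+ *		.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ,
+ *		.indirect_table = rqt->id,
+ *		.transport_domain = td->id,
+ *	};
+ *
+ *	memcpy(tir_attr.rx_hash_toeplitz_key, key, MLX5_RSS_HASH_KEY_LEN);
+ *	tir = mlx5_devx_cmd_create_tir(ctx, &tir_attr);
+ */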
+
+/**
+ * Create RQT using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] rqt_attr
+ * Pointer to RQT attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_rqt(void *ctx,
+ struct mlx5_devx_rqt_attr *rqt_attr)
+{
+ uint32_t *in = NULL;
+ uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
+ rqt_attr->rqt_actual_size * sizeof(uint32_t);
+ uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
+ void *rqt_ctx;
+ struct mlx5_devx_obj *rqt = NULL;
+ int i;
+
+ in = rte_calloc(__func__, 1, inlen, 0);
+ if (!in) {
+ DRV_LOG(ERR, "Failed to allocate RQT IN data");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
+ if (!rqt) {
+ DRV_LOG(ERR, "Failed to allocate RQT data");
+ rte_errno = ENOMEM;
+ rte_free(in);
+ return NULL;
+ }
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+ rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+ MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
+ MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
+ MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
+ for (i = 0; i < rqt_attr->rqt_actual_size; i++)
+ MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
+ rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
+ rte_free(in);
+ if (!rqt->obj) {
+ DRV_LOG(ERR, "Failed to create RQT using DevX");
+ rte_errno = errno;
+ rte_free(rqt);
+ return NULL;
+ }
+ rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
+ return rqt;
+}
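+
+/*
+ * Usage sketch (illustrative only): rq_list is a flexible array member of
+ * mlx5_devx_rqt_attr, so the attribute structure must be allocated with
+ * room for every entry. rqs[] is a hypothetical array of RQ objects.
+ *
+ *	const unsigned int n = 4;
+ *	struct mlx5_devx_rqt_attr *attr;
+ *	unsigned int i;
+ *
+ *	attr = rte_calloc(__func__, 1,
+ *			  sizeof(*attr) + n * sizeof(uint32_t), 0);
+ *	attr->rqt_max_size = n;
+ *	attr->rqt_actual_size = n;
+ *	for (i = 0; i < n; i++)
+ *		attr->rq_list[i] = rqs[i]->id;
+ *	rqt = mlx5_devx_cmd_create_rqt(ctx, attr);
+ *	rte_free(attr);
+ */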
+
+/**
+ * Modify RQT using DevX API.
+ *
+ * @param[in] rqt
+ * Pointer to RQT DevX object structure.
+ * @param [in] rqt_attr
+ * Pointer to RQT attributes structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
+ struct mlx5_devx_rqt_attr *rqt_attr)
+{
+ uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
+ rqt_attr->rqt_actual_size * sizeof(uint32_t);
+ uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
+ uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
+ void *rqt_ctx;
+ int i;
+ int ret;
+
+ if (!in) {
+ DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+ MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
+ MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
+ rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
+ MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
+ MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
+ MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
+ for (i = 0; i < rqt_attr->rqt_actual_size; i++)
+ MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
+ ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
+ rte_free(in);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to modify RQT using DevX.");
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return ret;
+}
+
+/**
+ * Create SQ using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] sq_attr
+ * Pointer to SQ attributes structure.
+ * @param [in] socket
+ * CPU socket ID for allocations.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_sq(void *ctx,
+ struct mlx5_devx_create_sq_attr *sq_attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
+ void *sq_ctx;
+ void *wq_ctx;
+ struct mlx5_devx_wq_attr *wq_attr;
+ struct mlx5_devx_obj *sq = NULL;
+
+ sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
+ if (!sq) {
+ DRV_LOG(ERR, "Failed to allocate SQ data");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+ sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
+ MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
+ MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
+ MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
+ MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
+ sq_attr->allow_multi_pkt_send_wqe);
+ MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
+ sq_attr->min_wqe_inline_mode);
+ MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
+ MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
+ MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
+ MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
+ MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
+ MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
+ MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
+ sq_attr->packet_pacing_rate_limit_index);
+ MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
+ MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
+ wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
+ wq_attr = &sq_attr->wq_attr;
+ devx_cmd_fill_wq_data(wq_ctx, wq_attr);
+ sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (!sq->obj) {
+ DRV_LOG(ERR, "Failed to create SQ using DevX");
+ rte_errno = errno;
+ rte_free(sq);
+ return NULL;
+ }
+ sq->id = MLX5_GET(create_sq_out, out, sqn);
+ return sq;
+}
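+
+/*
+ * Usage sketch (illustrative only): an SQ is bound to a completion queue
+ * and a TIS created earlier (both hypothetical here), then started via
+ * mlx5_devx_cmd_modify_sq().
+ *
+ *	sq_attr.cqn = cq->id;
+ *	sq_attr.tis_lst_sz = 1;
+ *	sq_attr.tis_num = tis->id;
+ *	sq = mlx5_devx_cmd_create_sq(ctx, &sq_attr);
+ */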
+
+/**
+ * Modify SQ using DevX API.
+ *
+ * @param[in] sq
+ * Pointer to SQ object structure.
+ * @param [in] sq_attr
+ * Pointer to SQ attributes structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
+ struct mlx5_devx_modify_sq_attr *sq_attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+ void *sq_ctx;
+ int ret;
+
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+ MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
+ MLX5_SET(modify_sq_in, in, sqn, sq->id);
+ sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
+ MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
+ MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
+ ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
+ out, sizeof(out));
+ if (ret) {
+ DRV_LOG(ERR, "Failed to modify SQ using DevX");
+ rte_errno = errno;
+ return -errno;
+ }
+ return ret;
+}
+
+/**
+ * Create TIS using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] tis_attr
+ * Pointer to TIS attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_tis(void *ctx,
+ struct mlx5_devx_tis_attr *tis_attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
+ struct mlx5_devx_obj *tis = NULL;
+ void *tis_ctx;
+
+ tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
+ if (!tis) {
+ DRV_LOG(ERR, "Failed to allocate TIS object");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+ tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
+ tis_attr->strict_lag_tx_port_affinity);
+ MLX5_SET(tisc, tis_ctx, lag_tx_port_affinity,
+ tis_attr->lag_tx_port_affinity);
+ MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
+ MLX5_SET(tisc, tis_ctx, transport_domain,
+ tis_attr->transport_domain);
+ tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (!tis->obj) {
+ DRV_LOG(ERR, "Failed to create TIS using DevX");
+ rte_errno = errno;
+ rte_free(tis);
+ return NULL;
+ }
+ tis->id = MLX5_GET(create_tis_out, out, tisn);
+ return tis;
+}
+
+/**
+ * Create transport domain using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_td(void *ctx)
+{
+ uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ struct mlx5_devx_obj *td = NULL;
+
+ td = rte_calloc(__func__, 1, sizeof(*td), 0);
+ if (!td) {
+ DRV_LOG(ERR, "Failed to allocate TD object");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+ td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (!td->obj) {
+ DRV_LOG(ERR, "Failed to create TIS using DevX");
+ rte_errno = errno;
+ rte_free(td);
+ return NULL;
+ }
+ td->id = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+ return td;
+}
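+
+/*
+ * Usage sketch (illustrative only): the transport domain ID returned here
+ * is what the TIR and TIS attributes expect in transport_domain.
+ *
+ *	struct mlx5_devx_obj *td = mlx5_devx_cmd_create_td(ctx);
+ *
+ *	if (td)
+ *		tis_attr.transport_domain = td->id;
+ */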
+
+/**
+ * Dump all flows to file.
+ *
+ * @param[in] fdb_domain
+ * FDB domain.
+ * @param[in] rx_domain
+ * RX domain.
+ * @param[in] tx_domain
+ * TX domain.
+ * @param[out] file
+ * Pointer to file stream.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
+ void *rx_domain __rte_unused,
+ void *tx_domain __rte_unused, FILE *file __rte_unused)
+{
+ int ret = 0;
+
+#ifdef HAVE_MLX5_DR_FLOW_DUMP
+ if (fdb_domain) {
+ ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
+ if (ret)
+ return ret;
+ }
+ MLX5_ASSERT(rx_domain);
+ ret = mlx5_glue->dr_dump_domain(file, rx_domain);
+ if (ret)
+ return ret;
+ MLX5_ASSERT(tx_domain);
+ ret = mlx5_glue->dr_dump_domain(file, tx_domain);
+#else
+ ret = ENOTSUP;
+#endif
+ return -ret;
+}
+
+/**
+ * Create CQ using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] attr
+ * Pointer to CQ attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
+ struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
+ 0);
+ void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ if (!cq_obj) {
+ DRV_LOG(ERR, "Failed to allocate CQ object memory.");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+ if (attr->db_umem_valid) {
+ MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
+ MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
+ MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
+ } else {
+ MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
+ }
+ MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
+ MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
+ MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
+ MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
+ MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
+ if (attr->q_umem_valid) {
+ MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
+ MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
+ MLX5_SET64(create_cq_in, in, cq_umem_offset,
+ attr->q_umem_offset);
+ }
+ cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+ sizeof(out));
+ if (!cq_obj->obj) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
+ rte_free(cq_obj);
+ return NULL;
+ }
+ cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
+ return cq_obj;
+}
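+
+/*
+ * Usage sketch (illustrative only): log_page_size is the absolute log2 of
+ * the queue page size; the code above rebases it by MLX5_ADAPTER_PAGE_SHIFT
+ * (12) before writing the PRM field, so 4 KiB pages are stored as 0.
+ * uar_id and eqn below are hypothetical values obtained separately.
+ *
+ *	struct mlx5_devx_cq_attr cq_attr = {
+ *		.log_cq_size = 6,	// 64 CQEs
+ *		.log_page_size = 12,	// 4 KiB pages
+ *		.uar_page_id = uar_id,
+ *		.eqn = eqn,
+ *	};
+ *
+ *	cq = mlx5_devx_cmd_create_cq(ctx, &cq_attr);
+ */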
+
+/**
+ * Create VIRTQ using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] attr
+ * Pointer to VIRTQ attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_virtq(void *ctx,
+ struct mlx5_devx_virtq_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
+ sizeof(*virtq_obj), 0);
+ void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
+ void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
+ void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
+
+ if (!virtq_obj) {
+ DRV_LOG(ERR, "Failed to allocate virtq data.");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_VIRTQ);
+ MLX5_SET16(virtio_net_q, virtq, hw_available_index,
+ attr->hw_available_index);
+ MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
+ MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
+ MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
+ MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
+ MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
+ MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
+ attr->virtio_version_1_0);
+ MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
+ MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
+ MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
+ MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
+ MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
+ MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
+ MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
+ MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
+ MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
+ MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
+ MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
+ MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
+ MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
+ MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
+ MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
+ MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
+ MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
+ MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
+ virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+ sizeof(out));
+ if (!virtq_obj->obj) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
+ rte_free(virtq_obj);
+ return NULL;
+ }
+ virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return virtq_obj;
+}
+
+/**
+ * Modify VIRTQ using DevX API.
+ *
+ * @param[in] virtq_obj
+ * Pointer to virtq object structure.
+ * @param [in] attr
+ * Pointer to modify virtq attributes structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
+ struct mlx5_devx_virtq_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
+ void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
+ void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_VIRTQ);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
+ MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
+ MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
+ switch (attr->type) {
+ case MLX5_VIRTQ_MODIFY_TYPE_STATE:
+ MLX5_SET16(virtio_net_q, virtq, state, attr->state);
+ break;
+ case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
+ MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
+ attr->dirty_bitmap_mkey);
+ MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
+ attr->dirty_bitmap_addr);
+ MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
+ attr->dirty_bitmap_size);
+ break;
+ case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
+ MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
+ attr->dirty_bitmap_dump_enable);
+ break;
+ default:
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
+ out, sizeof(out));
+ if (ret) {
+ DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
+ rte_errno = errno;
+ return -errno;
+ }
+ return ret;
+}
+
+/**
+ * Query VIRTQ using DevX API.
+ *
+ * @param[in] virtq_obj
+ * Pointer to virtq object structure.
+ * @param [in/out] attr
+ * Pointer to virtq attributes structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
+ struct mlx5_devx_virtq_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
+ void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
+ void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_VIRTQ);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
+ ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
+ out, sizeof(out));
+ if (ret) {
+ DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
+ rte_errno = errno;
+ return -errno;
+ }
+ attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
+ hw_available_index);
+ attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
+ return ret;
+}
+
+/**
+ * Create QP using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] attr
+ * Pointer to QP attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_qp(void *ctx,
+ struct mlx5_devx_qp_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
+ struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
+ 0);
+ void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ if (!qp_obj) {
+ DRV_LOG(ERR, "Failed to allocate QP data.");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pd, attr->pd);
+ if (attr->uar_index) {
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
+ MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ if (attr->sq_size) {
+ MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
+ MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
+ MLX5_SET(qpc, qpc, log_sq_size,
+ rte_log2_u32(attr->sq_size));
+ } else {
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ }
+ if (attr->rq_size) {
+ MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
+ MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
+ MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
+ MLX5_LOG_RQ_STRIDE_SHIFT);
+ MLX5_SET(qpc, qpc, log_rq_size,
+ rte_log2_u32(attr->rq_size));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ } else {
+ MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
+ }
+ if (attr->dbr_umem_valid) {
+ MLX5_SET(qpc, qpc, dbr_umem_valid,
+ attr->dbr_umem_valid);
+ MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
+ }
+ MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
+ MLX5_SET64(create_qp_in, in, wq_umem_offset,
+ attr->wq_umem_offset);
+ MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
+ MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
+ } else {
+ /* Special QP to be managed by FW - no SQ/RQ/CQ/UAR/DB record. */
+ MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ }
+ qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+ sizeof(out));
+ if (!qp_obj->obj) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
+ rte_free(qp_obj);
+ return NULL;
+ }
+ qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
+ return qp_obj;
+}
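+
+/*
+ * Usage sketch (illustrative only): leaving uar_index at 0 requests the
+ * FW-managed flavor above (no SQ/RQ/CQ/UAR/doorbell record), which is
+ * enough for control-path uses. pdn is a hypothetical protection domain
+ * number.
+ *
+ *	struct mlx5_devx_qp_attr qp_attr = { .pd = pdn, };
+ *	struct mlx5_devx_obj *qp = mlx5_devx_cmd_create_qp(ctx, &qp_attr);
+ */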
+
+/**
+ * Modify QP using DevX API.
+ * Currently supports only force loop-back QP.
+ *
+ * @param[in] qp
+ * Pointer to QP object structure.
+ * @param [in] qp_st_mod_op
+ * The QP state modification operation.
+ * @param [in] remote_qp_id
+ * The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
+ uint32_t remote_qp_id)
+{
+ union {
+ uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
+ uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
+ uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
+ } in;
+ union {
+ uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
+ uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
+ uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
+ } out;
+ void *qpc;
+ int ret;
+ unsigned int inlen;
+ unsigned int outlen;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
+ switch (qp_st_mod_op) {
+ case MLX5_CMD_OP_RST2INIT_QP:
+ MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, rre, 1);
+ MLX5_SET(qpc, qpc, rwe, 1);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ inlen = sizeof(in.rst2init);
+ outlen = sizeof(out.rst2init);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
+ MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, mtu, 1);
+ MLX5_SET(qpc, qpc, log_msg_max, 30);
+ MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
+ MLX5_SET(qpc, qpc, min_rnr_nak, 0);
+ inlen = sizeof(in.init2rtr);
+ outlen = sizeof(out.init2rtr);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
+ MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
+ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
+ MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
+ MLX5_SET(qpc, qpc, retry_count, 7);
+ MLX5_SET(qpc, qpc, rnr_retry, 7);
+ inlen = sizeof(in.rtr2rts);
+ outlen = sizeof(out.rtr2rts);
+ break;
+ default:
+ DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
+ qp_st_mod_op);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to modify QP using DevX.");
+ rte_errno = errno;
+ return -errno;
+ }
+ return ret;
+}
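+
+/*
+ * Usage sketch (illustrative only): a loop-back QP pair is brought up by
+ * walking each QP through RST -> INIT -> RTR -> RTS, naming the peer QP
+ * on the INIT2RTR transition (the other transitions ignore remote_qp_id).
+ *
+ *	mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RST2INIT_QP, peer->id);
+ *	mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_INIT2RTR_QP, peer->id);
+ *	mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RTR2RTS_QP, peer->id);
+ */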
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h b/src/spdk/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h
new file mode 100644
index 000000000..49b174a75
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -0,0 +1,377 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_DEVX_CMDS_H_
+#define RTE_PMD_MLX5_DEVX_CMDS_H_
+
+#include "mlx5_glue.h"
+#include "mlx5_prm.h"
+
+/* DevX creation object. */
+struct mlx5_devx_obj {
+ void *obj; /* The DV object. */
+ int id; /* The object ID. */
+};
+
+/* UMR memory buffer used to define 1 entry in indirect mkey. */
+struct mlx5_klm {
+ uint32_t byte_count;
+ uint32_t mkey;
+ uint64_t address;
+};
+
+/* This is a limitation of libibverbs: the length variable type is uint16_t. */
+#define MLX5_DEVX_MAX_KLM_ENTRIES ((UINT16_MAX - \
+ MLX5_ST_SZ_DW(create_mkey_in) * 4) / (MLX5_ST_SZ_DW(klm) * 4))
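+/*
+ * For example: a KLM entry is 4 DW (16 bytes), so with a create_mkey_in
+ * header of H bytes at most (65535 - H) / 16 KLM entries fit into a single
+ * mkey creation command (H depends on the PRM layout in mlx5_prm.h).
+ */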
+
+struct mlx5_devx_mkey_attr {
+ uint64_t addr;
+ uint64_t size;
+ uint32_t umem_id;
+ uint32_t pd;
+ uint32_t log_entity_size;
+ uint32_t pg_access:1;
+ uint32_t relaxed_ordering:1;
+ struct mlx5_klm *klm_array;
+ int klm_num;
+};
+
+/* HCA qos attributes. */
+struct mlx5_hca_qos_attr {
+ uint32_t sup:1; /* Whether QOS is supported. */
+ uint32_t srtcm_sup:1; /* Whether srTCM mode is supported. */
+ uint32_t flow_meter_reg_share:1;
+ /* Whether reg_c share is supported. */
+ uint8_t log_max_flow_meter;
+ /* Power of the maximum supported meters. */
+ uint8_t flow_meter_reg_c_ids;
+ /* Bitmap of the reg_c's available for flow meter to use. */
+};
+
+struct mlx5_hca_vdpa_attr {
+ uint8_t virtio_queue_type;
+ uint32_t valid:1;
+ uint32_t desc_tunnel_offload_type:1;
+ uint32_t eth_frame_offload_type:1;
+ uint32_t virtio_version_1_0:1;
+ uint32_t tso_ipv4:1;
+ uint32_t tso_ipv6:1;
+ uint32_t tx_csum:1;
+ uint32_t rx_csum:1;
+ uint32_t event_mode:3;
+ uint32_t log_doorbell_stride:5;
+ uint32_t log_doorbell_bar_size:5;
+ uint32_t max_num_virtio_queues;
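+ /*
+ * Per-umem size coefficients reported by the HCA capabilities; the
+ * required umem size is presumably a * queue_size + b bytes (an
+ * assumption based on how vDPA consumers use these values).
+ */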
+ struct {
+ uint32_t a;
+ uint32_t b;
+ } umems[3];
+ uint64_t doorbell_bar_offset;
+};
+
+/* HCA supports this number of time periods for LRO. */
+#define MLX5_LRO_NUM_SUPP_PERIODS 4
+
+/* HCA attributes. */
+struct mlx5_hca_attr {
+ uint32_t eswitch_manager:1;
+ uint32_t flow_counters_dump:1;
+ uint32_t log_max_rqt_size:5;
+ uint8_t flow_counter_bulk_alloc_bitmap;
+ uint32_t eth_net_offloads:1;
+ uint32_t eth_virt:1;
+ uint32_t wqe_vlan_insert:1;
+ uint32_t wqe_inline_mode:2;
+ uint32_t vport_inline_mode:3;
+ uint32_t tunnel_stateless_geneve_rx:1;
+ uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
+ uint32_t tunnel_stateless_gtp:1;
+ uint32_t lro_cap:1;
+ uint32_t tunnel_lro_gre:1;
+ uint32_t tunnel_lro_vxlan:1;
+ uint32_t lro_max_msg_sz_mode:2;
+ uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
+ uint32_t flex_parser_protocols;
+ uint32_t hairpin:1;
+ uint32_t log_max_hairpin_queues:5;
+ uint32_t log_max_hairpin_wq_data_sz:5;
+ uint32_t log_max_hairpin_num_packets:5;
+ uint32_t vhca_id:16;
+ uint32_t relaxed_ordering_write:1;
+ uint32_t relaxed_ordering_read:1;
+ struct mlx5_hca_qos_attr qos;
+ struct mlx5_hca_vdpa_attr vdpa;
+};
+
+struct mlx5_devx_wq_attr {
+ uint32_t wq_type:4;
+ uint32_t wq_signature:1;
+ uint32_t end_padding_mode:2;
+ uint32_t cd_slave:1;
+ uint32_t hds_skip_first_sge:1;
+ uint32_t log2_hds_buf_size:3;
+ uint32_t page_offset:5;
+ uint32_t lwm:16;
+ uint32_t pd:24;
+ uint32_t uar_page:24;
+ uint64_t dbr_addr;
+ uint32_t hw_counter;
+ uint32_t sw_counter;
+ uint32_t log_wq_stride:4;
+ uint32_t log_wq_pg_sz:5;
+ uint32_t log_wq_sz:5;
+ uint32_t dbr_umem_valid:1;
+ uint32_t wq_umem_valid:1;
+ uint32_t log_hairpin_num_packets:5;
+ uint32_t log_hairpin_data_sz:5;
+ uint32_t single_wqe_log_num_of_strides:4;
+ uint32_t two_byte_shift_en:1;
+ uint32_t single_stride_log_num_of_bytes:3;
+ uint32_t dbr_umem_id;
+ uint32_t wq_umem_id;
+ uint64_t wq_umem_offset;
+};
+
+/* Create RQ attributes structure, used by create RQ operation. */
+struct mlx5_devx_create_rq_attr {
+ uint32_t rlky:1;
+ uint32_t delay_drop_en:1;
+ uint32_t scatter_fcs:1;
+ uint32_t vsd:1;
+ uint32_t mem_rq_type:4;
+ uint32_t state:4;
+ uint32_t flush_in_error_en:1;
+ uint32_t hairpin:1;
+ uint32_t user_index:24;
+ uint32_t cqn:24;
+ uint32_t counter_set_id:8;
+ uint32_t rmpn:24;
+ struct mlx5_devx_wq_attr wq_attr;
+};
+
+/* Modify RQ attributes structure, used by modify RQ operation. */
+struct mlx5_devx_modify_rq_attr {
+ uint32_t rqn:24;
+ uint32_t rq_state:4; /* Current RQ state. */
+ uint32_t state:4; /* Required RQ state. */
+ uint32_t scatter_fcs:1;
+ uint32_t vsd:1;
+ uint32_t counter_set_id:8;
+ uint32_t hairpin_peer_sq:24;
+ uint32_t hairpin_peer_vhca:16;
+ uint64_t modify_bitmask;
+ uint32_t lwm:16; /* Contained WQ lwm. */
+};
+
+struct mlx5_rx_hash_field_select {
+ uint32_t l3_prot_type:1;
+ uint32_t l4_prot_type:1;
+ uint32_t selected_fields:30;
+};
+
+/* TIR attributes structure, used by TIR operations. */
+struct mlx5_devx_tir_attr {
+ uint32_t disp_type:4;
+ uint32_t lro_timeout_period_usecs:16;
+ uint32_t lro_enable_mask:4;
+ uint32_t lro_max_msg_sz:8;
+ uint32_t inline_rqn:24;
+ uint32_t rx_hash_symmetric:1;
+ uint32_t tunneled_offload_en:1;
+ uint32_t indirect_table:24;
+ uint32_t rx_hash_fn:4;
+ uint32_t self_lb_block:2;
+ uint32_t transport_domain:24;
+ uint8_t rx_hash_toeplitz_key[MLX5_RSS_HASH_KEY_LEN];
+ struct mlx5_rx_hash_field_select rx_hash_field_selector_outer;
+ struct mlx5_rx_hash_field_select rx_hash_field_selector_inner;
+};
+
+/* RQT attributes structure, used by RQT operations. */
+struct mlx5_devx_rqt_attr {
+ uint8_t rq_type;
+ uint32_t rqt_max_size:16;
+ uint32_t rqt_actual_size:16;
+ uint32_t rq_list[];
+};
+
+/* TIS attributes structure. */
+struct mlx5_devx_tis_attr {
+ uint32_t strict_lag_tx_port_affinity:1;
+ uint32_t tls_en:1;
+ uint32_t lag_tx_port_affinity:4;
+ uint32_t prio:4;
+ uint32_t transport_domain:24;
+};
+
+/* SQ attributes structure, used by SQ create operation. */
+struct mlx5_devx_create_sq_attr {
+ uint32_t rlky:1;
+ uint32_t cd_master:1;
+ uint32_t fre:1;
+ uint32_t flush_in_error_en:1;
+ uint32_t allow_multi_pkt_send_wqe:1;
+ uint32_t min_wqe_inline_mode:3;
+ uint32_t state:4;
+ uint32_t reg_umr:1;
+ uint32_t allow_swp:1;
+ uint32_t hairpin:1;
+ uint32_t user_index:24;
+ uint32_t cqn:24;
+ uint32_t packet_pacing_rate_limit_index:16;
+ uint32_t tis_lst_sz:16;
+ uint32_t tis_num:24;
+ struct mlx5_devx_wq_attr wq_attr;
+};
+
+/* SQ attributes structure, used by SQ modify operation. */
+struct mlx5_devx_modify_sq_attr {
+ uint32_t sq_state:4;
+ uint32_t state:4;
+ uint32_t hairpin_peer_rq:24;
+ uint32_t hairpin_peer_vhca:16;
+};
+
+/* CQ attributes structure, used by CQ operations. */
+struct mlx5_devx_cq_attr {
+ uint32_t q_umem_valid:1;
+ uint32_t db_umem_valid:1;
+ uint32_t use_first_only:1;
+ uint32_t overrun_ignore:1;
+ uint32_t log_cq_size:5;
+ uint32_t log_page_size:5;
+ uint32_t uar_page_id;
+ uint32_t q_umem_id;
+ uint64_t q_umem_offset;
+ uint32_t db_umem_id;
+ uint64_t db_umem_offset;
+ uint32_t eqn;
+ uint64_t db_addr;
+};
+
+/* Virtq attributes structure, used by VIRTQ operations. */
+struct mlx5_devx_virtq_attr {
+ uint16_t hw_available_index;
+ uint16_t hw_used_index;
+ uint16_t q_size;
+ uint32_t virtio_version_1_0:1;
+ uint32_t tso_ipv4:1;
+ uint32_t tso_ipv6:1;
+ uint32_t tx_csum:1;
+ uint32_t rx_csum:1;
+ uint32_t event_mode:3;
+ uint32_t state:4;
+ uint32_t dirty_bitmap_dump_enable:1;
+ uint32_t dirty_bitmap_mkey;
+ uint32_t dirty_bitmap_size;
+ uint32_t mkey;
+ uint32_t qp_id;
+ uint32_t queue_index;
+ uint32_t tis_id;
+ uint64_t dirty_bitmap_addr;
+ uint64_t type;
+ uint64_t desc_addr;
+ uint64_t used_addr;
+ uint64_t available_addr;
+ struct {
+ uint32_t id;
+ uint32_t size;
+ uint64_t offset;
+ } umems[3];
+};
+
+struct mlx5_devx_qp_attr {
+ uint32_t pd:24;
+ uint32_t uar_index:24;
+ uint32_t cqn:24;
+ uint32_t log_page_size:5;
+ uint32_t rq_size:17; /* Must be power of 2. */
+ uint32_t log_rq_stride:3;
+ uint32_t sq_size:17; /* Must be power of 2. */
+ uint32_t dbr_umem_valid:1;
+ uint32_t dbr_umem_id;
+ uint64_t dbr_address;
+ uint32_t wq_umem_id;
+ uint64_t wq_umem_offset;
+};
+
+/* mlx5_devx_cmds.c */
+
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(void *ctx,
+ uint32_t bulk_sz);
+__rte_internal
+int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);
+__rte_internal
+int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
+ int clear, uint32_t n_counters,
+ uint64_t *pkts, uint64_t *bytes,
+ uint32_t mkey, void *addr,
+ void *cmd_comp,
+ uint64_t async_id);
+__rte_internal
+int mlx5_devx_cmd_query_hca_attr(void *ctx,
+ struct mlx5_hca_attr *attr);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(void *ctx,
+ struct mlx5_devx_mkey_attr *attr);
+__rte_internal
+int mlx5_devx_get_out_command_status(void *out);
+__rte_internal
+int mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
+ uint32_t *tis_td);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(void *ctx,
+ struct mlx5_devx_create_rq_attr *rq_attr,
+ int socket);
+__rte_internal
+int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
+ struct mlx5_devx_modify_rq_attr *rq_attr);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(void *ctx,
+ struct mlx5_devx_tir_attr *tir_attr);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(void *ctx,
+ struct mlx5_devx_rqt_attr *rqt_attr);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(void *ctx,
+ struct mlx5_devx_create_sq_attr *sq_attr);
+__rte_internal
+int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
+ struct mlx5_devx_modify_sq_attr *sq_attr);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(void *ctx,
+ struct mlx5_devx_tis_attr *tis_attr);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_td(void *ctx);
+__rte_internal
+int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,
+ FILE *file);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_cq(void *ctx,
+ struct mlx5_devx_cq_attr *attr);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_virtq(void *ctx,
+ struct mlx5_devx_virtq_attr *attr);
+__rte_internal
+int mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
+ struct mlx5_devx_virtq_attr *attr);
+__rte_internal
+int mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
+ struct mlx5_devx_virtq_attr *attr);
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_qp(void *ctx,
+ struct mlx5_devx_qp_attr *attr);
+__rte_internal
+int mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp,
+ uint32_t qp_st_mod_op, uint32_t remote_qp_id);
+__rte_internal
+int mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
+ struct mlx5_devx_rqt_attr *rqt_attr);
+
+#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_glue.c b/src/spdk/dpdk/drivers/common/mlx5/mlx5_glue.c
new file mode 100644
index 000000000..f270f677b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_glue.c
@@ -0,0 +1,1294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <stdalign.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+/*
+ * Not needed by this file; included to work around the lack of off_t
+ * definition for mlx5dv.h with unpatched rdma-core versions.
+ */
+#include <sys/types.h>
+
+#include "mlx5_glue.h"
+
+static int
+mlx5_glue_fork_init(void)
+{
+ return ibv_fork_init();
+}
+
+static struct ibv_pd *
+mlx5_glue_alloc_pd(struct ibv_context *context)
+{
+ return ibv_alloc_pd(context);
+}
+
+static int
+mlx5_glue_dealloc_pd(struct ibv_pd *pd)
+{
+ return ibv_dealloc_pd(pd);
+}
+
+static struct ibv_device **
+mlx5_glue_get_device_list(int *num_devices)
+{
+ return ibv_get_device_list(num_devices);
+}
+
+static void
+mlx5_glue_free_device_list(struct ibv_device **list)
+{
+ ibv_free_device_list(list);
+}
+
+static struct ibv_context *
+mlx5_glue_open_device(struct ibv_device *device)
+{
+ return ibv_open_device(device);
+}
+
+static int
+mlx5_glue_close_device(struct ibv_context *context)
+{
+ return ibv_close_device(context);
+}
+
+static int
+mlx5_glue_query_device(struct ibv_context *context,
+ struct ibv_device_attr *device_attr)
+{
+ return ibv_query_device(context, device_attr);
+}
+
+static int
+mlx5_glue_query_device_ex(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr)
+{
+ return ibv_query_device_ex(context, input, attr);
+}
+
+static int
+mlx5_glue_query_rt_values_ex(struct ibv_context *context,
+ struct ibv_values_ex *values)
+{
+ return ibv_query_rt_values_ex(context, values);
+}
+
+static int
+mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr)
+{
+ return ibv_query_port(context, port_num, port_attr);
+}
+
+static struct ibv_comp_channel *
+mlx5_glue_create_comp_channel(struct ibv_context *context)
+{
+ return ibv_create_comp_channel(context);
+}
+
+static int
+mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
+{
+ return ibv_destroy_comp_channel(channel);
+}
+
+static struct ibv_cq *
+mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
+ struct ibv_comp_channel *channel, int comp_vector)
+{
+ return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
+}
+
+static int
+mlx5_glue_destroy_cq(struct ibv_cq *cq)
+{
+ return ibv_destroy_cq(cq);
+}
+
+static int
+mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+ void **cq_context)
+{
+ return ibv_get_cq_event(channel, cq, cq_context);
+}
+
+static void
+mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
+{
+ ibv_ack_cq_events(cq, nevents);
+}
+
+static struct ibv_rwq_ind_table *
+mlx5_glue_create_rwq_ind_table(struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr)
+{
+ return ibv_create_rwq_ind_table(context, init_attr);
+}
+
+static int
+mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
+{
+ return ibv_destroy_rwq_ind_table(rwq_ind_table);
+}
+
+static struct ibv_wq *
+mlx5_glue_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr)
+{
+ return ibv_create_wq(context, wq_init_attr);
+}
+
+static int
+mlx5_glue_destroy_wq(struct ibv_wq *wq)
+{
+ return ibv_destroy_wq(wq);
+}
+
+static int
+mlx5_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
+{
+ return ibv_modify_wq(wq, wq_attr);
+}
+
+static struct ibv_flow *
+mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
+{
+ return ibv_create_flow(qp, flow);
+}
+
+static int
+mlx5_glue_destroy_flow(struct ibv_flow *flow_id)
+{
+ return ibv_destroy_flow(flow_id);
+}
+
+static int
+mlx5_glue_destroy_flow_action(void *action)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_destroy(action);
+#else
+ struct mlx5dv_flow_action_attr *attr = action;
+ int res = 0;
+ switch (attr->type) {
+ case MLX5DV_FLOW_ACTION_TAG:
+ break;
+ default:
+ res = ibv_destroy_flow_action(attr->action);
+ break;
+ }
+ free(action);
+ return res;
+#endif
+#else
+ (void)action;
+ return ENOTSUP;
+#endif
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
+{
+ return ibv_create_qp(pd, qp_init_attr);
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp_ex(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex)
+{
+ return ibv_create_qp_ex(context, qp_init_attr_ex);
+}
+
+static int
+mlx5_glue_destroy_qp(struct ibv_qp *qp)
+{
+ return ibv_destroy_qp(qp);
+}
+
+static int
+mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+ return ibv_modify_qp(qp, attr, attr_mask);
+}
+
+static struct ibv_mr *
+mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
+{
+ return ibv_reg_mr(pd, addr, length, access);
+}
+
+static struct ibv_mr *
+mlx5_glue_alloc_null_mr(struct ibv_pd *pd)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return ibv_alloc_null_mr(pd);
+#else
+ (void)pd;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dereg_mr(struct ibv_mr *mr)
+{
+ return ibv_dereg_mr(mr);
+}
+
+static struct ibv_counter_set *
+mlx5_glue_create_counter_set(struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+ (void)context;
+ (void)init_attr;
+ return NULL;
+#else
+ return ibv_create_counter_set(context, init_attr);
+#endif
+}
+
+static int
+mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+ (void)cs;
+ return ENOTSUP;
+#else
+ return ibv_destroy_counter_set(cs);
+#endif
+}
+
+static int
+mlx5_glue_describe_counter_set(struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+ (void)context;
+ (void)counter_set_id;
+ (void)cs_desc;
+ return ENOTSUP;
+#else
+ return ibv_describe_counter_set(context, counter_set_id, cs_desc);
+#endif
+}
+
+static int
+mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+ (void)query_attr;
+ (void)cs_data;
+ return ENOTSUP;
+#else
+ return ibv_query_counter_set(query_attr, cs_data);
+#endif
+}
+
+static struct ibv_counters *
+mlx5_glue_create_counters(struct ibv_context *context,
+ struct ibv_counters_init_attr *init_attr)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)context;
+ (void)init_attr;
+ errno = ENOTSUP;
+ return NULL;
+#else
+ return ibv_create_counters(context, init_attr);
+#endif
+}
+
+static int
+mlx5_glue_destroy_counters(struct ibv_counters *counters)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ return ENOTSUP;
+#else
+ return ibv_destroy_counters(counters);
+#endif
+}
+
+static int
+mlx5_glue_attach_counters(struct ibv_counters *counters,
+ struct ibv_counter_attach_attr *attr,
+ struct ibv_flow *flow)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ (void)attr;
+ (void)flow;
+ return ENOTSUP;
+#else
+ return ibv_attach_counters_point_flow(counters, attr, flow);
+#endif
+}
+
+static int
+mlx5_glue_query_counters(struct ibv_counters *counters,
+ uint64_t *counters_value,
+ uint32_t ncounters,
+ uint32_t flags)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ (void)counters_value;
+ (void)ncounters;
+ (void)flags;
+ return ENOTSUP;
+#else
+ return ibv_read_counters(counters, counters_value, ncounters, flags);
+#endif
+}
+
+static void
+mlx5_glue_ack_async_event(struct ibv_async_event *event)
+{
+ ibv_ack_async_event(event);
+}
+
+static int
+mlx5_glue_get_async_event(struct ibv_context *context,
+ struct ibv_async_event *event)
+{
+ return ibv_get_async_event(context, event);
+}
+
+static const char *
+mlx5_glue_port_state_str(enum ibv_port_state port_state)
+{
+ return ibv_port_state_str(port_state);
+}
+
+static struct ibv_cq *
+mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)
+{
+ return ibv_cq_ex_to_cq(cq);
+}
+
+static void *
+mlx5_glue_dr_create_flow_action_dest_flow_tbl(void *tbl)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_create_dest_table(tbl);
+#else
+ (void)tbl;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_flow_action_dest_port(void *domain, uint32_t port)
+{
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+ return mlx5dv_dr_action_create_dest_ib_port(domain, port);
+#else
+#ifdef HAVE_MLX5DV_DR_ESWITCH
+ return mlx5dv_dr_action_create_dest_vport(domain, port);
+#else
+ (void)domain;
+ (void)port;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_flow_action_drop(void)
+{
+#ifdef HAVE_MLX5DV_DR_ESWITCH
+ return mlx5dv_dr_action_create_drop();
+#else
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_flow_action_push_vlan(struct mlx5dv_dr_domain *domain,
+ rte_be32_t vlan_tag)
+{
+#ifdef HAVE_MLX5DV_DR_VLAN
+ return mlx5dv_dr_action_create_push_vlan(domain, vlan_tag);
+#else
+ (void)domain;
+ (void)vlan_tag;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_flow_action_pop_vlan(void)
+{
+#ifdef HAVE_MLX5DV_DR_VLAN
+ return mlx5dv_dr_action_create_pop_vlan();
+#else
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_flow_tbl(void *domain, uint32_t level)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_table_create(domain, level);
+#else
+ (void)domain;
+ (void)level;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_flow_tbl(void *tbl)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_table_destroy(tbl);
+#else
+ (void)tbl;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_domain(struct ibv_context *ctx,
+ enum mlx5dv_dr_domain_type domain)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_domain_create(ctx, domain);
+#else
+ (void)ctx;
+ (void)domain;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_domain(void *domain)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_domain_destroy(domain);
+#else
+ (void)domain;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+static struct ibv_cq_ex *
+mlx5_glue_dv_create_cq(struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr)
+{
+ return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);
+}
+
+static struct ibv_wq *
+mlx5_glue_dv_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr)
+{
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ (void)context;
+ (void)wq_attr;
+ (void)mlx5_wq_attr;
+ errno = ENOTSUP;
+ return NULL;
+#else
+ return mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);
+#endif
+}
+
+static int
+mlx5_glue_dv_query_device(struct ibv_context *ctx,
+ struct mlx5dv_context *attrs_out)
+{
+ return mlx5dv_query_device(ctx, attrs_out);
+}
+
+static int
+mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type, void *attr)
+{
+ return mlx5dv_set_context_attr(ibv_ctx, type, attr);
+}
+
+static int
+mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)
+{
+ return mlx5dv_init_obj(obj, obj_type);
+}
+
+static struct ibv_qp *
+mlx5_glue_dv_create_qp(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);
+#else
+ (void)context;
+ (void)qp_init_attr_ex;
+ (void)dv_qp_init_attr;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,
+ struct mlx5dv_flow_matcher_attr *matcher_attr,
+ void *tbl)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)context;
+ return mlx5dv_dr_matcher_create(tbl, matcher_attr->priority,
+ matcher_attr->match_criteria_enable,
+ matcher_attr->match_mask);
+#else
+ (void)tbl;
+ return mlx5dv_create_flow_matcher(context, matcher_attr);
+#endif
+#else
+ (void)context;
+ (void)matcher_attr;
+ (void)tbl;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow(void *matcher,
+ void *match_value,
+ size_t num_actions,
+ void *actions[])
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_rule_create(matcher, match_value, num_actions,
+ (struct mlx5dv_dr_action **)actions);
+#else
+ struct mlx5dv_flow_action_attr actions_attr[8];
+
+ if (num_actions > 8)
+ return NULL;
+ for (size_t i = 0; i < num_actions; i++)
+ actions_attr[i] =
+ *((struct mlx5dv_flow_action_attr *)(actions[i]));
+ return mlx5dv_create_flow(matcher, match_value,
+ num_actions, actions_attr);
+#endif
+#else
+ (void)matcher;
+ (void)match_value;
+ (void)num_actions;
+ (void)actions;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_create_flow_counter(counter_obj, offset);
+#else
+ struct mlx5dv_flow_action_attr *action;
+
+ (void)offset;
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX;
+ action->obj = counter_obj;
+ return action;
+#endif
+#else
+ (void)counter_obj;
+ (void)offset;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_create_dest_ibv_qp(qp);
+#else
+ struct mlx5dv_flow_action_attr *action;
+
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
+ action->obj = qp;
+ return action;
+#endif
+#else
+ (void)qp;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_action_dest_devx_tir(void *tir)
+{
+#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
+ return mlx5dv_dr_action_create_dest_devx_tir(tir);
+#else
+ (void)tir;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_action_modify_header
+ (struct ibv_context *ctx,
+ enum mlx5dv_flow_table_type ft_type,
+ void *domain, uint64_t flags,
+ size_t actions_sz,
+ uint64_t actions[])
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)ctx;
+ (void)ft_type;
+ return mlx5dv_dr_action_create_modify_header(domain, flags, actions_sz,
+ (__be64 *)actions);
+#else
+ struct mlx5dv_flow_action_attr *action;
+
+ (void)domain;
+ (void)flags;
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ action->action = mlx5dv_create_flow_action_modify_header
+ (ctx, actions_sz, actions, ft_type);
+ return action;
+#endif
+#else
+ (void)ctx;
+ (void)ft_type;
+ (void)domain;
+ (void)flags;
+ (void)actions_sz;
+ (void)actions;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_action_packet_reformat
+ (struct ibv_context *ctx,
+ enum mlx5dv_flow_action_packet_reformat_type reformat_type,
+ enum mlx5dv_flow_table_type ft_type,
+ struct mlx5dv_dr_domain *domain,
+ uint32_t flags, size_t data_sz, void *data)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)ctx;
+ (void)ft_type;
+ return mlx5dv_dr_action_create_packet_reformat(domain, flags,
+ reformat_type, data_sz,
+ data);
+#else
+ (void)domain;
+ (void)flags;
+ struct mlx5dv_flow_action_attr *action;
+
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ action->action = mlx5dv_create_flow_action_packet_reformat
+ (ctx, data_sz, data, reformat_type, ft_type);
+ return action;
+#endif
+#else
+ (void)ctx;
+ (void)reformat_type;
+ (void)ft_type;
+ (void)domain;
+ (void)flags;
+ (void)data_sz;
+ (void)data;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_action_tag(uint32_t tag)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_create_tag(tag);
+#else
+ struct mlx5dv_flow_action_attr *action;
+
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_TAG;
+ action->tag_value = tag;
+ return action;
+#endif
+#else
+ (void)tag;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_action_meter(struct mlx5dv_dr_flow_meter_attr *attr)
+{
+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
+ return mlx5dv_dr_action_create_flow_meter(attr);
+#else
+ (void)attr;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dv_modify_flow_action_meter(void *action,
+ struct mlx5dv_dr_flow_meter_attr *attr,
+ uint64_t modify_bits)
+{
+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
+ return mlx5dv_dr_action_modify_flow_meter(action, attr, modify_bits);
+#else
+ (void)action;
+ (void)attr;
+ (void)modify_bits;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+static int
+mlx5_glue_dv_destroy_flow(void *flow_id)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_rule_destroy(flow_id);
+#else
+ return ibv_destroy_flow(flow_id);
+#endif
+}
+
+static int
+mlx5_glue_dv_destroy_flow_matcher(void *matcher)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_matcher_destroy(matcher);
+#else
+ return mlx5dv_destroy_flow_matcher(matcher);
+#endif
+#else
+ (void)matcher;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+static struct ibv_context *
+mlx5_glue_dv_open_device(struct ibv_device *device)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_open_device(device,
+ &(struct mlx5dv_context_attr){
+ .flags = MLX5DV_CONTEXT_FLAGS_DEVX,
+ });
+#else
+ (void)device;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
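+
+/*
+ * Note (illustrative): the DevX wrappers below only work on a context
+ * opened through this function, since MLX5DV_CONTEXT_FLAGS_DEVX is what
+ * enables the DevX command interface; a plain ibv_open_device() context
+ * will not do. E.g.:
+ *
+ *	struct ibv_context *ctx = mlx5_glue->dv_open_device(ibv_dev);
+ */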
+
+static struct mlx5dv_devx_obj *
+mlx5_glue_devx_obj_create(struct ibv_context *ctx,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_obj_create(ctx, in, inlen, out, outlen);
+#else
+ (void)ctx;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_devx_obj_destroy(struct mlx5dv_devx_obj *obj)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_obj_destroy(obj);
+#else
+ (void)obj;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_obj_query(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_obj_query(obj, in, inlen, out, outlen);
+#else
+ (void)obj;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_obj_modify(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_obj_modify(obj, in, inlen, out, outlen);
+#else
+ (void)obj;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_general_cmd(struct ibv_context *ctx,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_general_cmd(ctx, in, inlen, out, outlen);
+#else
+ (void)ctx;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ return -ENOTSUP;
+#endif
+}
+
+static struct mlx5dv_devx_cmd_comp *
+mlx5_glue_devx_create_cmd_comp(struct ibv_context *ctx)
+{
+#ifdef HAVE_IBV_DEVX_ASYNC
+ return mlx5dv_devx_create_cmd_comp(ctx);
+#else
+ (void)ctx;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void
+mlx5_glue_devx_destroy_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp)
+{
+#ifdef HAVE_IBV_DEVX_ASYNC
+ mlx5dv_devx_destroy_cmd_comp(cmd_comp);
+#else
+ (void)cmd_comp;
+	errno = ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_obj_query_async(struct mlx5dv_devx_obj *obj, const void *in,
+ size_t inlen, size_t outlen, uint64_t wr_id,
+ struct mlx5dv_devx_cmd_comp *cmd_comp)
+{
+#ifdef HAVE_IBV_DEVX_ASYNC
+ return mlx5dv_devx_obj_query_async(obj, in, inlen, outlen, wr_id,
+ cmd_comp);
+#else
+ (void)obj;
+ (void)in;
+ (void)inlen;
+ (void)outlen;
+ (void)wr_id;
+ (void)cmd_comp;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_get_async_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp,
+ struct mlx5dv_devx_async_cmd_hdr *cmd_resp,
+ size_t cmd_resp_len)
+{
+#ifdef HAVE_IBV_DEVX_ASYNC
+ return mlx5dv_devx_get_async_cmd_comp(cmd_comp, cmd_resp,
+ cmd_resp_len);
+#else
+ (void)cmd_comp;
+ (void)cmd_resp;
+ (void)cmd_resp_len;
+ return -ENOTSUP;
+#endif
+}
+
+static struct mlx5dv_devx_umem *
+mlx5_glue_devx_umem_reg(struct ibv_context *context, void *addr, size_t size,
+ uint32_t access)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_umem_reg(context, addr, size, access);
+#else
+ (void)context;
+ (void)addr;
+ (void)size;
+ (void)access;
+	errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_devx_umem_dereg(struct mlx5dv_devx_umem *dv_devx_umem)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_umem_dereg(dv_devx_umem);
+#else
+ (void)dv_devx_umem;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_qp_query(struct ibv_qp *qp,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_QP
+ return mlx5dv_devx_qp_query(qp, in, inlen, out, outlen);
+#else
+ (void)qp;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+static int
+mlx5_glue_devx_port_query(struct ibv_context *ctx,
+ uint32_t port_num,
+ struct mlx5dv_devx_port *mlx5_devx_port)
+{
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+ return mlx5dv_query_devx_port(ctx, port_num, mlx5_devx_port);
+#else
+ (void)ctx;
+ (void)port_num;
+ (void)mlx5_devx_port;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+static int
+mlx5_glue_dr_dump_domain(FILE *file, void *domain)
+{
+#ifdef HAVE_MLX5_DR_FLOW_DUMP
+ return mlx5dv_dump_dr_domain(file, domain);
+#else
+ RTE_SET_USED(file);
+ RTE_SET_USED(domain);
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_query_eqn(struct ibv_context *ctx, uint32_t cpus,
+ uint32_t *eqn)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_query_eqn(ctx, cpus, eqn);
+#else
+ (void)ctx;
+ (void)cpus;
+ (void)eqn;
+ return -ENOTSUP;
+#endif
+}
+
+static struct mlx5dv_devx_event_channel *
+mlx5_glue_devx_create_event_channel(struct ibv_context *ctx, int flags)
+{
+#ifdef HAVE_IBV_DEVX_EVENT
+ return mlx5dv_devx_create_event_channel(ctx, flags);
+#else
+ (void)ctx;
+ (void)flags;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void
+mlx5_glue_devx_destroy_event_channel(struct mlx5dv_devx_event_channel *eventc)
+{
+#ifdef HAVE_IBV_DEVX_EVENT
+ mlx5dv_devx_destroy_event_channel(eventc);
+#else
+ (void)eventc;
+#endif
+}
+
+static int
+mlx5_glue_devx_subscribe_devx_event(struct mlx5dv_devx_event_channel *eventc,
+ struct mlx5dv_devx_obj *obj,
+ uint16_t events_sz, uint16_t events_num[],
+ uint64_t cookie)
+{
+#ifdef HAVE_IBV_DEVX_EVENT
+ return mlx5dv_devx_subscribe_devx_event(eventc, obj, events_sz,
+ events_num, cookie);
+#else
+ (void)eventc;
+ (void)obj;
+ (void)events_sz;
+ (void)events_num;
+ (void)cookie;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_subscribe_devx_event_fd(struct mlx5dv_devx_event_channel *eventc,
+ int fd, struct mlx5dv_devx_obj *obj,
+ uint16_t event_num)
+{
+#ifdef HAVE_IBV_DEVX_EVENT
+ return mlx5dv_devx_subscribe_devx_event_fd(eventc, fd, obj, event_num);
+#else
+ (void)eventc;
+ (void)fd;
+ (void)obj;
+ (void)event_num;
+ return -ENOTSUP;
+#endif
+}
+
+static ssize_t
+mlx5_glue_devx_get_event(struct mlx5dv_devx_event_channel *eventc,
+ struct mlx5dv_devx_async_event_hdr *event_data,
+ size_t event_resp_len)
+{
+#ifdef HAVE_IBV_DEVX_EVENT
+ return mlx5dv_devx_get_event(eventc, event_data, event_resp_len);
+#else
+ (void)eventc;
+ (void)event_data;
+ (void)event_resp_len;
+ errno = ENOTSUP;
+ return -1;
+#endif
+}
+
+static struct mlx5dv_devx_uar *
+mlx5_glue_devx_alloc_uar(struct ibv_context *context, uint32_t flags)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_alloc_uar(context, flags);
+#else
+ (void)context;
+ (void)flags;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void
+mlx5_glue_devx_free_uar(struct mlx5dv_devx_uar *devx_uar)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ mlx5dv_devx_free_uar(devx_uar);
+#else
+ (void)devx_uar;
+#endif
+}
+
+static struct mlx5dv_var *
+mlx5_glue_dv_alloc_var(struct ibv_context *context, uint32_t flags)
+{
+#ifdef HAVE_IBV_VAR
+ return mlx5dv_alloc_var(context, flags);
+#else
+ (void)context;
+ (void)flags;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void
+mlx5_glue_dv_free_var(struct mlx5dv_var *var)
+{
+#ifdef HAVE_IBV_VAR
+ mlx5dv_free_var(var);
+#else
+ (void)var;
+ errno = ENOTSUP;
+#endif
+}
+
+__rte_cache_aligned
+const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
+ .version = MLX5_GLUE_VERSION,
+ .fork_init = mlx5_glue_fork_init,
+ .alloc_pd = mlx5_glue_alloc_pd,
+ .dealloc_pd = mlx5_glue_dealloc_pd,
+ .get_device_list = mlx5_glue_get_device_list,
+ .free_device_list = mlx5_glue_free_device_list,
+ .open_device = mlx5_glue_open_device,
+ .close_device = mlx5_glue_close_device,
+ .query_device = mlx5_glue_query_device,
+ .query_device_ex = mlx5_glue_query_device_ex,
+ .query_rt_values_ex = mlx5_glue_query_rt_values_ex,
+ .query_port = mlx5_glue_query_port,
+ .create_comp_channel = mlx5_glue_create_comp_channel,
+ .destroy_comp_channel = mlx5_glue_destroy_comp_channel,
+ .create_cq = mlx5_glue_create_cq,
+ .destroy_cq = mlx5_glue_destroy_cq,
+ .get_cq_event = mlx5_glue_get_cq_event,
+ .ack_cq_events = mlx5_glue_ack_cq_events,
+ .create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,
+ .destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,
+ .create_wq = mlx5_glue_create_wq,
+ .destroy_wq = mlx5_glue_destroy_wq,
+ .modify_wq = mlx5_glue_modify_wq,
+ .create_flow = mlx5_glue_create_flow,
+ .destroy_flow = mlx5_glue_destroy_flow,
+ .destroy_flow_action = mlx5_glue_destroy_flow_action,
+ .create_qp = mlx5_glue_create_qp,
+ .create_qp_ex = mlx5_glue_create_qp_ex,
+ .destroy_qp = mlx5_glue_destroy_qp,
+ .modify_qp = mlx5_glue_modify_qp,
+ .reg_mr = mlx5_glue_reg_mr,
+ .alloc_null_mr = mlx5_glue_alloc_null_mr,
+ .dereg_mr = mlx5_glue_dereg_mr,
+ .create_counter_set = mlx5_glue_create_counter_set,
+ .destroy_counter_set = mlx5_glue_destroy_counter_set,
+ .describe_counter_set = mlx5_glue_describe_counter_set,
+ .query_counter_set = mlx5_glue_query_counter_set,
+ .create_counters = mlx5_glue_create_counters,
+ .destroy_counters = mlx5_glue_destroy_counters,
+ .attach_counters = mlx5_glue_attach_counters,
+ .query_counters = mlx5_glue_query_counters,
+ .ack_async_event = mlx5_glue_ack_async_event,
+ .get_async_event = mlx5_glue_get_async_event,
+ .port_state_str = mlx5_glue_port_state_str,
+ .cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
+ .dr_create_flow_action_dest_flow_tbl =
+ mlx5_glue_dr_create_flow_action_dest_flow_tbl,
+ .dr_create_flow_action_dest_port =
+ mlx5_glue_dr_create_flow_action_dest_port,
+ .dr_create_flow_action_drop =
+ mlx5_glue_dr_create_flow_action_drop,
+ .dr_create_flow_action_push_vlan =
+ mlx5_glue_dr_create_flow_action_push_vlan,
+ .dr_create_flow_action_pop_vlan =
+ mlx5_glue_dr_create_flow_action_pop_vlan,
+ .dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,
+ .dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
+ .dr_create_domain = mlx5_glue_dr_create_domain,
+ .dr_destroy_domain = mlx5_glue_dr_destroy_domain,
+ .dv_create_cq = mlx5_glue_dv_create_cq,
+ .dv_create_wq = mlx5_glue_dv_create_wq,
+ .dv_query_device = mlx5_glue_dv_query_device,
+ .dv_set_context_attr = mlx5_glue_dv_set_context_attr,
+ .dv_init_obj = mlx5_glue_dv_init_obj,
+ .dv_create_qp = mlx5_glue_dv_create_qp,
+ .dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,
+ .dv_create_flow = mlx5_glue_dv_create_flow,
+ .dv_create_flow_action_counter =
+ mlx5_glue_dv_create_flow_action_counter,
+ .dv_create_flow_action_dest_ibv_qp =
+ mlx5_glue_dv_create_flow_action_dest_ibv_qp,
+ .dv_create_flow_action_dest_devx_tir =
+ mlx5_glue_dv_create_flow_action_dest_devx_tir,
+ .dv_create_flow_action_modify_header =
+ mlx5_glue_dv_create_flow_action_modify_header,
+ .dv_create_flow_action_packet_reformat =
+ mlx5_glue_dv_create_flow_action_packet_reformat,
+ .dv_create_flow_action_tag = mlx5_glue_dv_create_flow_action_tag,
+ .dv_create_flow_action_meter = mlx5_glue_dv_create_flow_action_meter,
+ .dv_modify_flow_action_meter = mlx5_glue_dv_modify_flow_action_meter,
+ .dv_destroy_flow = mlx5_glue_dv_destroy_flow,
+ .dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
+ .dv_open_device = mlx5_glue_dv_open_device,
+ .devx_obj_create = mlx5_glue_devx_obj_create,
+ .devx_obj_destroy = mlx5_glue_devx_obj_destroy,
+ .devx_obj_query = mlx5_glue_devx_obj_query,
+ .devx_obj_modify = mlx5_glue_devx_obj_modify,
+ .devx_general_cmd = mlx5_glue_devx_general_cmd,
+ .devx_create_cmd_comp = mlx5_glue_devx_create_cmd_comp,
+ .devx_destroy_cmd_comp = mlx5_glue_devx_destroy_cmd_comp,
+ .devx_obj_query_async = mlx5_glue_devx_obj_query_async,
+ .devx_get_async_cmd_comp = mlx5_glue_devx_get_async_cmd_comp,
+ .devx_umem_reg = mlx5_glue_devx_umem_reg,
+ .devx_umem_dereg = mlx5_glue_devx_umem_dereg,
+ .devx_qp_query = mlx5_glue_devx_qp_query,
+ .devx_port_query = mlx5_glue_devx_port_query,
+ .dr_dump_domain = mlx5_glue_dr_dump_domain,
+ .devx_query_eqn = mlx5_glue_devx_query_eqn,
+ .devx_create_event_channel = mlx5_glue_devx_create_event_channel,
+ .devx_destroy_event_channel = mlx5_glue_devx_destroy_event_channel,
+ .devx_subscribe_devx_event = mlx5_glue_devx_subscribe_devx_event,
+ .devx_subscribe_devx_event_fd = mlx5_glue_devx_subscribe_devx_event_fd,
+ .devx_get_event = mlx5_glue_devx_get_event,
+ .devx_alloc_uar = mlx5_glue_devx_alloc_uar,
+ .devx_free_uar = mlx5_glue_devx_free_uar,
+ .dv_alloc_var = mlx5_glue_dv_alloc_var,
+ .dv_free_var = mlx5_glue_dv_free_var,
+};
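+
+/*
+ * Illustrative note (not part of the original sources): consumers are
+ * expected to reach rdma-core only through the table above, e.g.:
+ *
+ *	struct ibv_context *ctx = NULL;
+ *	int n = 0;
+ *	struct ibv_device **list = mlx5_glue->get_device_list(&n);
+ *
+ *	if (n > 0)
+ *		ctx = mlx5_glue->dv_open_device(list[0]);
+ *	mlx5_glue->free_device_list(list);
+ *
+ * Keeping every libibverbs/mlx5dv call behind one structure lets the
+ * glue be built as a separate library and loaded at run time, so the
+ * PMD itself has no hard link-time dependency on rdma-core.
+ */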
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_glue.h b/src/spdk/dpdk/drivers/common/mlx5/mlx5_glue.h
new file mode 100644
index 000000000..81d6a2267
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_glue.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef MLX5_GLUE_H_
+#define MLX5_GLUE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_byteorder.h>
+
+#include "mlx5_autoconf.h"
+
+#ifndef MLX5_GLUE_VERSION
+#define MLX5_GLUE_VERSION ""
+#endif
+
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+struct ibv_counter_set;
+struct ibv_counter_set_data;
+struct ibv_counter_set_description;
+struct ibv_counter_set_init_attr;
+struct ibv_query_counter_set_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+struct ibv_counters;
+struct ibv_counters_init_attr;
+struct ibv_counter_attach_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+struct mlx5dv_qp_init_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+struct mlx5dv_wq_init_attr;
+#endif
+
+#ifndef HAVE_IBV_FLOW_DV_SUPPORT
+struct mlx5dv_flow_matcher;
+struct mlx5dv_flow_matcher_attr;
+struct mlx5dv_flow_action_attr;
+struct mlx5dv_flow_match_parameters;
+struct mlx5dv_dr_flow_meter_attr;
+struct ibv_flow_action;
+enum mlx5dv_flow_action_packet_reformat_type { packet_reformat_type = 0, };
+enum mlx5dv_flow_table_type { flow_table_type = 0, };
+#endif
+
+#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
+#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
+#endif
+
+#ifndef HAVE_IBV_DEVX_OBJ
+struct mlx5dv_devx_obj;
+struct mlx5dv_devx_umem { uint32_t umem_id; };
+struct mlx5dv_devx_uar { void *reg_addr; void *base_addr; uint32_t page_id; };
+#endif
+
+#ifndef HAVE_IBV_DEVX_ASYNC
+struct mlx5dv_devx_cmd_comp;
+struct mlx5dv_devx_async_cmd_hdr;
+#endif
+
+#ifndef HAVE_MLX5DV_DR
+enum mlx5dv_dr_domain_type { unused, };
+struct mlx5dv_dr_domain;
+#endif
+
+#ifndef HAVE_MLX5DV_DR_DEVX_PORT
+struct mlx5dv_devx_port;
+#endif
+
+#ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER
+struct mlx5dv_dr_flow_meter_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVX_EVENT
+struct mlx5dv_devx_event_channel { int fd; };
+struct mlx5dv_devx_async_event_hdr;
+#define MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA 1
+#endif
+
+#ifndef HAVE_IBV_VAR
+struct mlx5dv_var { uint32_t page_id; uint32_t length; off_t mmap_off;
+ uint64_t comp_mask; };
+#endif
+
+#ifndef HAVE_IBV_RELAXED_ORDERING
+#define IBV_ACCESS_RELAXED_ORDERING 0
+#endif
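+
+/*
+ * Illustrative note (not part of the original header): the blocks above
+ * provide just enough stub types and macros for struct mlx5_glue below
+ * to compile against older rdma-core releases. For example, without
+ * HAVE_IBV_DEVX_OBJ the minimal
+ *
+ *	struct mlx5dv_devx_umem { uint32_t umem_id; };
+ *
+ * stands in for the real mlx5dv type, while the matching glue
+ * implementation degrades to an ENOTSUP stub at run time.
+ */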
+
+/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
+struct mlx5_glue {
+ const char *version;
+ int (*fork_init)(void);
+ struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ struct ibv_device **(*get_device_list)(int *num_devices);
+ void (*free_device_list)(struct ibv_device **list);
+ struct ibv_context *(*open_device)(struct ibv_device *device);
+ int (*close_device)(struct ibv_context *context);
+ int (*query_device)(struct ibv_context *context,
+ struct ibv_device_attr *device_attr);
+ int (*query_device_ex)(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr);
+ int (*query_rt_values_ex)(struct ibv_context *context,
+ struct ibv_values_ex *values);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ struct ibv_comp_channel *(*create_comp_channel)
+ (struct ibv_context *context);
+ int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
+ struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+ void *cq_context,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ int (*get_cq_event)(struct ibv_comp_channel *channel,
+ struct ibv_cq **cq, void **cq_context);
+ void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
+ struct ibv_rwq_ind_table *(*create_rwq_ind_table)
+ (struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr);
+ int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+ struct ibv_wq *(*create_wq)(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr);
+ int (*destroy_wq)(struct ibv_wq *wq);
+ int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+ struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+ struct ibv_flow_attr *flow);
+ int (*destroy_flow)(struct ibv_flow *flow_id);
+ int (*destroy_flow_action)(void *action);
+ struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *qp_init_attr);
+ struct ibv_qp *(*create_qp_ex)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
+ size_t length, int access);
+ struct ibv_mr *(*alloc_null_mr)(struct ibv_pd *pd);
+ int (*dereg_mr)(struct ibv_mr *mr);
+ struct ibv_counter_set *(*create_counter_set)
+ (struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr);
+ int (*destroy_counter_set)(struct ibv_counter_set *cs);
+ int (*describe_counter_set)
+ (struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc);
+ int (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data);
+ struct ibv_counters *(*create_counters)
+ (struct ibv_context *context,
+ struct ibv_counters_init_attr *init_attr);
+ int (*destroy_counters)(struct ibv_counters *counters);
+ int (*attach_counters)(struct ibv_counters *counters,
+ struct ibv_counter_attach_attr *attr,
+ struct ibv_flow *flow);
+ int (*query_counters)(struct ibv_counters *counters,
+ uint64_t *counters_value,
+ uint32_t ncounters,
+ uint32_t flags);
+ void (*ack_async_event)(struct ibv_async_event *event);
+ int (*get_async_event)(struct ibv_context *context,
+ struct ibv_async_event *event);
+ const char *(*port_state_str)(enum ibv_port_state port_state);
+ struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
+ void *(*dr_create_flow_action_dest_flow_tbl)(void *tbl);
+ void *(*dr_create_flow_action_dest_port)(void *domain,
+ uint32_t port);
+	void *(*dr_create_flow_action_drop)(void);
+ void *(*dr_create_flow_action_push_vlan)
+ (struct mlx5dv_dr_domain *domain,
+ rte_be32_t vlan_tag);
+	void *(*dr_create_flow_action_pop_vlan)(void);
+ void *(*dr_create_flow_tbl)(void *domain, uint32_t level);
+ int (*dr_destroy_flow_tbl)(void *tbl);
+ void *(*dr_create_domain)(struct ibv_context *ctx,
+ enum mlx5dv_dr_domain_type domain);
+ int (*dr_destroy_domain)(void *domain);
+ struct ibv_cq_ex *(*dv_create_cq)
+ (struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr);
+ struct ibv_wq *(*dv_create_wq)
+ (struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr);
+ int (*dv_query_device)(struct ibv_context *ctx_in,
+ struct mlx5dv_context *attrs_out);
+ int (*dv_set_context_attr)(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type,
+ void *attr);
+ int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);
+ struct ibv_qp *(*dv_create_qp)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr);
+ void *(*dv_create_flow_matcher)
+ (struct ibv_context *context,
+ struct mlx5dv_flow_matcher_attr *matcher_attr,
+ void *tbl);
+ void *(*dv_create_flow)(void *matcher, void *match_value,
+ size_t num_actions, void *actions[]);
+ void *(*dv_create_flow_action_counter)(void *obj, uint32_t offset);
+ void *(*dv_create_flow_action_dest_ibv_qp)(void *qp);
+ void *(*dv_create_flow_action_dest_devx_tir)(void *tir);
+ void *(*dv_create_flow_action_modify_header)
+ (struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,
+ void *domain, uint64_t flags, size_t actions_sz,
+ uint64_t actions[]);
+ void *(*dv_create_flow_action_packet_reformat)
+ (struct ibv_context *ctx,
+ enum mlx5dv_flow_action_packet_reformat_type reformat_type,
+ enum mlx5dv_flow_table_type ft_type,
+ struct mlx5dv_dr_domain *domain,
+ uint32_t flags, size_t data_sz, void *data);
+ void *(*dv_create_flow_action_tag)(uint32_t tag);
+ void *(*dv_create_flow_action_meter)
+ (struct mlx5dv_dr_flow_meter_attr *attr);
+ int (*dv_modify_flow_action_meter)(void *action,
+ struct mlx5dv_dr_flow_meter_attr *attr, uint64_t modify_bits);
+ int (*dv_destroy_flow)(void *flow);
+ int (*dv_destroy_flow_matcher)(void *matcher);
+ struct ibv_context *(*dv_open_device)(struct ibv_device *device);
+ struct mlx5dv_var *(*dv_alloc_var)(struct ibv_context *context,
+ uint32_t flags);
+ void (*dv_free_var)(struct mlx5dv_var *var);
+ struct mlx5dv_devx_uar *(*devx_alloc_uar)(struct ibv_context *context,
+ uint32_t flags);
+ void (*devx_free_uar)(struct mlx5dv_devx_uar *devx_uar);
+ struct mlx5dv_devx_obj *(*devx_obj_create)
+ (struct ibv_context *ctx,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ int (*devx_obj_destroy)(struct mlx5dv_devx_obj *obj);
+ int (*devx_obj_query)(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ int (*devx_obj_modify)(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ int (*devx_general_cmd)(struct ibv_context *context,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ struct mlx5dv_devx_cmd_comp *(*devx_create_cmd_comp)
+ (struct ibv_context *context);
+ void (*devx_destroy_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp);
+ int (*devx_obj_query_async)(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ size_t outlen, uint64_t wr_id,
+ struct mlx5dv_devx_cmd_comp *cmd_comp);
+ int (*devx_get_async_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp,
+ struct mlx5dv_devx_async_cmd_hdr *resp,
+ size_t cmd_resp_len);
+ struct mlx5dv_devx_umem *(*devx_umem_reg)(struct ibv_context *context,
+ void *addr, size_t size,
+ uint32_t access);
+ int (*devx_umem_dereg)(struct mlx5dv_devx_umem *dv_devx_umem);
+ int (*devx_qp_query)(struct ibv_qp *qp,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ int (*devx_port_query)(struct ibv_context *ctx,
+ uint32_t port_num,
+ struct mlx5dv_devx_port *mlx5_devx_port);
+ int (*dr_dump_domain)(FILE *file, void *domain);
+ int (*devx_query_eqn)(struct ibv_context *context, uint32_t cpus,
+ uint32_t *eqn);
+ struct mlx5dv_devx_event_channel *(*devx_create_event_channel)
+ (struct ibv_context *context, int flags);
+ void (*devx_destroy_event_channel)
+ (struct mlx5dv_devx_event_channel *event_channel);
+ int (*devx_subscribe_devx_event)
+ (struct mlx5dv_devx_event_channel *event_channel,
+ struct mlx5dv_devx_obj *obj,
+ uint16_t events_sz,
+ uint16_t events_num[],
+ uint64_t cookie);
+ int (*devx_subscribe_devx_event_fd)
+ (struct mlx5dv_devx_event_channel *event_channel,
+ int fd,
+ struct mlx5dv_devx_obj *obj,
+ uint16_t event_num);
+ ssize_t (*devx_get_event)
+ (struct mlx5dv_devx_event_channel *event_channel,
+ struct mlx5dv_devx_async_event_hdr *event_data,
+ size_t event_resp_len);
+};
+
+extern const struct mlx5_glue *mlx5_glue;
+
+#endif /* MLX5_GLUE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_nl.c b/src/spdk/dpdk/drivers/common/mlx5/mlx5_nl.c
new file mode 100644
index 000000000..1a1033a40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_nl.c
@@ -0,0 +1,1721 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <linux/if_link.h>
+#include <linux/rtnetlink.h>
+#include <linux/genetlink.h>
+#include <net/if.h>
+#include <rdma/rdma_netlink.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdalign.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_atomic.h>
+
+#include "mlx5_nl.h"
+#include "mlx5_common_utils.h"
+#ifdef HAVE_DEVLINK
+#include <linux/devlink.h>
+#endif
+
+
+/* Size of the buffer to receive kernel messages */
+#define MLX5_NL_BUF_SIZE (32 * 1024)
+/* Send buffer size for the Netlink socket */
+#define MLX5_SEND_BUF_SIZE 32768
+/* Receive buffer size for the Netlink socket */
+#define MLX5_RECV_BUF_SIZE 32768
+
+/** Name prefix of VLAN devices created by the driver. */
+#define MLX5_VMWA_VLAN_DEVICE_PFX "evmlx"
+/*
+ * Define NDA_RTA as defined in iproute2 sources.
+ *
+ * See the iproute2 sources, file include/libnetlink.h.
+ */
+#ifndef MLX5_NDA_RTA
+#define MLX5_NDA_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
+#endif
+/*
+ * Define NLMSG_TAIL as defined in iproute2 sources.
+ *
+ * See the iproute2 sources, file include/libnetlink.h.
+ */
+#ifndef NLMSG_TAIL
+#define NLMSG_TAIL(nmsg) \
+ ((struct rtattr *)(((char *)(nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))
+#endif
+/*
+ * The following definitions are normally found in rdma/rdma_netlink.h,
+ * however they are so recent that most systems do not expose them yet.
+ */
+#ifndef HAVE_RDMA_NL_NLDEV
+#define RDMA_NL_NLDEV 5
+#endif
+#ifndef HAVE_RDMA_NLDEV_CMD_GET
+#define RDMA_NLDEV_CMD_GET 1
+#endif
+#ifndef HAVE_RDMA_NLDEV_CMD_PORT_GET
+#define RDMA_NLDEV_CMD_PORT_GET 5
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_INDEX
+#define RDMA_NLDEV_ATTR_DEV_INDEX 1
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_NAME
+#define RDMA_NLDEV_ATTR_DEV_NAME 2
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_PORT_INDEX
+#define RDMA_NLDEV_ATTR_PORT_INDEX 3
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX
+#define RDMA_NLDEV_ATTR_NDEV_INDEX 50
+#endif
+
+/* These are normally found in linux/if_link.h. */
+#ifndef HAVE_IFLA_NUM_VF
+#define IFLA_NUM_VF 21
+#endif
+#ifndef HAVE_IFLA_EXT_MASK
+#define IFLA_EXT_MASK 29
+#endif
+#ifndef HAVE_IFLA_PHYS_SWITCH_ID
+#define IFLA_PHYS_SWITCH_ID 36
+#endif
+#ifndef HAVE_IFLA_PHYS_PORT_NAME
+#define IFLA_PHYS_PORT_NAME 38
+#endif
+
+/*
+ * Some Devlink defines may be missed in old kernel versions,
+ * adjust used defines.
+ */
+#ifndef DEVLINK_GENL_NAME
+#define DEVLINK_GENL_NAME "devlink"
+#endif
+#ifndef DEVLINK_GENL_VERSION
+#define DEVLINK_GENL_VERSION 1
+#endif
+#ifndef DEVLINK_ATTR_BUS_NAME
+#define DEVLINK_ATTR_BUS_NAME 1
+#endif
+#ifndef DEVLINK_ATTR_DEV_NAME
+#define DEVLINK_ATTR_DEV_NAME 2
+#endif
+#ifndef DEVLINK_ATTR_PARAM
+#define DEVLINK_ATTR_PARAM 80
+#endif
+#ifndef DEVLINK_ATTR_PARAM_NAME
+#define DEVLINK_ATTR_PARAM_NAME 81
+#endif
+#ifndef DEVLINK_ATTR_PARAM_TYPE
+#define DEVLINK_ATTR_PARAM_TYPE 83
+#endif
+#ifndef DEVLINK_ATTR_PARAM_VALUES_LIST
+#define DEVLINK_ATTR_PARAM_VALUES_LIST 84
+#endif
+#ifndef DEVLINK_ATTR_PARAM_VALUE
+#define DEVLINK_ATTR_PARAM_VALUE 85
+#endif
+#ifndef DEVLINK_ATTR_PARAM_VALUE_DATA
+#define DEVLINK_ATTR_PARAM_VALUE_DATA 86
+#endif
+#ifndef DEVLINK_ATTR_PARAM_VALUE_CMODE
+#define DEVLINK_ATTR_PARAM_VALUE_CMODE 87
+#endif
+#ifndef DEVLINK_PARAM_CMODE_DRIVERINIT
+#define DEVLINK_PARAM_CMODE_DRIVERINIT 1
+#endif
+#ifndef DEVLINK_CMD_RELOAD
+#define DEVLINK_CMD_RELOAD 37
+#endif
+#ifndef DEVLINK_CMD_PARAM_GET
+#define DEVLINK_CMD_PARAM_GET 38
+#endif
+#ifndef DEVLINK_CMD_PARAM_SET
+#define DEVLINK_CMD_PARAM_SET 39
+#endif
+#ifndef NLA_FLAG
+#define NLA_FLAG 6
+#endif
+
+/* Add/remove MAC address through Netlink */
+struct mlx5_nl_mac_addr {
+ struct rte_ether_addr (*mac)[];
+	/**< MAC addresses handled by the device. */
+ int mac_n; /**< Number of addresses in the array. */
+};
+
+#define MLX5_NL_CMD_GET_IB_NAME (1 << 0)
+#define MLX5_NL_CMD_GET_IB_INDEX (1 << 1)
+#define MLX5_NL_CMD_GET_NET_INDEX (1 << 2)
+#define MLX5_NL_CMD_GET_PORT_INDEX (1 << 3)
+
+/** Data structure used by mlx5_nl_cmdget_cb(). */
+struct mlx5_nl_ifindex_data {
+ const char *name; /**< IB device name (in). */
+ uint32_t flags; /**< found attribute flags (out). */
+ uint32_t ibindex; /**< IB device index (out). */
+ uint32_t ifindex; /**< Network interface index (out). */
+ uint32_t portnum; /**< IB device max port number (out). */
+};
+
+static rte_atomic32_t atomic_sn = RTE_ATOMIC32_INIT(0);
+
+/* Generate Netlink sequence number. */
+#define MLX5_NL_SN_GENERATE ((uint32_t)rte_atomic32_add_return(&atomic_sn, 1))
+
+/**
+ * Opens a Netlink socket.
+ *
+ * @param protocol
+ * Netlink protocol (e.g. NETLINK_ROUTE, NETLINK_RDMA).
+ *
+ * @return
+ * A file descriptor on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+int
+mlx5_nl_init(int protocol)
+{
+ int fd;
+ int sndbuf_size = MLX5_SEND_BUF_SIZE;
+ int rcvbuf_size = MLX5_RECV_BUF_SIZE;
+ struct sockaddr_nl local = {
+ .nl_family = AF_NETLINK,
+ };
+ int ret;
+
+ fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol);
+ if (fd == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = bind(fd, (struct sockaddr *)&local, sizeof(local));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ return fd;
+error:
+ close(fd);
+ return -rte_errno;
+}
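+
+/*
+ * Illustrative usage sketch (not part of the original sources),
+ * assuming a hypothetical "eth0" interface:
+ *
+ *	int nl = mlx5_nl_init(NETLINK_ROUTE);
+ *
+ *	if (nl >= 0) {
+ *		mlx5_nl_promisc(nl, if_nametoindex("eth0"), 1);
+ *		close(nl);
+ *	}
+ *
+ * One socket can serve any number of requests; sequence numbers match
+ * replies to requests.
+ */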
+
+/**
+ * Send a request message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] nh
+ *   The Netlink message to send to the kernel.
+ * @param[in] sn
+ * Sequence number.
+ * @param[in] req
+ * Pointer to the request structure.
+ * @param[in] len
+ * Length of the request in bytes.
+ *
+ * @return
+ * The number of sent bytes on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_nl_request(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn, void *req,
+ int len)
+{
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov[2] = {
+ { .iov_base = nh, .iov_len = sizeof(*nh), },
+ { .iov_base = req, .iov_len = len, },
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = iov,
+ .msg_iovlen = 2,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = sn;
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return send_bytes;
+}
+
+/**
+ * Send a message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * The Netlink socket file descriptor used for communication.
+ * @param[in] nh
+ *   The Netlink message to send to the kernel.
+ * @param[in] sn
+ * Sequence number.
+ *
+ * @return
+ * The number of sent bytes on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_nl_send(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn)
+{
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov = {
+ .iov_base = nh,
+ .iov_len = nh->nlmsg_len,
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = sn;
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return send_bytes;
+}
+
+/**
+ * Receive a message from the kernel on the Netlink socket, following
+ * mlx5_nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The Netlink socket file descriptor used for communication.
+ * @param[in] sn
+ * Sequence number.
+ * @param[in] cb
+ * The callback function to call for each Netlink message received.
+ * @param[in, out] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
+ void *arg)
+{
+ struct sockaddr_nl sa;
+ void *buf = malloc(MLX5_RECV_BUF_SIZE);
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = MLX5_RECV_BUF_SIZE,
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ /* One message at a time */
+ .msg_iovlen = 1,
+ };
+ int multipart = 0;
+ int ret = 0;
+
+ if (!buf) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ do {
+ struct nlmsghdr *nh;
+ int recv_bytes = 0;
+
+ do {
+ recv_bytes = recvmsg(nlsk_fd, &msg, 0);
+ if (recv_bytes == -1) {
+ rte_errno = errno;
+ ret = -rte_errno;
+ goto exit;
+ }
+ nh = (struct nlmsghdr *)buf;
+ } while (nh->nlmsg_seq != sn);
+ for (;
+ NLMSG_OK(nh, (unsigned int)recv_bytes);
+ nh = NLMSG_NEXT(nh, recv_bytes)) {
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err_data = NLMSG_DATA(nh);
+
+ if (err_data->error < 0) {
+ rte_errno = -err_data->error;
+ ret = -rte_errno;
+ goto exit;
+ }
+ /* Ack message. */
+ ret = 0;
+ goto exit;
+ }
+ /* Multi-part msgs and their trailing DONE message. */
+ if (nh->nlmsg_flags & NLM_F_MULTI) {
+ if (nh->nlmsg_type == NLMSG_DONE) {
+ ret = 0;
+ goto exit;
+ }
+ multipart = 1;
+ }
+ if (cb) {
+ ret = cb(nh, arg);
+ if (ret < 0)
+ goto exit;
+ }
+ }
+ } while (multipart);
+exit:
+ free(buf);
+ return ret;
+}
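+
+/*
+ * Illustrative note (not part of the original sources): every Netlink
+ * query in this file is built from the helpers above using the same
+ * three-step pattern:
+ *
+ *	uint32_t sn = MLX5_NL_SN_GENERATE;
+ *
+ *	ret = mlx5_nl_send(fd, &req.hdr, sn);
+ *	if (ret >= 0)
+ *		ret = mlx5_nl_recv(fd, sn, callback, &result);
+ *
+ * mlx5_nl_recv() skips datagrams until the sequence number matches,
+ * then walks each nlmsghdr, stopping on an ACK/error message or on the
+ * NLMSG_DONE trailer of a multi-part dump.
+ */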
+
+/**
+ * Parse Netlink message to retrieve the bridge MAC address.
+ *
+ * @param nh
+ * Pointer to Netlink Message Header.
+ * @param arg
+ *   Data registered with this callback (struct mlx5_nl_mac_addr).
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_nl_mac_addr *data = arg;
+ struct ndmsg *r = NLMSG_DATA(nh);
+ struct rtattr *attribute;
+ int len;
+
+ len = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*r));
+ for (attribute = MLX5_NDA_RTA(r);
+ RTA_OK(attribute, len);
+ attribute = RTA_NEXT(attribute, len)) {
+ if (attribute->rta_type == NDA_LLADDR) {
+ if (data->mac_n == MLX5_MAX_MAC_ADDRESSES) {
+ DRV_LOG(WARNING,
+ "not enough room to finalize the"
+ " request");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+ char m[18];
+
+ rte_ether_format_addr(m, 18, RTA_DATA(attribute));
+ DRV_LOG(DEBUG, "bridge MAC address %s", m);
+#endif
+ memcpy(&(*data->mac)[data->mac_n++],
+ RTA_DATA(attribute), RTE_ETHER_ADDR_LEN);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Get bridge MAC addresses.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param[out] mac
+ *   Pointer to the array of MAC addresses to fill.
+ *   Its size should be MLX5_MAX_MAC_ADDRESSES.
+ * @param[out] mac_n
+ * Number of entries filled in MAC array.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_list(int nlsk_fd, unsigned int iface_idx,
+ struct rte_ether_addr (*mac)[], int *mac_n)
+{
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifm;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_GETNEIGH,
+ .nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ },
+ .ifm = {
+ .ifi_family = PF_BRIDGE,
+ .ifi_index = iface_idx,
+ },
+ };
+ struct mlx5_nl_mac_addr data = {
+ .mac = mac,
+ .mac_n = 0,
+ };
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+
+ if (nlsk_fd == -1)
+ return 0;
+ ret = mlx5_nl_request(nlsk_fd, &req.hdr, sn, &req.ifm,
+ sizeof(struct ifinfomsg));
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(nlsk_fd, sn, mlx5_nl_mac_addr_cb, &data);
+ if (ret < 0)
+ goto error;
+ *mac_n = data.mac_n;
+ return 0;
+error:
+ DRV_LOG(DEBUG, "Interface %u cannot retrieve MAC address list %s",
+ iface_idx, strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Modify the MAC address neighbour table with Netlink.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param mac
+ * MAC address to consider.
+ * @param add
+ * 1 to add the MAC address, 0 to remove the MAC address.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_modify(int nlsk_fd, unsigned int iface_idx,
+ struct rte_ether_addr *mac, int add)
+{
+ struct {
+ struct nlmsghdr hdr;
+ struct ndmsg ndm;
+ struct rtattr rta;
+ uint8_t buffer[RTE_ETHER_ADDR_LEN];
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE |
+ NLM_F_EXCL | NLM_F_ACK,
+ .nlmsg_type = add ? RTM_NEWNEIGH : RTM_DELNEIGH,
+ },
+ .ndm = {
+ .ndm_family = PF_BRIDGE,
+ .ndm_state = NUD_NOARP | NUD_PERMANENT,
+ .ndm_ifindex = iface_idx,
+ .ndm_flags = NTF_SELF,
+ },
+ .rta = {
+ .rta_type = NDA_LLADDR,
+ .rta_len = RTA_LENGTH(RTE_ETHER_ADDR_LEN),
+ },
+ };
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+
+ if (nlsk_fd == -1)
+ return 0;
+ memcpy(RTA_DATA(&req.rta), mac, RTE_ETHER_ADDR_LEN);
+ req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) +
+ RTA_ALIGN(req.rta.rta_len);
+ ret = mlx5_nl_send(nlsk_fd, &req.hdr, sn);
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(nlsk_fd, sn, NULL, NULL);
+ if (ret < 0)
+ goto error;
+ return 0;
+error:
+ DRV_LOG(DEBUG,
+ "Interface %u cannot %s MAC address"
+ " %02X:%02X:%02X:%02X:%02X:%02X %s",
+ iface_idx,
+ add ? "add" : "remove",
+ mac->addr_bytes[0], mac->addr_bytes[1],
+ mac->addr_bytes[2], mac->addr_bytes[3],
+ mac->addr_bytes[4], mac->addr_bytes[5],
+ strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Modify the VF MAC address neighbour table with Netlink.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param mac
+ * MAC address to consider.
+ * @param vf_index
+ * VF index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_vf_mac_addr_modify(int nlsk_fd, unsigned int iface_idx,
+ struct rte_ether_addr *mac, int vf_index)
+{
+ int ret;
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifm;
+ struct rtattr vf_list_rta;
+ struct rtattr vf_info_rta;
+ struct rtattr vf_mac_rta;
+ struct ifla_vf_mac ivm;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+ .nlmsg_type = RTM_BASE,
+ },
+ .ifm = {
+ .ifi_index = iface_idx,
+ },
+ .vf_list_rta = {
+ .rta_type = IFLA_VFINFO_LIST,
+ .rta_len = RTA_ALIGN(RTA_LENGTH(0)),
+ },
+ .vf_info_rta = {
+ .rta_type = IFLA_VF_INFO,
+ .rta_len = RTA_ALIGN(RTA_LENGTH(0)),
+ },
+ .vf_mac_rta = {
+ .rta_type = IFLA_VF_MAC,
+ },
+ };
+ struct ifla_vf_mac ivm = {
+ .vf = vf_index,
+ };
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+
+ memcpy(&ivm.mac, mac, RTE_ETHER_ADDR_LEN);
+ memcpy(RTA_DATA(&req.vf_mac_rta), &ivm, sizeof(ivm));
+
+ req.vf_mac_rta.rta_len = RTA_LENGTH(sizeof(ivm));
+ req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) +
+ RTA_ALIGN(req.vf_list_rta.rta_len) +
+ RTA_ALIGN(req.vf_info_rta.rta_len) +
+ RTA_ALIGN(req.vf_mac_rta.rta_len);
+ req.vf_list_rta.rta_len = RTE_PTR_DIFF(NLMSG_TAIL(&req.hdr),
+ &req.vf_list_rta);
+ req.vf_info_rta.rta_len = RTE_PTR_DIFF(NLMSG_TAIL(&req.hdr),
+ &req.vf_info_rta);
+
+ if (nlsk_fd < 0)
+ return -1;
+ ret = mlx5_nl_send(nlsk_fd, &req.hdr, sn);
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(nlsk_fd, sn, NULL, NULL);
+ if (ret < 0)
+ goto error;
+ return 0;
+error:
+ DRV_LOG(ERR,
+ "representor %u cannot set VF MAC address "
+ "%02X:%02X:%02X:%02X:%02X:%02X : %s",
+ vf_index,
+ mac->addr_bytes[0], mac->addr_bytes[1],
+ mac->addr_bytes[2], mac->addr_bytes[3],
+ mac->addr_bytes[4], mac->addr_bytes[5],
+ strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Add a MAC address.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param mac_own
+ *   Bitfield tracking the MAC addresses added by the PMD
+ *   (BITFIELD_DECLARE array).
+ * @param mac
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_mac_addr_add(int nlsk_fd, unsigned int iface_idx,
+ uint64_t *mac_own, struct rte_ether_addr *mac,
+ uint32_t index)
+{
+ int ret;
+
+ ret = mlx5_nl_mac_addr_modify(nlsk_fd, iface_idx, mac, 1);
+ if (!ret) {
+ MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
+ if (index >= MLX5_MAX_MAC_ADDRESSES)
+ return -EINVAL;
+
+ BITFIELD_SET(mac_own, index);
+ }
+ if (ret == -EEXIST)
+ return 0;
+ return ret;
+}
+
+/**
+ * Remove a MAC address.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param mac_own
+ *   Bitfield tracking the MAC addresses added by the PMD
+ *   (BITFIELD_DECLARE array).
+ * @param mac
+ * MAC address to remove.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_mac_addr_remove(int nlsk_fd, unsigned int iface_idx, uint64_t *mac_own,
+ struct rte_ether_addr *mac, uint32_t index)
+{
+ MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
+ if (index >= MLX5_MAX_MAC_ADDRESSES)
+ return -EINVAL;
+
+ BITFIELD_RESET(mac_own, index);
+ return mlx5_nl_mac_addr_modify(nlsk_fd, iface_idx, mac, 0);
+}
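+
+/*
+ * Illustrative usage sketch (not part of the original sources): the
+ * mac_own bitfield records which table entries were added by the PMD,
+ * so a later flush removes only those:
+ *
+ *	mlx5_nl_mac_addr_add(nl, ifindex, mac_own, &mac, 0);
+ *	...
+ *	mlx5_nl_mac_addr_remove(nl, ifindex, mac_own, &mac, 0);
+ *
+ * mac_own itself is declared by the caller with BITFIELD_DECLARE().
+ */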
+
+/**
+ * Synchronize Netlink bridge table to the internal table.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param mac_addrs
+ *   MAC addresses array to synchronize.
+ * @param n
+ * @p mac_addrs array size.
+ */
+void
+mlx5_nl_mac_addr_sync(int nlsk_fd, unsigned int iface_idx,
+ struct rte_ether_addr *mac_addrs, int n)
+{
+ struct rte_ether_addr macs[n];
+ int macs_n = 0;
+ int i;
+ int ret;
+
+ ret = mlx5_nl_mac_addr_list(nlsk_fd, iface_idx, &macs, &macs_n);
+ if (ret)
+ return;
+ for (i = 0; i != macs_n; ++i) {
+ int j;
+
+ /* Verify the address is not in the array yet. */
+ for (j = 0; j != n; ++j)
+ if (rte_is_same_ether_addr(&macs[i], &mac_addrs[j]))
+ break;
+ if (j != n)
+ continue;
+ /* Find the first entry available. */
+ for (j = 0; j != n; ++j) {
+ if (rte_is_zero_ether_addr(&mac_addrs[j])) {
+ mac_addrs[j] = macs[i];
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * Flush all added MAC addresses.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param[in] mac_addrs
+ *   MAC addresses array to flush.
+ * @param n
+ * @p mac_addrs array size.
+ * @param mac_own
+ *   Bitfield tracking the MAC addresses added by the PMD
+ *   (BITFIELD_DECLARE array).
+ */
+void
+mlx5_nl_mac_addr_flush(int nlsk_fd, unsigned int iface_idx,
+ struct rte_ether_addr *mac_addrs, int n,
+ uint64_t *mac_own)
+{
+ int i;
+
+ if (n <= 0 || n >= MLX5_MAX_MAC_ADDRESSES)
+ return;
+
+ for (i = n - 1; i >= 0; --i) {
+ struct rte_ether_addr *m = &mac_addrs[i];
+
+ if (BITFIELD_ISSET(mac_own, i))
+ mlx5_nl_mac_addr_remove(nlsk_fd, iface_idx, mac_own, m,
+ i);
+ }
+}
+
+/**
+ * Enable promiscuous / all multicast mode through Netlink.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param flags
+ * IFF_PROMISC for promiscuous, IFF_ALLMULTI for allmulti.
+ * @param enable
+ *   Nonzero to enable, zero to disable.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_device_flags(int nlsk_fd, unsigned int iface_idx, uint32_t flags,
+ int enable)
+{
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifi;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_NEWLINK,
+ .nlmsg_flags = NLM_F_REQUEST,
+ },
+ .ifi = {
+ .ifi_flags = enable ? flags : 0,
+ .ifi_change = flags,
+ .ifi_index = iface_idx,
+ },
+ };
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+
+ MLX5_ASSERT(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
+ if (nlsk_fd < 0)
+ return 0;
+ ret = mlx5_nl_send(nlsk_fd, &req.hdr, sn);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Enable promiscuous mode through Netlink.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param enable
+ *   Nonzero to enable, zero to disable.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_promisc(int nlsk_fd, unsigned int iface_idx, int enable)
+{
+ int ret = mlx5_nl_device_flags(nlsk_fd, iface_idx, IFF_PROMISC, enable);
+
+ if (ret)
+ DRV_LOG(DEBUG,
+ "Interface %u cannot %s promisc mode: Netlink error %s",
+ iface_idx, enable ? "enable" : "disable",
+ strerror(rte_errno));
+ return ret;
+}
+
+/**
+ * Enable all multicast mode through Netlink.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] iface_idx
+ * Net device interface index.
+ * @param enable
+ *   Nonzero to enable, zero to disable.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_allmulti(int nlsk_fd, unsigned int iface_idx, int enable)
+{
+ int ret = mlx5_nl_device_flags(nlsk_fd, iface_idx, IFF_ALLMULTI,
+ enable);
+
+ if (ret)
+ DRV_LOG(DEBUG,
+			"Interface %u cannot %s allmulti: Netlink error %s",
+ iface_idx, enable ? "enable" : "disable",
+ strerror(rte_errno));
+ return ret;
+}
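+
+/*
+ * Illustrative usage sketch (not part of the original sources): both
+ * wrappers resolve to a single RTM_NEWLINK request toggling IFF_* bits
+ * through mlx5_nl_device_flags():
+ *
+ *	mlx5_nl_promisc(nl, ifindex, 1);
+ *	mlx5_nl_allmulti(nl, ifindex, 0);
+ */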
+
+/**
+ * Process network interface information from Netlink message.
+ *
+ * @param nh
+ * Pointer to Netlink message header.
+ * @param arg
+ * Opaque data pointer for this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_cmdget_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_nl_ifindex_data *data = arg;
+ struct mlx5_nl_ifindex_data local = {
+ .flags = 0,
+ };
+ size_t off = NLMSG_HDRLEN;
+
+ if (nh->nlmsg_type !=
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET) &&
+ nh->nlmsg_type !=
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET))
+ goto error;
+ while (off < nh->nlmsg_len) {
+ struct nlattr *na = (void *)((uintptr_t)nh + off);
+ void *payload = (void *)((uintptr_t)na + NLA_HDRLEN);
+
+ if (na->nla_len > nh->nlmsg_len - off)
+ goto error;
+ switch (na->nla_type) {
+ case RDMA_NLDEV_ATTR_DEV_INDEX:
+ local.ibindex = *(uint32_t *)payload;
+ local.flags |= MLX5_NL_CMD_GET_IB_INDEX;
+ break;
+ case RDMA_NLDEV_ATTR_DEV_NAME:
+ if (!strcmp(payload, data->name))
+ local.flags |= MLX5_NL_CMD_GET_IB_NAME;
+ break;
+ case RDMA_NLDEV_ATTR_NDEV_INDEX:
+ local.ifindex = *(uint32_t *)payload;
+ local.flags |= MLX5_NL_CMD_GET_NET_INDEX;
+ break;
+ case RDMA_NLDEV_ATTR_PORT_INDEX:
+ local.portnum = *(uint32_t *)payload;
+ local.flags |= MLX5_NL_CMD_GET_PORT_INDEX;
+ break;
+ default:
+ break;
+ }
+ off += NLA_ALIGN(na->nla_len);
+ }
+	/*
+	 * It is possible to receive multiple messages, one for each
+	 * Infiniband device present in the system. Gather the
+	 * parameters locally and copy them to the query context only
+	 * when the device name matches the requested one.
+	 */
+ if (local.flags & MLX5_NL_CMD_GET_IB_NAME) {
+ data->flags = local.flags;
+ data->ibindex = local.ibindex;
+ data->ifindex = local.ifindex;
+ data->portnum = local.portnum;
+ }
+ return 0;
+error:
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Get index of network interface associated with some IB device.
+ *
+ * This is the only somewhat safe method to avoid resorting to heuristics
+ * when faced with port representors. Unfortunately it requires at least
+ * Linux 4.17.
+ *
+ * @param nl
+ * Netlink socket of the RDMA kind (NETLINK_RDMA).
+ * @param[in] name
+ * IB device name.
+ * @param[in] pindex
+ *   IB device port index, starting from 1.
+ *
+ * @return
+ * A valid (nonzero) interface index on success, 0 otherwise and rte_errno
+ * is set.
+ */
+unsigned int
+mlx5_nl_ifindex(int nl, const char *name, uint32_t pindex)
+{
+ struct mlx5_nl_ifindex_data data = {
+ .name = name,
+ .flags = 0,
+ .ibindex = 0, /* Determined during first pass. */
+ .ifindex = 0, /* Determined during second pass. */
+ };
+ union {
+ struct nlmsghdr nh;
+ uint8_t buf[NLMSG_HDRLEN +
+ NLA_HDRLEN + NLA_ALIGN(sizeof(data.ibindex)) +
+ NLA_HDRLEN + NLA_ALIGN(sizeof(pindex))];
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH(0),
+ .nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_GET),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP,
+ },
+ };
+ struct nlattr *na;
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req.nh, sn);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, sn, mlx5_nl_cmdget_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!(data.flags & MLX5_NL_CMD_GET_IB_NAME) ||
+ !(data.flags & MLX5_NL_CMD_GET_IB_INDEX))
+ goto error;
+ data.flags = 0;
+ sn = MLX5_NL_SN_GENERATE;
+ req.nh.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_PORT_GET);
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.buf) - NLMSG_HDRLEN);
+ na = (void *)((uintptr_t)req.buf + NLMSG_HDRLEN);
+ na->nla_len = NLA_HDRLEN + sizeof(data.ibindex);
+ na->nla_type = RDMA_NLDEV_ATTR_DEV_INDEX;
+ memcpy((void *)((uintptr_t)na + NLA_HDRLEN),
+ &data.ibindex, sizeof(data.ibindex));
+ na = (void *)((uintptr_t)na + NLA_ALIGN(na->nla_len));
+ na->nla_len = NLA_HDRLEN + sizeof(pindex);
+ na->nla_type = RDMA_NLDEV_ATTR_PORT_INDEX;
+ memcpy((void *)((uintptr_t)na + NLA_HDRLEN),
+ &pindex, sizeof(pindex));
+ ret = mlx5_nl_send(nl, &req.nh, sn);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, sn, mlx5_nl_cmdget_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!(data.flags & MLX5_NL_CMD_GET_IB_NAME) ||
+ !(data.flags & MLX5_NL_CMD_GET_IB_INDEX) ||
+ !(data.flags & MLX5_NL_CMD_GET_NET_INDEX) ||
+ !data.ifindex)
+ goto error;
+ return data.ifindex;
+error:
+ rte_errno = ENODEV;
+ return 0;
+}
+
+/**
+ * Get the number of physical ports of given IB device.
+ *
+ * @param nl
+ * Netlink socket of the RDMA kind (NETLINK_RDMA).
+ * @param[in] name
+ * IB device name.
+ *
+ * @return
+ * A valid (nonzero) number of ports on success, 0 otherwise
+ * and rte_errno is set.
+ */
+unsigned int
+mlx5_nl_portnum(int nl, const char *name)
+{
+ struct mlx5_nl_ifindex_data data = {
+ .flags = 0,
+ .name = name,
+ .ifindex = 0,
+ .portnum = 0,
+ };
+ struct nlmsghdr req = {
+ .nlmsg_len = NLMSG_LENGTH(0),
+ .nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_GET),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP,
+ };
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req, sn);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, sn, mlx5_nl_cmdget_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!(data.flags & MLX5_NL_CMD_GET_IB_NAME) ||
+ !(data.flags & MLX5_NL_CMD_GET_IB_INDEX) ||
+ !(data.flags & MLX5_NL_CMD_GET_PORT_INDEX)) {
+ rte_errno = ENODEV;
+ return 0;
+ }
+ if (!data.portnum)
+ rte_errno = EINVAL;
+ return data.portnum;
+}
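+
+/*
+ * Illustrative usage sketch (not part of the original sources),
+ * assuming a hypothetical "mlx5_0" IB device:
+ *
+ *	unsigned int i, n = mlx5_nl_portnum(nl_rdma, "mlx5_0");
+ *
+ *	for (i = 1; i <= n; ++i) {
+ *		unsigned int ifindex =
+ *			mlx5_nl_ifindex(nl_rdma, "mlx5_0", i);
+ *		...
+ *	}
+ *
+ * Port indices start from 1, matching the RDMA core convention.
+ */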
+
+/**
+ * Analyze gathered port parameters via Netlink to recognize master
+ * and representor devices for E-Switch configuration.
+ *
+ * @param[in] num_vf_set
+ *   Whether the "number of VFs" port attribute is present.
+ * @param[in,out] switch_info
+ *   Port information, including port name as a number and port name
+ *   type if recognized.
+ *
+ * @return
+ *   The master and representor flags are set in switch_info according
+ *   to the recognized parameters (if any).
+ */
+static void
+mlx5_nl_check_switch_info(bool num_vf_set,
+ struct mlx5_switch_info *switch_info)
+{
+ switch (switch_info->name_type) {
+ case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
+ /*
+ * Name is not recognized, assume the master,
+ * check the number of VFs key presence.
+ */
+ switch_info->master = num_vf_set;
+ break;
+ case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
+ /*
+ * Name is not set, this assumes the legacy naming
+ * schema for master, just check if there is a
+ * number of VFs key.
+ */
+ switch_info->master = num_vf_set;
+ break;
+ case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
+ /* New uplink naming schema recognized. */
+ switch_info->master = 1;
+ break;
+ case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
+ /* Legacy representors naming schema. */
+ switch_info->representor = !num_vf_set;
+ break;
+ case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
+ /* New representors naming schema. */
+ switch_info->representor = 1;
+ break;
+ }
+}
+
+/**
+ * Process switch information from Netlink message.
+ *
+ * @param nh
+ * Pointer to Netlink message header.
+ * @param arg
+ * Opaque data pointer for this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_switch_info info = {
+ .master = 0,
+ .representor = 0,
+ .name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET,
+ .port_name = 0,
+ .switch_id = 0,
+ };
+ size_t off = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ bool switch_id_set = false;
+ bool num_vf_set = false;
+
+ if (nh->nlmsg_type != RTM_NEWLINK)
+ goto error;
+ while (off < nh->nlmsg_len) {
+ struct rtattr *ra = (void *)((uintptr_t)nh + off);
+ void *payload = RTA_DATA(ra);
+ unsigned int i;
+
+ if (ra->rta_len > nh->nlmsg_len - off)
+ goto error;
+ switch (ra->rta_type) {
+ case IFLA_NUM_VF:
+ num_vf_set = true;
+ break;
+ case IFLA_PHYS_PORT_NAME:
+ mlx5_translate_port_name((char *)payload, &info);
+ break;
+ case IFLA_PHYS_SWITCH_ID:
+ info.switch_id = 0;
+ for (i = 0; i < RTA_PAYLOAD(ra); ++i) {
+ info.switch_id <<= 8;
+ info.switch_id |= ((uint8_t *)payload)[i];
+ }
+ switch_id_set = true;
+ break;
+ }
+ off += RTA_ALIGN(ra->rta_len);
+ }
+ if (switch_id_set) {
+ /* We have some E-Switch configuration. */
+ mlx5_nl_check_switch_info(num_vf_set, &info);
+ }
+ MLX5_ASSERT(!(info.master && info.representor));
+ memcpy(arg, &info, sizeof(info));
+ return 0;
+error:
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param nl
+ * Netlink socket of the ROUTE kind (NETLINK_ROUTE).
+ * @param ifindex
+ * Network interface index.
+ * @param[out] info
+ * Switch information object, populated in case of success.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_switch_info(int nl, unsigned int ifindex,
+ struct mlx5_switch_info *info)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ struct rtattr rta;
+ uint32_t extmask;
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH
+ (sizeof(req.info) +
+ RTA_LENGTH(sizeof(uint32_t))),
+ .nlmsg_type = RTM_GETLINK,
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+ },
+ .info = {
+ .ifi_family = AF_UNSPEC,
+ .ifi_index = ifindex,
+ },
+ .rta = {
+ .rta_type = IFLA_EXT_MASK,
+ .rta_len = RTA_LENGTH(sizeof(int32_t)),
+ },
+ .extmask = RTE_LE32(1),
+ };
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req.nh, sn);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(nl, sn, mlx5_nl_switch_info_cb, info);
+ if (info->master && info->representor) {
+ DRV_LOG(ERR, "ifindex %u device is recognized as master"
+ " and as representor", ifindex);
+ rte_errno = ENODEV;
+ ret = -rte_errno;
+ }
+ return ret;
+}
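+
+/*
+ * Illustrative usage sketch (not part of the original sources):
+ *
+ *	struct mlx5_switch_info info;
+ *
+ *	if (!mlx5_nl_switch_info(nl_route, ifindex, &info) &&
+ *	    info.representor)
+ *		... handle the port as an E-Switch representor ...
+ */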
+
+/**
+ * Delete VLAN network device by ifindex.
+ *
+ * @param[in] vmwa
+ * Context object initialized by mlx5_nl_vlan_vmwa_init().
+ * @param[in] ifindex
+ * Interface index of network device to delete.
+ */
+void
+mlx5_nl_vlan_vmwa_delete(struct mlx5_nl_vlan_vmwa_context *vmwa,
+ uint32_t ifindex)
+{
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_DELLINK,
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+ },
+ .info = {
+ .ifi_family = AF_UNSPEC,
+ .ifi_index = ifindex,
+ },
+ };
+
+ if (ifindex) {
+ ret = mlx5_nl_send(vmwa->nl_socket, &req.nh, sn);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(vmwa->nl_socket, sn, NULL, NULL);
+ if (ret < 0)
+ DRV_LOG(WARNING, "netlink: error deleting VLAN WA"
+ " ifindex %u, %d", ifindex, ret);
+ }
+}
+
+/* Set of subroutines to build Netlink message. */
+static struct nlattr *
+nl_msg_tail(struct nlmsghdr *nlh)
+{
+ return (struct nlattr *)
+ (((uint8_t *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
+}
+
+static void
+nl_attr_put(struct nlmsghdr *nlh, int type, const void *data, int alen)
+{
+ struct nlattr *nla = nl_msg_tail(nlh);
+
+ nla->nla_type = type;
+ nla->nla_len = NLMSG_ALIGN(sizeof(struct nlattr)) + alen;
+ nlh->nlmsg_len += NLMSG_ALIGN(nla->nla_len);
+
+ if (alen)
+ memcpy((uint8_t *)nla + sizeof(struct nlattr), data, alen);
+}
+
+static struct nlattr *
+nl_attr_nest_start(struct nlmsghdr *nlh, int type)
+{
+	struct nlattr *nest = nl_msg_tail(nlh);
+
+ nl_attr_put(nlh, type, NULL, 0);
+ return nest;
+}
+
+static void
+nl_attr_nest_end(struct nlmsghdr *nlh, struct nlattr *nest)
+{
+ nest->nla_len = (uint8_t *)nl_msg_tail(nlh) - (uint8_t *)nest;
+}
+
+/**
+ * Create network VLAN device with specified VLAN tag.
+ *
+ * @param[in] vmwa
+ *   Context object initialized by mlx5_nl_vlan_vmwa_init().
+ * @param[in] ifindex
+ *   Base network interface index.
+ * @param[in] tag
+ *   VLAN tag for VLAN network device to create.
+ *
+ * @return
+ *   A nonzero interface index of the created (or pre-existing) VLAN
+ *   device on success, 0 otherwise.
+ */
+uint32_t
+mlx5_nl_vlan_vmwa_create(struct mlx5_nl_vlan_vmwa_context *vmwa,
+ uint32_t ifindex, uint16_t tag)
+{
+ struct nlmsghdr *nlh;
+ struct ifinfomsg *ifm;
+ char name[sizeof(MLX5_VMWA_VLAN_DEVICE_PFX) + 32];
+
+ __rte_cache_aligned
+ uint8_t buf[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct ifinfomsg)) +
+ NLMSG_ALIGN(sizeof(struct nlattr)) * 8 +
+ NLMSG_ALIGN(sizeof(uint32_t)) +
+ NLMSG_ALIGN(sizeof(name)) +
+ NLMSG_ALIGN(sizeof("vlan")) +
+ NLMSG_ALIGN(sizeof(uint32_t)) +
+ NLMSG_ALIGN(sizeof(uint16_t)) + 16];
+ struct nlattr *na_info;
+ struct nlattr *na_vlan;
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+
+ memset(buf, 0, sizeof(buf));
+ nlh = (struct nlmsghdr *)buf;
+ nlh->nlmsg_len = sizeof(struct nlmsghdr);
+ nlh->nlmsg_type = RTM_NEWLINK;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE |
+ NLM_F_EXCL | NLM_F_ACK;
+ ifm = (struct ifinfomsg *)nl_msg_tail(nlh);
+ nlh->nlmsg_len += sizeof(struct ifinfomsg);
+ ifm->ifi_family = AF_UNSPEC;
+ ifm->ifi_type = 0;
+ ifm->ifi_index = 0;
+ ifm->ifi_flags = IFF_UP;
+ ifm->ifi_change = 0xffffffff;
+ nl_attr_put(nlh, IFLA_LINK, &ifindex, sizeof(ifindex));
+ ret = snprintf(name, sizeof(name), "%s.%u.%u",
+ MLX5_VMWA_VLAN_DEVICE_PFX, ifindex, tag);
+ nl_attr_put(nlh, IFLA_IFNAME, name, ret + 1);
+ na_info = nl_attr_nest_start(nlh, IFLA_LINKINFO);
+ nl_attr_put(nlh, IFLA_INFO_KIND, "vlan", sizeof("vlan"));
+ na_vlan = nl_attr_nest_start(nlh, IFLA_INFO_DATA);
+ nl_attr_put(nlh, IFLA_VLAN_ID, &tag, sizeof(tag));
+ nl_attr_nest_end(nlh, na_vlan);
+ nl_attr_nest_end(nlh, na_info);
+ MLX5_ASSERT(sizeof(buf) >= nlh->nlmsg_len);
+ ret = mlx5_nl_send(vmwa->nl_socket, nlh, sn);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(vmwa->nl_socket, sn, NULL, NULL);
+ if (ret < 0) {
+ DRV_LOG(WARNING, "netlink: VLAN %s create failure (%d)", name,
+ ret);
+ }
+ /* Try to get ifindex of created or pre-existing device. */
+ ret = if_nametoindex(name);
+ if (!ret) {
+ DRV_LOG(WARNING, "VLAN %s failed to get index (%d)", name,
+ errno);
+ return 0;
+ }
+ return ret;
+}
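+
+/*
+ * A minimal usage sketch of the VLAN workaround pair above, assuming
+ * "vmwa" was initialized by mlx5_nl_vlan_vmwa_init() and
+ * "base_ifindex" is the VF network interface index:
+ *
+ *	uint32_t vlan_ifindex =
+ *		mlx5_nl_vlan_vmwa_create(vmwa, base_ifindex, 100);
+ *
+ *	if (vlan_ifindex) {
+ *		... use the VLAN netdev ...
+ *		mlx5_nl_vlan_vmwa_delete(vmwa, vlan_ifindex);
+ *	}
+ */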
+
+/**
+ * Parse Netlink message to retrieve the general family ID.
+ *
+ * @param nh
+ * Pointer to Netlink Message Header.
+ * @param arg
+ * PMD data registered with this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_family_id_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct nlattr *tail = RTE_PTR_ADD(nh, nh->nlmsg_len);
+ struct nlattr *nla = RTE_PTR_ADD(nh, NLMSG_ALIGN(sizeof(*nh)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)));
+
+ for (; nla->nla_len && nla < tail;
+ nla = RTE_PTR_ADD(nla, NLMSG_ALIGN(nla->nla_len))) {
+ if (nla->nla_type == CTRL_ATTR_FAMILY_ID) {
+ *(uint16_t *)arg = *(uint16_t *)(nla + 1);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+#define MLX5_NL_MAX_ATTR_SIZE 100
+/**
+ * Get generic netlink family ID.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] name
+ * The family name.
+ *
+ * @return
+ * ID >= 0 on success and @p enable is updated, a negative errno value
+ * otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_generic_family_id_get(int nlsk_fd, const char *name)
+{
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *genl;
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int name_size = strlen(name) + 1;
+ int ret;
+ uint16_t id = -1;
+ uint8_t buf[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct nlattr)) +
+ NLMSG_ALIGN(MLX5_NL_MAX_ATTR_SIZE)];
+
+ memset(buf, 0, sizeof(buf));
+ nlh = (struct nlmsghdr *)buf;
+ nlh->nlmsg_len = sizeof(struct nlmsghdr);
+ nlh->nlmsg_type = GENL_ID_CTRL;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ genl = (struct genlmsghdr *)nl_msg_tail(nlh);
+ nlh->nlmsg_len += sizeof(struct genlmsghdr);
+ genl->cmd = CTRL_CMD_GETFAMILY;
+ genl->version = 1;
+ nl_attr_put(nlh, CTRL_ATTR_FAMILY_NAME, name, name_size);
+ ret = mlx5_nl_send(nlsk_fd, nlh, sn);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(nlsk_fd, sn, mlx5_nl_family_id_cb, &id);
+ if (ret < 0) {
+ DRV_LOG(DEBUG, "Failed to get Netlink %s family ID: %d.", name,
+ ret);
+ return ret;
+ }
+ DRV_LOG(DEBUG, "Netlink \"%s\" family ID is %u.", name, id);
+ return (int)id;
+}
+
+/**
+ * Get Devlink family ID.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ *
+ * @return
+ * ID >= 0 on success and @p enable is updated, a negative errno value
+ * otherwise and rte_errno is set.
+ */
+
+int
+mlx5_nl_devlink_family_id_get(int nlsk_fd)
+{
+ return mlx5_nl_generic_family_id_get(nlsk_fd, DEVLINK_GENL_NAME);
+}
+
+/**
+ * Parse Netlink message to retrieve the ROCE enable status.
+ *
+ * @param nh
+ * Pointer to Netlink Message Header.
+ * @param arg
+ * PMD data registered with this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_roce_cb(struct nlmsghdr *nh, void *arg)
+{
+ int ret = -EINVAL;
+ int *enable = arg;
+ struct nlattr *tail = RTE_PTR_ADD(nh, nh->nlmsg_len);
+ struct nlattr *nla = RTE_PTR_ADD(nh, NLMSG_ALIGN(sizeof(*nh)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)));
+
+ while (nla->nla_len && nla < tail) {
+ switch (nla->nla_type) {
+ /* Expected nested attributes case. */
+ case DEVLINK_ATTR_PARAM:
+ case DEVLINK_ATTR_PARAM_VALUES_LIST:
+ case DEVLINK_ATTR_PARAM_VALUE:
+ ret = 0;
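+ /* Step over the attribute header into the nested payload. */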
+ nla += 1;
+ break;
+ case DEVLINK_ATTR_PARAM_VALUE_DATA:
+ *enable = 1;
+ return 0;
+ default:
+ nla = RTE_PTR_ADD(nla, NLMSG_ALIGN(nla->nla_len));
+ }
+ }
+ *enable = 0;
+ return ret;
+}
+
+/**
+ * Get ROCE enable status through Netlink.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] family_id
+ * The Devlink family ID.
+ * @param pci_addr
+ * The device PCI address.
+ * @param[out] enable
+ * Where to store the enable status.
+ *
+ * @return
+ * 0 on success and @p enable is updated, a negative errno value otherwise
+ * and rte_errno is set.
+ */
+int
+mlx5_nl_enable_roce_get(int nlsk_fd, int family_id, const char *pci_addr,
+ int *enable)
+{
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *genl;
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+ int cur_en = 0;
+ uint8_t buf[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct nlattr)) * 4 +
+ NLMSG_ALIGN(MLX5_NL_MAX_ATTR_SIZE) * 4];
+
+ memset(buf, 0, sizeof(buf));
+ nlh = (struct nlmsghdr *)buf;
+ nlh->nlmsg_len = sizeof(struct nlmsghdr);
+ nlh->nlmsg_type = family_id;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ genl = (struct genlmsghdr *)nl_msg_tail(nlh);
+ nlh->nlmsg_len += sizeof(struct genlmsghdr);
+ genl->cmd = DEVLINK_CMD_PARAM_GET;
+ genl->version = DEVLINK_GENL_VERSION;
+ nl_attr_put(nlh, DEVLINK_ATTR_BUS_NAME, "pci", 4);
+ nl_attr_put(nlh, DEVLINK_ATTR_DEV_NAME, pci_addr, strlen(pci_addr) + 1);
+ nl_attr_put(nlh, DEVLINK_ATTR_PARAM_NAME, "enable_roce", 12);
+ ret = mlx5_nl_send(nlsk_fd, nlh, sn);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(nlsk_fd, sn, mlx5_nl_roce_cb, &cur_en);
+ if (ret < 0) {
+ DRV_LOG(DEBUG, "Failed to get ROCE enable on device %s: %d.",
+ pci_addr, ret);
+ return ret;
+ }
+ *enable = cur_en;
+ DRV_LOG(DEBUG, "ROCE is %sabled for device \"%s\".",
+ cur_en ? "en" : "dis", pci_addr);
+ return ret;
+}
+
+/**
+ * Reload mlx5 device kernel driver through Netlink.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] family_id
+ * The Devlink family ID.
+ * @param pci_addr
+ * The device PCI address.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_driver_reload(int nlsk_fd, int family_id, const char *pci_addr)
+{
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *genl;
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+ uint8_t buf[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct nlattr)) * 2 +
+ NLMSG_ALIGN(MLX5_NL_MAX_ATTR_SIZE) * 2];
+
+ memset(buf, 0, sizeof(buf));
+ nlh = (struct nlmsghdr *)buf;
+ nlh->nlmsg_len = sizeof(struct nlmsghdr);
+ nlh->nlmsg_type = family_id;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ genl = (struct genlmsghdr *)nl_msg_tail(nlh);
+ nlh->nlmsg_len += sizeof(struct genlmsghdr);
+ genl->cmd = DEVLINK_CMD_RELOAD;
+ genl->version = DEVLINK_GENL_VERSION;
+ nl_attr_put(nlh, DEVLINK_ATTR_BUS_NAME, "pci", 4);
+ nl_attr_put(nlh, DEVLINK_ATTR_DEV_NAME, pci_addr, strlen(pci_addr) + 1);
+ ret = mlx5_nl_send(nlsk_fd, nlh, sn);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(nlsk_fd, sn, NULL, NULL);
+ if (ret < 0) {
+ DRV_LOG(DEBUG, "Failed to reload %s device by Netlink - %d",
+ pci_addr, ret);
+ return ret;
+ }
+ DRV_LOG(DEBUG, "Device \"%s\" was reloaded by Netlink successfully.",
+ pci_addr);
+ return 0;
+}
+
+/**
+ * Set ROCE enable status through Netlink.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] family_id
+ * The Devlink family ID.
+ * @param pci_addr
+ * The device PCI address.
+ * @param[in] enable
+ * The enable status to set.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_enable_roce_set(int nlsk_fd, int family_id, const char *pci_addr,
+ int enable)
+{
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *genl;
+ uint32_t sn = MLX5_NL_SN_GENERATE;
+ int ret;
+ uint8_t buf[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct nlattr)) * 6 +
+ NLMSG_ALIGN(MLX5_NL_MAX_ATTR_SIZE) * 6];
+ uint8_t cmode = DEVLINK_PARAM_CMODE_DRIVERINIT;
+ uint8_t ptype = NLA_FLAG;
+
+ memset(buf, 0, sizeof(buf));
+ nlh = (struct nlmsghdr *)buf;
+ nlh->nlmsg_len = sizeof(struct nlmsghdr);
+ nlh->nlmsg_type = family_id;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ genl = (struct genlmsghdr *)nl_msg_tail(nlh);
+ nlh->nlmsg_len += sizeof(struct genlmsghdr);
+ genl->cmd = DEVLINK_CMD_PARAM_SET;
+ genl->version = DEVLINK_GENL_VERSION;
+ nl_attr_put(nlh, DEVLINK_ATTR_BUS_NAME, "pci", 4);
+ nl_attr_put(nlh, DEVLINK_ATTR_DEV_NAME, pci_addr, strlen(pci_addr) + 1);
+ nl_attr_put(nlh, DEVLINK_ATTR_PARAM_NAME, "enable_roce", 12);
+ nl_attr_put(nlh, DEVLINK_ATTR_PARAM_VALUE_CMODE, &cmode, sizeof(cmode));
+ nl_attr_put(nlh, DEVLINK_ATTR_PARAM_TYPE, &ptype, sizeof(ptype));
+ if (enable)
+ nl_attr_put(nlh, DEVLINK_ATTR_PARAM_VALUE_DATA, NULL, 0);
+ ret = mlx5_nl_send(nlsk_fd, nlh, sn);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(nlsk_fd, sn, NULL, NULL);
+ if (ret < 0) {
+ DRV_LOG(DEBUG, "Failed to %sable ROCE for device %s by Netlink:"
+ " %d.", enable ? "en" : "dis", pci_addr, ret);
+ return ret;
+ }
+ DRV_LOG(DEBUG, "Device %s ROCE was %sabled by Netlink successfully.",
+ pci_addr, enable ? "en" : "dis");
+ /* The driver must be reloaded for the new value to take effect. */
+ return mlx5_nl_driver_reload(nlsk_fd, family_id, pci_addr);
+}
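+
+/*
+ * A minimal sketch of the whole ROCE toggle flow built from the calls
+ * above; the PCI address is a placeholder. Note that on success
+ * mlx5_nl_enable_roce_set() finishes with mlx5_nl_driver_reload():
+ *
+ *	int fd = mlx5_nl_init(NETLINK_GENERIC);
+ *	int family_id = mlx5_nl_devlink_family_id_get(fd);
+ *	int enabled = 0;
+ *
+ *	if (fd >= 0 && family_id >= 0 &&
+ *	    !mlx5_nl_enable_roce_get(fd, family_id, "0000:03:00.0",
+ *				     &enabled) && !enabled)
+ *		(void)mlx5_nl_enable_roce_set(fd, family_id,
+ *					      "0000:03:00.0", 1);
+ */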
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_nl.h b/src/spdk/dpdk/drivers/common/mlx5/mlx5_nl.h
new file mode 100644
index 000000000..53021e1a7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_nl.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_NL_H_
+#define RTE_PMD_MLX5_NL_H_
+
+#include <linux/netlink.h>
+
+#include <rte_ether.h>
+
+#include "mlx5_common.h"
+
+/* VLAN netdev for VLAN workaround. */
+struct mlx5_nl_vlan_dev {
+ uint32_t refcnt;
+ uint32_t ifindex; /**< Own interface index. */
+};
+
+/*
+ * Array of VLAN devices created on top of the VF,
+ * used for the workaround in virtual environments.
+ */
+struct mlx5_nl_vlan_vmwa_context {
+ int nl_socket;
+ uint32_t vf_ifindex;
+ struct mlx5_nl_vlan_dev vlan_dev[4096];
+};
+
+__rte_internal
+int mlx5_nl_init(int protocol);
+__rte_internal
+int mlx5_nl_mac_addr_add(int nlsk_fd, unsigned int iface_idx, uint64_t *mac_own,
+ struct rte_ether_addr *mac, uint32_t index);
+__rte_internal
+int mlx5_nl_mac_addr_remove(int nlsk_fd, unsigned int iface_idx,
+ uint64_t *mac_own, struct rte_ether_addr *mac,
+ uint32_t index);
+__rte_internal
+void mlx5_nl_mac_addr_sync(int nlsk_fd, unsigned int iface_idx,
+ struct rte_ether_addr *mac_addrs, int n);
+__rte_internal
+void mlx5_nl_mac_addr_flush(int nlsk_fd, unsigned int iface_idx,
+ struct rte_ether_addr *mac_addrs, int n,
+ uint64_t *mac_own);
+__rte_internal
+int mlx5_nl_promisc(int nlsk_fd, unsigned int iface_idx, int enable);
+__rte_internal
+int mlx5_nl_allmulti(int nlsk_fd, unsigned int iface_idx, int enable);
+__rte_internal
+unsigned int mlx5_nl_portnum(int nl, const char *name);
+__rte_internal
+unsigned int mlx5_nl_ifindex(int nl, const char *name, uint32_t pindex);
+__rte_internal
+int mlx5_nl_vf_mac_addr_modify(int nlsk_fd, unsigned int iface_idx,
+ struct rte_ether_addr *mac, int vf_index);
+__rte_internal
+int mlx5_nl_switch_info(int nl, unsigned int ifindex,
+ struct mlx5_switch_info *info);
+
+__rte_internal
+void mlx5_nl_vlan_vmwa_delete(struct mlx5_nl_vlan_vmwa_context *vmwa,
+ uint32_t ifindex);
+__rte_internal
+uint32_t mlx5_nl_vlan_vmwa_create(struct mlx5_nl_vlan_vmwa_context *vmwa,
+ uint32_t ifindex, uint16_t tag);
+__rte_internal
+int mlx5_nl_devlink_family_id_get(int nlsk_fd);
+__rte_internal
+int mlx5_nl_enable_roce_get(int nlsk_fd, int family_id, const char *pci_addr,
+ int *enable);
+__rte_internal
+int mlx5_nl_driver_reload(int nlsk_fd, int family_id, const char *pci_addr);
+__rte_internal
+int mlx5_nl_enable_roce_set(int nlsk_fd, int family_id, const char *pci_addr,
+ int enable);
+
+#endif /* RTE_PMD_MLX5_NL_H_ */
diff --git a/src/spdk/dpdk/drivers/common/mlx5/mlx5_prm.h b/src/spdk/dpdk/drivers/common/mlx5/mlx5_prm.h
new file mode 100644
index 000000000..e4ef2acd3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/mlx5_prm.h
@@ -0,0 +1,2560 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_PRM_H_
+#define RTE_PMD_MLX5_PRM_H_
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <unistd.h>
+
+#include <rte_vect.h>
+#include <rte_byteorder.h>
+
+#include "mlx5_autoconf.h"
+
+/* RSS hash key size. */
+#define MLX5_RSS_HASH_KEY_LEN 40
+
+/* Get CQE owner bit. */
+#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)
+
+/* Get CQE format. */
+#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)
+
+/* Get CQE opcode. */
+#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)
+
+/* Get CQE solicited event. */
+#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)
+
+/* Invalidate a CQE. */
+#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
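+
+/*
+ * A minimal sketch of how the accessors above are typically combined
+ * to test CQE ownership while polling, assuming "ci" is the consumer
+ * index and "cqe_cnt" the power-of-two ring size, so that
+ * "ci & cqe_cnt" extracts the wrap bit:
+ *
+ *	const uint8_t op_own = cqe->op_own;
+ *
+ *	if (MLX5_CQE_OPCODE(op_own) == MLX5_CQE_INVALID ||
+ *	    MLX5_CQE_OWNER(op_own) != !!(ci & cqe_cnt))
+ *		... the CQE is still owned by hardware ...
+ */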
+
+/* WQE Segment sizes in bytes. */
+#define MLX5_WSEG_SIZE 16u
+#define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)
+#define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)
+#define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)
+
+/* WQE/WQEBB size in bytes. */
+#define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)
+
+/*
+ * Maximum size of a WQE session.
+ * The absolute maximum is 63 segments (MLX5_DSEG_MAX),
+ * since the WQE size field in the Control Segment is 6 bits wide.
+ */
+#define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)
+
+/*
+ * Default minimum number of Tx queues for inlining packets.
+ * If there are fewer queues than specified we assume there are
+ * not enough CPU resources (cycles) to perform inlining, that
+ * the PCIe throughput is not the bottleneck, and inlining is
+ * disabled.
+ */
+#define MLX5_INLINE_MAX_TXQS 8u
+#define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u
+
+/*
+ * Default packet length threshold to be inlined with
+ * enhanced MPW. If packet length exceeds the threshold
+ * the data are not inlined. Should be aligned to the WQEBB
+ * boundary, accounting for the title Control and Ethernet
+ * segments.
+ */
+#define MLX5_EMPW_DEF_INLINE_LEN (4u * MLX5_WQE_SIZE + \
+ MLX5_DSEG_MIN_INLINE_SIZE)
+/*
+ * Maximal inline data length sent with enhanced MPW.
+ * Based on the maximal WQE size.
+ */
+#define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
+ MLX5_WQE_CSEG_SIZE - \
+ MLX5_WQE_ESEG_SIZE - \
+ MLX5_WQE_DSEG_SIZE + \
+ MLX5_DSEG_MIN_INLINE_SIZE)
+/*
+ * Minimal amount of packets to be sent with EMPW.
+ * This limits the minimal required size of sent EMPW.
+ * If there are not enough resources to build the minimal
+ * EMPW the sending loop exits.
+ */
+#define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)
+/*
+ * Maximal amount of packets to be sent with EMPW.
+ * This value should not exceed MLX5_TX_COMP_THRESH, otherwise
+ * there might be up to MLX5_EMPW_MAX_PACKETS mbufs without a
+ * CQE generation request; multiplied by MLX5_TX_COMP_MAX_CQE
+ * this may cause significant latency in the tx burst routine
+ * when freeing multiple mbufs at once.
+ */
+#define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH
+#define MLX5_MPW_MAX_PACKETS 6
+#define MLX5_MPW_INLINE_MAX_PACKETS 6
+
+/*
+ * Default packet length threshold to be inlined with
+ * ordinary SEND. Inlining saves the MR key search
+ * and an extra PCIe data fetch transaction, but consumes
+ * CPU cycles.
+ */
+#define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \
+ MLX5_ESEG_MIN_INLINE_SIZE - \
+ MLX5_WQE_CSEG_SIZE - \
+ MLX5_WQE_ESEG_SIZE - \
+ MLX5_WQE_DSEG_SIZE)
+/*
+ * Maximal inline data length sent with ordinary SEND.
+ * Based on the maximal WQE size.
+ */
+#define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
+ MLX5_WQE_CSEG_SIZE - \
+ MLX5_WQE_ESEG_SIZE - \
+ MLX5_WQE_DSEG_SIZE + \
+ MLX5_ESEG_MIN_INLINE_SIZE)
+
+/* Missing from mlx5dv.h, define it here. */
+#define MLX5_OPCODE_ENHANCED_MPSW 0x29u
+
+/* CQE value to inform that VLAN is stripped. */
+#define MLX5_CQE_VLAN_STRIPPED (1u << 0)
+
+/* IPv4 options. */
+#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)
+
+/* IPv6 packet. */
+#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)
+
+/* IPv4 packet. */
+#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)
+
+/* TCP packet. */
+#define MLX5_CQE_RX_TCP_PACKET (1u << 4)
+
+/* UDP packet. */
+#define MLX5_CQE_RX_UDP_PACKET (1u << 5)
+
+/* IP is fragmented. */
+#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)
+
+/* L2 header is valid. */
+#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)
+
+/* L3 header is valid. */
+#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)
+
+/* L4 header is valid. */
+#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)
+
+/* Outer packet, 0 IPv4, 1 IPv6. */
+#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)
+
+/* Tunnel packet bit in the CQE. */
+#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
+
+/* Mask for LRO push flag in the CQE lro_tcppsh_abort_dupack field. */
+#define MLX5_CQE_LRO_PUSH_MASK 0x40
+
+/* Mask for L4 type in the CQE hdr_type_etc field. */
+#define MLX5_CQE_L4_TYPE_MASK 0x70
+
+/* The bit index of L4 type in CQE hdr_type_etc field. */
+#define MLX5_CQE_L4_TYPE_SHIFT 0x4
+
+/* L4 type to indicate TCP packet without acknowledgment. */
+#define MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK 0x3
+
+/* L4 type to indicate TCP packet with acknowledgment. */
+#define MLX5_L4_HDR_TYPE_TCP_WITH_ACL 0x4
+
+/* Inner L3 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
+
+/* Inner L4 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+
+/* Outer L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5)
+
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5)
+
+/* Outer L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
+
+/* Inner L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
+
+/* Inner L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
+
+/* VLAN insertion flag. */
+#define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)
+
+/* Data inline segment flag. */
+#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)
+
+/* Is flow mark valid. */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
+#else
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
+#endif
+
+/* INVALID is used by packets matching no flow rules. */
+#define MLX5_FLOW_MARK_INVALID 0
+
+/* Maximum allowed value to mark a packet. */
+#define MLX5_FLOW_MARK_MAX 0xfffff0
+
+/* Default mark value used when none is provided. */
+#define MLX5_FLOW_MARK_DEFAULT 0xffffff
+
+/* Default mark mask for metadata legacy mode. */
+#define MLX5_FLOW_MARK_MASK 0xffffff
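+
+/*
+ * A minimal sketch of checking a received mark against the values
+ * above, assuming "mark" holds the mark field taken verbatim
+ * (big-endian) from the CQE:
+ *
+ *	if (MLX5_FLOW_MARK_IS_VALID(mark) &&
+ *	    mark != rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT))
+ *		... the packet matched a rule carrying a user mark ...
+ */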
+
+/* Maximum number of DS in WQE. Limited by 6-bit field. */
+#define MLX5_DSEG_MAX 63
+
+/* The completion mode offset in the WQE control segment line 2. */
+#define MLX5_COMP_MODE_OFFSET 2
+
+/* Amount of data bytes in minimal inline data segment. */
+#define MLX5_DSEG_MIN_INLINE_SIZE 12u
+
+/* Amount of data bytes in minimal inline eth segment. */
+#define MLX5_ESEG_MIN_INLINE_SIZE 18u
+
+/* Amount of data bytes after eth data segment. */
+#define MLX5_ESEG_EXTRA_DATA_SIZE 32u
+
+/* The maximum log value of segments per RQ WQE. */
+#define MLX5_MAX_LOG_RQ_SEGS 5u
+
+/* The alignment needed for WQ buffer. */
+#define MLX5_WQE_BUF_ALIGNMENT sysconf(_SC_PAGESIZE)
+
+/* Completion mode. */
+enum mlx5_completion_mode {
+ MLX5_COMP_ONLY_ERR = 0x0,
+ MLX5_COMP_ONLY_FIRST_ERR = 0x1,
+ MLX5_COMP_ALWAYS = 0x2,
+ MLX5_COMP_CQE_AND_EQE = 0x3,
+};
+
+/* MPW mode. */
+enum mlx5_mpw_mode {
+ MLX5_MPW_DISABLED,
+ MLX5_MPW,
+ MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
+};
+
+/* WQE Control segment. */
+struct mlx5_wqe_cseg {
+ uint32_t opcode;
+ uint32_t sq_ds;
+ uint32_t flags;
+ uint32_t misc;
+} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);
+
+/* Header of data segment. Minimal-size Data Segment. */
+struct mlx5_wqe_dseg {
+ uint32_t bcount;
+ union {
+ uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
+ struct {
+ uint32_t lkey;
+ uint64_t pbuf;
+ } __rte_packed;
+ };
+} __rte_packed;
+
+/* Subset of struct WQE Ethernet Segment. */
+struct mlx5_wqe_eseg {
+ union {
+ struct {
+ uint32_t swp_offs;
+ uint8_t cs_flags;
+ uint8_t swp_flags;
+ uint16_t mss;
+ uint32_t metadata;
+ uint16_t inline_hdr_sz;
+ union {
+ uint16_t inline_data;
+ uint16_t vlan_tag;
+ };
+ } __rte_packed;
+ struct {
+ uint32_t offsets;
+ uint32_t flags;
+ uint32_t flow_metadata;
+ uint32_t inline_hdr;
+ } __rte_packed;
+ };
+} __rte_packed;
+
+/* The title WQEBB, header of WQE. */
+struct mlx5_wqe {
+ union {
+ struct mlx5_wqe_cseg cseg;
+ uint32_t ctrl[4];
+ };
+ struct mlx5_wqe_eseg eseg;
+ union {
+ struct mlx5_wqe_dseg dseg[2];
+ uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
+ };
+} __rte_packed;
+
+/* WQE for Multi-Packet RQ. */
+struct mlx5_wqe_mprq {
+ struct mlx5_wqe_srq_next_seg next_seg;
+ struct mlx5_wqe_data_seg dseg;
+};
+
+#define MLX5_MPRQ_LEN_MASK 0x000ffff
+#define MLX5_MPRQ_LEN_SHIFT 0
+#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000
+#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16
+#define MLX5_MPRQ_FILLER_MASK 0x80000000
+#define MLX5_MPRQ_FILLER_SHIFT 31
+
+#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2
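+
+/*
+ * A minimal sketch of decoding the Multi-Packet RQ fields above from
+ * a CQE byte_cnt value already converted to CPU byte order:
+ *
+ *	uint32_t len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >>
+ *		       MLX5_MPRQ_LEN_SHIFT;
+ *	uint32_t strd_n = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+ *			  MLX5_MPRQ_STRIDE_NUM_SHIFT;
+ *	int filler = !!(byte_cnt & MLX5_MPRQ_FILLER_MASK);
+ */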
+
+/* CQ element structure - should be equal to the cache line size */
+struct mlx5_cqe {
+#if (RTE_CACHE_LINE_SIZE == 128)
+ uint8_t padding[64];
+#endif
+ uint8_t pkt_info;
+ uint8_t rsvd0;
+ uint16_t wqe_id;
+ uint8_t lro_tcppsh_abort_dupack;
+ uint8_t lro_min_ttl;
+ uint16_t lro_tcp_win;
+ uint32_t lro_ack_seq_num;
+ uint32_t rx_hash_res;
+ uint8_t rx_hash_type;
+ uint8_t rsvd1[3];
+ uint16_t csum;
+ uint8_t rsvd2[6];
+ uint16_t hdr_type_etc;
+ uint16_t vlan_info;
+ uint8_t lro_num_seg;
+ uint8_t rsvd3[3];
+ uint32_t flow_table_metadata;
+ uint8_t rsvd4[4];
+ uint32_t byte_cnt;
+ uint64_t timestamp;
+ uint32_t sop_drop_qpn;
+ uint16_t wqe_counter;
+ uint8_t rsvd5;
+ uint8_t op_own;
+};
+
+/* Adding direct verbs to data-path. */
+
+/* CQ sequence number mask. */
+#define MLX5_CQ_SQN_MASK 0x3
+
+/* CQ sequence number index. */
+#define MLX5_CQ_SQN_OFFSET 28
+
+/* CQ doorbell index mask. */
+#define MLX5_CI_MASK 0xffffff
+
+/* CQ doorbell offset. */
+#define MLX5_CQ_ARM_DB 1
+
+/* CQ doorbell offset. */
+#define MLX5_CQ_DOORBELL 0x20
+
+/* CQE format value. */
+#define MLX5_COMPRESSED 0x3
+
+/* CQ doorbell cmd types. */
+#define MLX5_CQ_DBR_CMD_SOL_ONLY (1 << 24)
+#define MLX5_CQ_DBR_CMD_ALL (0 << 24)
+
+/* Action type of header modification. */
+enum {
+ MLX5_MODIFICATION_TYPE_SET = 0x1,
+ MLX5_MODIFICATION_TYPE_ADD = 0x2,
+ MLX5_MODIFICATION_TYPE_COPY = 0x3,
+};
+
+/* The field of packet to be modified. */
+enum mlx5_modification_field {
+ MLX5_MODI_OUT_NONE = -1,
+ MLX5_MODI_OUT_SMAC_47_16 = 1,
+ MLX5_MODI_OUT_SMAC_15_0,
+ MLX5_MODI_OUT_ETHERTYPE,
+ MLX5_MODI_OUT_DMAC_47_16,
+ MLX5_MODI_OUT_DMAC_15_0,
+ MLX5_MODI_OUT_IP_DSCP,
+ MLX5_MODI_OUT_TCP_FLAGS,
+ MLX5_MODI_OUT_TCP_SPORT,
+ MLX5_MODI_OUT_TCP_DPORT,
+ MLX5_MODI_OUT_IPV4_TTL,
+ MLX5_MODI_OUT_UDP_SPORT,
+ MLX5_MODI_OUT_UDP_DPORT,
+ MLX5_MODI_OUT_SIPV6_127_96,
+ MLX5_MODI_OUT_SIPV6_95_64,
+ MLX5_MODI_OUT_SIPV6_63_32,
+ MLX5_MODI_OUT_SIPV6_31_0,
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0,
+ MLX5_MODI_OUT_SIPV4,
+ MLX5_MODI_OUT_DIPV4,
+ MLX5_MODI_OUT_FIRST_VID,
+ MLX5_MODI_IN_SMAC_47_16 = 0x31,
+ MLX5_MODI_IN_SMAC_15_0,
+ MLX5_MODI_IN_ETHERTYPE,
+ MLX5_MODI_IN_DMAC_47_16,
+ MLX5_MODI_IN_DMAC_15_0,
+ MLX5_MODI_IN_IP_DSCP,
+ MLX5_MODI_IN_TCP_FLAGS,
+ MLX5_MODI_IN_TCP_SPORT,
+ MLX5_MODI_IN_TCP_DPORT,
+ MLX5_MODI_IN_IPV4_TTL,
+ MLX5_MODI_IN_UDP_SPORT,
+ MLX5_MODI_IN_UDP_DPORT,
+ MLX5_MODI_IN_SIPV6_127_96,
+ MLX5_MODI_IN_SIPV6_95_64,
+ MLX5_MODI_IN_SIPV6_63_32,
+ MLX5_MODI_IN_SIPV6_31_0,
+ MLX5_MODI_IN_DIPV6_127_96,
+ MLX5_MODI_IN_DIPV6_95_64,
+ MLX5_MODI_IN_DIPV6_63_32,
+ MLX5_MODI_IN_DIPV6_31_0,
+ MLX5_MODI_IN_SIPV4,
+ MLX5_MODI_IN_DIPV4,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT,
+ MLX5_MODI_IN_IPV6_HOPLIMIT,
+ MLX5_MODI_META_DATA_REG_A,
+ MLX5_MODI_META_DATA_REG_B = 0x50,
+ MLX5_MODI_META_REG_C_0,
+ MLX5_MODI_META_REG_C_1,
+ MLX5_MODI_META_REG_C_2,
+ MLX5_MODI_META_REG_C_3,
+ MLX5_MODI_META_REG_C_4,
+ MLX5_MODI_META_REG_C_5,
+ MLX5_MODI_META_REG_C_6,
+ MLX5_MODI_META_REG_C_7,
+ MLX5_MODI_OUT_TCP_SEQ_NUM,
+ MLX5_MODI_IN_TCP_SEQ_NUM,
+ MLX5_MODI_OUT_TCP_ACK_NUM,
+ MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+};
+
+/* Total number of metadata reg_c's. */
+#define MLX5_MREG_C_NUM (MLX5_MODI_META_REG_C_7 - MLX5_MODI_META_REG_C_0 + 1)
+
+enum modify_reg {
+ REG_NONE = 0,
+ REG_A,
+ REG_B,
+ REG_C_0,
+ REG_C_1,
+ REG_C_2,
+ REG_C_3,
+ REG_C_4,
+ REG_C_5,
+ REG_C_6,
+ REG_C_7,
+};
+
+/* Modification sub command. */
+struct mlx5_modification_cmd {
+ union {
+ uint32_t data0;
+ struct {
+ unsigned int length:5;
+ unsigned int rsvd0:3;
+ unsigned int offset:5;
+ unsigned int rsvd1:3;
+ unsigned int field:12;
+ unsigned int action_type:4;
+ };
+ };
+ union {
+ uint32_t data1;
+ uint8_t data[4];
+ struct {
+ unsigned int rsvd2:8;
+ unsigned int dst_offset:5;
+ unsigned int rsvd3:3;
+ unsigned int dst_field:12;
+ unsigned int rsvd4:4;
+ };
+ };
+};
+
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \
+ (&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \
+ (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \
+ __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
+#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \
+ (__mlx5_bit_off(typ, fld) & 0xf))
+#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << \
+ __mlx5_16_bit_off(typ, fld))
+#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* Insert a value into a struct field. */
+#define MLX5_SET(typ, p, fld, v) \
+ do { \
+ u32 _v = v; \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
+ __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | \
+ (((_v) & __mlx5_mask(typ, fld)) << \
+ __mlx5_dw_bit_off(typ, fld))); \
+ } while (0)
+
+#define MLX5_SET64(typ, p, fld, v) \
+ do { \
+ MLX5_ASSERT(__mlx5_bit_sz(typ, fld) == 64); \
+ *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \
+ rte_cpu_to_be_64(v); \
+ } while (0)
+
+#define MLX5_SET16(typ, p, fld, v) \
+ do { \
+ u16 _v = v; \
+ *((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
+ rte_cpu_to_be_16((rte_be_to_cpu_16(*((__be16 *)(p) + \
+ __mlx5_16_off(typ, fld))) & \
+ (~__mlx5_16_mask(typ, fld))) | \
+ (((_v) & __mlx5_mask16(typ, fld)) << \
+ __mlx5_16_bit_off(typ, fld))); \
+ } while (0)
+
+#define MLX5_GET(typ, p, fld) \
+ ((rte_be_to_cpu_32(*((__be32 *)(p) +\
+ __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
+ __mlx5_mask(typ, fld))
+#define MLX5_GET16(typ, p, fld) \
+ ((rte_be_to_cpu_16(*((__be16 *)(p) + \
+ __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
+ __mlx5_mask16(typ, fld))
+#define MLX5_GET64(typ, p, fld) rte_be_to_cpu_64(*((__be64 *)(p) + \
+ __mlx5_64_off(typ, fld)))
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
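+
+/*
+ * A minimal sketch of driving the accessors above with the PRM
+ * structures below, e.g. preparing a QUERY_HCA_CAP command and
+ * reading its status back:
+ *
+ *	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ *	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
+ *
+ *	MLX5_SET(query_hca_cap_in, in, opcode,
+ *		 MLX5_CMD_OP_QUERY_HCA_CAP);
+ *	MLX5_SET(query_hca_cap_in, in, op_mod,
+ *		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
+ *		 MLX5_HCA_CAP_OPMOD_GET_CUR);
+ *	... execute the command, then ...
+ *	status = MLX5_GET(query_hca_cap_out, out, status);
+ */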
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 source_vhci_port[0x4];
+ u8 source_sqn[0x18];
+ u8 reserved_at_20[0x10];
+ u8 source_port[0x10];
+ u8 outer_second_prio[0x3];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0xc];
+ u8 inner_second_prio[0x3];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0xc];
+ u8 outer_second_cvlan_tag[0x1];
+ u8 inner_second_cvlan_tag[0x1];
+ u8 outer_second_svlan_tag[0x1];
+ u8 inner_second_svlan_tag[0x1];
+ u8 reserved_at_64[0xc];
+ u8 gre_protocol[0x10];
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+ u8 vxlan_vni[0x18];
+ u8 reserved_at_b8[0x8];
+ u8 geneve_vni[0x18];
+ u8 reserved_at_e4[0x7];
+ u8 geneve_oam[0x1];
+ u8 reserved_at_e0[0xc];
+ u8 outer_ipv6_flow_label[0x14];
+ u8 reserved_at_100[0xc];
+ u8 inner_ipv6_flow_label[0x14];
+ u8 reserved_at_120[0xa];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_protocol_type[0x10];
+ u8 reserved_at_140[0xc0];
+};
+
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_at_0[0x60];
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_at_0[0x80];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+ u8 smac_47_16[0x20];
+ u8 smac_15_0[0x10];
+ u8 ethertype[0x10];
+ u8 dmac_47_16[0x20];
+ u8 dmac_15_0[0x10];
+ u8 first_prio[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vid[0xc];
+ u8 ip_protocol[0x8];
+ u8 ip_dscp[0x6];
+ u8 ip_ecn[0x2];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
+ u8 frag[0x1];
+ u8 ip_version[0x4];
+ u8 tcp_flags[0x9];
+ u8 tcp_sport[0x10];
+ u8 tcp_dport[0x10];
+ u8 reserved_at_c0[0x18];
+ u8 ip_ttl_hoplimit[0x8];
+ u8 udp_sport[0x10];
+ u8 udp_dport[0x10];
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
+};
+
+struct mlx5_ifc_fte_match_mpls_bits {
+ u8 mpls_label[0x14];
+ u8 mpls_exp[0x3];
+ u8 mpls_s_bos[0x1];
+ u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+ struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
+ u8 metadata_reg_c_7[0x20];
+ u8 metadata_reg_c_6[0x20];
+ u8 metadata_reg_c_5[0x20];
+ u8 metadata_reg_c_4[0x20];
+ u8 metadata_reg_c_3[0x20];
+ u8 metadata_reg_c_2[0x20];
+ u8 metadata_reg_c_1[0x20];
+ u8 metadata_reg_c_0[0x20];
+ u8 metadata_reg_a[0x20];
+ u8 metadata_reg_b[0x20];
+ u8 reserved_at_1c0[0x40];
+};
+
+struct mlx5_ifc_fte_match_set_misc3_bits {
+ u8 inner_tcp_seq_num[0x20];
+ u8 outer_tcp_seq_num[0x20];
+ u8 inner_tcp_ack_num[0x20];
+ u8 outer_tcp_ack_num[0x20];
+ u8 reserved_at_auto1[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_a8[0x10];
+ u8 icmp_header_data[0x20];
+ u8 icmpv6_header_data[0x20];
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+ u8 reserved_at_120[0x20];
+ u8 gtpu_teid[0x20];
+ u8 gtpu_msg_type[0x08];
+ u8 gtpu_msg_flags[0x08];
+ u8 reserved_at_170[0x90];
+};
+
+/* Flow matcher. */
+struct mlx5_ifc_fte_match_param_bits {
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+ struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
+ struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
+};
+
+enum {
+ MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT
+};
+
+enum {
+ MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
+ MLX5_CMD_OP_CREATE_MKEY = 0x200,
+ MLX5_CMD_OP_CREATE_CQ = 0x400,
+ MLX5_CMD_OP_CREATE_QP = 0x500,
+ MLX5_CMD_OP_RST2INIT_QP = 0x502,
+ MLX5_CMD_OP_INIT2RTR_QP = 0x503,
+ MLX5_CMD_OP_RTR2RTS_QP = 0x504,
+ MLX5_CMD_OP_RTS2RTS_QP = 0x505,
+ MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
+ MLX5_CMD_OP_QP_2ERR = 0x507,
+ MLX5_CMD_OP_QP_2RST = 0x50A,
+ MLX5_CMD_OP_QUERY_QP = 0x50B,
+ MLX5_CMD_OP_SQD2RTS_QP = 0x50C,
+ MLX5_CMD_OP_INIT2INIT_QP = 0x50E,
+ MLX5_CMD_OP_SUSPEND_QP = 0x50F,
+ MLX5_CMD_OP_RESUME_QP = 0x510,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+ MLX5_CMD_OP_CREATE_TIR = 0x900,
+ MLX5_CMD_OP_CREATE_SQ = 0X904,
+ MLX5_CMD_OP_MODIFY_SQ = 0X905,
+ MLX5_CMD_OP_CREATE_RQ = 0x908,
+ MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_RQT = 0x916,
+ MLX5_CMD_OP_MODIFY_RQT = 0x917,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
+ MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02,
+};
+
+enum {
+ MLX5_MKC_ACCESS_MODE_MTT = 0x1,
+ MLX5_MKC_ACCESS_MODE_KLM = 0x2,
+ MLX5_MKC_ACCESS_MODE_KLM_FBS = 0x3,
+};
+
+#define MLX5_ADAPTER_PAGE_SHIFT 12
+#define MLX5_LOG_RQ_STRIDE_SHIFT 4
+/**
+ * The batch counter dcs id starts from 0x800000 while the non-batch
+ * counter id starts from 0. The counter is now indexed by the pool
+ * index and the offset of the counter in the pool counters_raw array,
+ * so the counter index is the same for batch and non-batch counters.
+ * Adding the 0x800000 batch counter offset to a batch counter index
+ * indicates whether the index comes from a batch or a non-batch
+ * container pool.
+ */
+#define MLX5_CNT_BATCH_OFFSET 0x800000
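+
+/*
+ * A rough sketch of the resulting index arithmetic: a batch counter
+ * index carries the bias above, so the container type and pool-local
+ * index can be recovered as:
+ *
+ *	int batch = !!(cnt_idx & MLX5_CNT_BATCH_OFFSET);
+ *	uint32_t idx = cnt_idx - batch * MLX5_CNT_BATCH_OFFSET;
+ */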
+
+/* Flow counters. */
+struct mlx5_ifc_alloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 flow_counter_id[0x20];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 flow_counter_id[0x20];
+ u8 reserved_at_40[0x18];
+ u8 flow_counter_bulk[0x8];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 flow_counter_id[0x20];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+ u8 packets[0x40];
+ u8 octets[0x40];
+};
+
+struct mlx5_ifc_query_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_traffic_counter_bits flow_statistics[];
+};
+
+struct mlx5_ifc_query_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x20];
+ u8 mkey[0x20];
+ u8 address[0x40];
+ u8 clear[0x1];
+ u8 dump_to_memory[0x1];
+ u8 num_of_counters[0x1e];
+ u8 flow_counter_id[0x20];
+};
+
+#define MLX5_MAX_KLM_BYTE_COUNT 0x80000000u
+#define MLX5_MIN_KLM_FIXED_BUFFER_SIZE 0x1000u
+
+struct mlx5_ifc_klm_bits {
+ u8 byte_count[0x20];
+ u8 mkey[0x20];
+ u8 address[0x40];
+};
+
+struct mlx5_ifc_mkc_bits {
+ u8 reserved_at_0[0x1];
+ u8 free[0x1];
+ u8 reserved_at_2[0x1];
+ u8 access_mode_4_2[0x3];
+ u8 reserved_at_6[0x7];
+ u8 relaxed_ordering_write[0x1];
+ u8 reserved_at_e[0x1];
+ u8 small_fence_on_rdma_read_response[0x1];
+ u8 umr_en[0x1];
+ u8 a[0x1];
+ u8 rw[0x1];
+ u8 rr[0x1];
+ u8 lw[0x1];
+ u8 lr[0x1];
+ u8 access_mode_1_0[0x2];
+ u8 reserved_at_18[0x8];
+
+ u8 qpn[0x18];
+ u8 mkey_7_0[0x8];
+
+ u8 reserved_at_40[0x20];
+
+ u8 length64[0x1];
+ u8 bsf_en[0x1];
+ u8 sync_umr[0x1];
+ u8 reserved_at_63[0x2];
+ u8 expected_sigerr_count[0x1];
+ u8 reserved_at_66[0x1];
+ u8 en_rinval[0x1];
+ u8 pd[0x18];
+
+ u8 start_addr[0x40];
+
+ u8 len[0x40];
+
+ u8 bsf_octword_size[0x20];
+
+ u8 reserved_at_120[0x80];
+
+ u8 translations_octword_size[0x20];
+
+ u8 reserved_at_1c0[0x19];
+ u8 relaxed_ordering_read[0x1];
+ u8 reserved_at_1da[0x1];
+ u8 log_page_size[0x5];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 pg_access[0x1];
+ u8 reserved_at_61[0x1f];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_at_280[0x80];
+
+ u8 translations_octword_actual_size[0x20];
+
+ u8 mkey_umem_id[0x20];
+
+ u8 mkey_umem_offset[0x40];
+
+ u8 reserved_at_380[0x500];
+
+ u8 klm_pas_mtt[][0x20];
+};
+
+enum {
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS = 0x1 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q = (1ULL << 0xd),
+};
+
+enum {
+ MLX5_HCA_CAP_OPMOD_GET_MAX = 0,
+ MLX5_HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
+enum {
+ MLX5_CAP_INLINE_MODE_L2,
+ MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+ MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
+enum {
+ MLX5_INLINE_MODE_NONE,
+ MLX5_INLINE_MODE_L2,
+ MLX5_INLINE_MODE_IP,
+ MLX5_INLINE_MODE_TCP_UDP,
+ MLX5_INLINE_MODE_RESERVED4,
+ MLX5_INLINE_MODE_INNER_L2,
+ MLX5_INLINE_MODE_INNER_IP,
+ MLX5_INLINE_MODE_INNER_TCP_UDP,
+};
+
+/* HCA bit masks indicating which Flex parser protocols are already enabled. */
+#define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)
+#define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)
+#define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)
+#define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)
+#define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)
+#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)
+#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)
+#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)
+
+struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 reserved_at_0[0x30];
+ u8 vhca_id[0x10];
+ u8 reserved_at_40[0x40];
+ u8 log_max_srq_sz[0x8];
+ u8 log_max_qp_sz[0x8];
+ u8 reserved_at_90[0xb];
+ u8 log_max_qp[0x5];
+ u8 reserved_at_a0[0xb];
+ u8 log_max_srq[0x5];
+ u8 reserved_at_b0[0x10];
+ u8 reserved_at_c0[0x8];
+ u8 log_max_cq_sz[0x8];
+ u8 reserved_at_d0[0xb];
+ u8 log_max_cq[0x5];
+ u8 log_max_eq_sz[0x8];
+ u8 relaxed_ordering_write[0x1];
+ u8 relaxed_ordering_read[0x1];
+ u8 log_max_mkey[0x6];
+ u8 reserved_at_f0[0x8];
+ u8 dump_fill_mkey[0x1];
+ u8 reserved_at_f9[0x3];
+ u8 log_max_eq[0x4];
+ u8 max_indirection[0x8];
+ u8 fixed_buffer_size[0x1];
+ u8 log_max_mrw_sz[0x7];
+ u8 force_teardown[0x1];
+ u8 reserved_at_111[0x1];
+ u8 log_max_bsf_list_size[0x6];
+ u8 umr_extended_translation_offset[0x1];
+ u8 null_mkey[0x1];
+ u8 log_max_klm_list_size[0x6];
+ u8 reserved_at_120[0xa];
+ u8 log_max_ra_req_dc[0x6];
+ u8 reserved_at_130[0xa];
+ u8 log_max_ra_res_dc[0x6];
+ u8 reserved_at_140[0xa];
+ u8 log_max_ra_req_qp[0x6];
+ u8 reserved_at_150[0xa];
+ u8 log_max_ra_res_qp[0x6];
+ u8 end_pad[0x1];
+ u8 cc_query_allowed[0x1];
+ u8 cc_modify_allowed[0x1];
+ u8 start_pad[0x1];
+ u8 cache_line_128byte[0x1];
+ u8 reserved_at_165[0xa];
+ u8 qcam_reg[0x1];
+ u8 gid_table_size[0x10];
+ u8 out_of_seq_cnt[0x1];
+ u8 vport_counters[0x1];
+ u8 retransmission_q_counters[0x1];
+ u8 debug[0x1];
+ u8 modify_rq_counter_set_id[0x1];
+ u8 rq_delay_drop[0x1];
+ u8 max_qp_cnt[0xa];
+ u8 pkey_table_size[0x10];
+ u8 vport_group_manager[0x1];
+ u8 vhca_group_manager[0x1];
+ u8 ib_virt[0x1];
+ u8 eth_virt[0x1];
+ u8 vnic_env_queue_counters[0x1];
+ u8 ets[0x1];
+ u8 nic_flow_table[0x1];
+ u8 eswitch_manager[0x1];
+ u8 device_memory[0x1];
+ u8 mcam_reg[0x1];
+ u8 pcam_reg[0x1];
+ u8 local_ca_ack_delay[0x5];
+ u8 port_module_event[0x1];
+ u8 enhanced_error_q_counters[0x1];
+ u8 ports_check[0x1];
+ u8 reserved_at_1b3[0x1];
+ u8 disable_link_up[0x1];
+ u8 beacon_led[0x1];
+ u8 port_type[0x2];
+ u8 num_ports[0x8];
+ u8 reserved_at_1c0[0x1];
+ u8 pps[0x1];
+ u8 pps_modify[0x1];
+ u8 log_max_msg[0x5];
+ u8 reserved_at_1c8[0x4];
+ u8 max_tc[0x4];
+ u8 temp_warn_event[0x1];
+ u8 dcbx[0x1];
+ u8 general_notification_event[0x1];
+ u8 reserved_at_1d3[0x2];
+ u8 fpga[0x1];
+ u8 rol_s[0x1];
+ u8 rol_g[0x1];
+ u8 reserved_at_1d8[0x1];
+ u8 wol_s[0x1];
+ u8 wol_g[0x1];
+ u8 wol_a[0x1];
+ u8 wol_b[0x1];
+ u8 wol_m[0x1];
+ u8 wol_u[0x1];
+ u8 wol_p[0x1];
+ u8 stat_rate_support[0x10];
+ u8 reserved_at_1f0[0xc];
+ u8 cqe_version[0x4];
+ u8 compact_address_vector[0x1];
+ u8 striding_rq[0x1];
+ u8 reserved_at_202[0x1];
+ u8 ipoib_enhanced_offloads[0x1];
+ u8 ipoib_basic_offloads[0x1];
+ u8 reserved_at_205[0x1];
+ u8 repeated_block_disabled[0x1];
+ u8 umr_modify_entity_size_disabled[0x1];
+ u8 umr_modify_atomic_disabled[0x1];
+ u8 umr_indirect_mkey_disabled[0x1];
+ u8 umr_fence[0x2];
+ u8 reserved_at_20c[0x3];
+ u8 drain_sigerr[0x1];
+ u8 cmdif_checksum[0x2];
+ u8 sigerr_cqe[0x1];
+ u8 reserved_at_213[0x1];
+ u8 wq_signature[0x1];
+ u8 sctr_data_cqe[0x1];
+ u8 reserved_at_216[0x1];
+ u8 sho[0x1];
+ u8 tph[0x1];
+ u8 rf[0x1];
+ u8 dct[0x1];
+ u8 qos[0x1];
+ u8 eth_net_offloads[0x1];
+ u8 roce[0x1];
+ u8 atomic[0x1];
+ u8 reserved_at_21f[0x1];
+ u8 cq_oi[0x1];
+ u8 cq_resize[0x1];
+ u8 cq_moderation[0x1];
+ u8 reserved_at_223[0x3];
+ u8 cq_eq_remap[0x1];
+ u8 pg[0x1];
+ u8 block_lb_mc[0x1];
+ u8 reserved_at_229[0x1];
+ u8 scqe_break_moderation[0x1];
+ u8 cq_period_start_from_cqe[0x1];
+ u8 cd[0x1];
+ u8 reserved_at_22d[0x1];
+ u8 apm[0x1];
+ u8 vector_calc[0x1];
+ u8 umr_ptr_rlky[0x1];
+ u8 imaicl[0x1];
+ u8 reserved_at_232[0x4];
+ u8 qkv[0x1];
+ u8 pkv[0x1];
+ u8 set_deth_sqpn[0x1];
+ u8 reserved_at_239[0x3];
+ u8 xrc[0x1];
+ u8 ud[0x1];
+ u8 uc[0x1];
+ u8 rc[0x1];
+ u8 uar_4k[0x1];
+ u8 reserved_at_241[0x9];
+ u8 uar_sz[0x6];
+ u8 reserved_at_250[0x8];
+ u8 log_pg_sz[0x8];
+ u8 bf[0x1];
+ u8 driver_version[0x1];
+ u8 pad_tx_eth_packet[0x1];
+ u8 reserved_at_263[0x8];
+ u8 log_bf_reg_size[0x5];
+ u8 reserved_at_270[0xb];
+ u8 lag_master[0x1];
+ u8 num_lag_ports[0x4];
+ u8 reserved_at_280[0x10];
+ u8 max_wqe_sz_sq[0x10];
+ u8 reserved_at_2a0[0x10];
+ u8 max_wqe_sz_rq[0x10];
+ u8 max_flow_counter_31_16[0x10];
+ u8 max_wqe_sz_sq_dc[0x10];
+ u8 reserved_at_2e0[0x7];
+ u8 max_qp_mcg[0x19];
+ u8 reserved_at_300[0x10];
+ u8 flow_counter_bulk_alloc[0x08];
+ u8 log_max_mcg[0x8];
+ u8 reserved_at_320[0x3];
+ u8 log_max_transport_domain[0x5];
+ u8 reserved_at_328[0x3];
+ u8 log_max_pd[0x5];
+ u8 reserved_at_330[0xb];
+ u8 log_max_xrcd[0x5];
+ u8 nic_receive_steering_discard[0x1];
+ u8 receive_discard_vport_down[0x1];
+ u8 transmit_discard_vport_down[0x1];
+ u8 reserved_at_343[0x5];
+ u8 log_max_flow_counter_bulk[0x8];
+ u8 max_flow_counter_15_0[0x10];
+ u8 modify_tis[0x1];
+ u8 flow_counters_dump[0x1];
+ u8 reserved_at_360[0x1];
+ u8 log_max_rq[0x5];
+ u8 reserved_at_368[0x3];
+ u8 log_max_sq[0x5];
+ u8 reserved_at_370[0x3];
+ u8 log_max_tir[0x5];
+ u8 reserved_at_378[0x3];
+ u8 log_max_tis[0x5];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_at_381[0x2];
+ u8 log_max_rmp[0x5];
+ u8 reserved_at_388[0x3];
+ u8 log_max_rqt[0x5];
+ u8 reserved_at_390[0x3];
+ u8 log_max_rqt_size[0x5];
+ u8 reserved_at_398[0x3];
+ u8 log_max_tis_per_sq[0x5];
+ u8 ext_stride_num_range[0x1];
+ u8 reserved_at_3a1[0x2];
+ u8 log_max_stride_sz_rq[0x5];
+ u8 reserved_at_3a8[0x3];
+ u8 log_min_stride_sz_rq[0x5];
+ u8 reserved_at_3b0[0x3];
+ u8 log_max_stride_sz_sq[0x5];
+ u8 reserved_at_3b8[0x3];
+ u8 log_min_stride_sz_sq[0x5];
+ u8 hairpin[0x1];
+ u8 reserved_at_3c1[0x2];
+ u8 log_max_hairpin_queues[0x5];
+ u8 reserved_at_3c8[0x3];
+ u8 log_max_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3d0[0x3];
+ u8 log_max_hairpin_num_packets[0x5];
+ u8 reserved_at_3d8[0x3];
+ u8 log_max_wq_sz[0x5];
+ u8 nic_vport_change_event[0x1];
+ u8 disable_local_lb_uc[0x1];
+ u8 disable_local_lb_mc[0x1];
+ u8 log_min_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3e8[0x3];
+ u8 log_max_vlan_list[0x5];
+ u8 reserved_at_3f0[0x3];
+ u8 log_max_current_mc_list[0x5];
+ u8 reserved_at_3f8[0x3];
+ u8 log_max_current_uc_list[0x5];
+ u8 general_obj_types[0x40];
+ u8 reserved_at_440[0x20];
+ u8 reserved_at_460[0x10];
+ u8 max_num_eqs[0x10];
+ u8 reserved_at_480[0x3];
+ u8 log_max_l2_table[0x5];
+ u8 reserved_at_488[0x8];
+ u8 log_uar_page_sz[0x10];
+ u8 reserved_at_4a0[0x20];
+ u8 device_frequency_mhz[0x20];
+ u8 device_frequency_khz[0x20];
+ u8 reserved_at_500[0x20];
+ u8 num_of_uars_per_page[0x20];
+ u8 flex_parser_protocols[0x20];
+ u8 reserved_at_560[0x20];
+ u8 reserved_at_580[0x3c];
+ u8 mini_cqe_resp_stride_index[0x1];
+ u8 cqe_128_always[0x1];
+ u8 cqe_compression_128[0x1];
+ u8 cqe_compression[0x1];
+ u8 cqe_compression_timeout[0x10];
+ u8 cqe_compression_max_num[0x10];
+ u8 reserved_at_5e0[0x10];
+ u8 tag_matching[0x1];
+ u8 rndv_offload_rc[0x1];
+ u8 rndv_offload_dc[0x1];
+ u8 log_tag_matching_list_sz[0x5];
+ u8 reserved_at_5f8[0x3];
+ u8 log_max_xrq[0x5];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 reserved_at_618[0x6];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1e1];
+};
+
+struct mlx5_ifc_qos_cap_bits {
+ u8 packet_pacing[0x1];
+ u8 esw_scheduling[0x1];
+ u8 esw_bw_share[0x1];
+ u8 esw_rate_limit[0x1];
+ u8 reserved_at_4[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 packet_pacing_typical_size[0x1];
+ u8 flow_meter_srtcm[0x1];
+ u8 reserved_at_8[0x8];
+ u8 log_max_flow_meter[0x8];
+ u8 flow_meter_reg_id[0x8];
+ u8 reserved_at_25[0x8];
+ u8 flow_meter_reg_share[0x1];
+ u8 reserved_at_2e[0x17];
+ u8 packet_pacing_max_rate[0x20];
+ u8 packet_pacing_min_rate[0x20];
+ u8 reserved_at_80[0x10];
+ u8 packet_pacing_rate_table_size[0x10];
+ u8 esw_element_type[0x10];
+ u8 esw_tsar_type[0x10];
+ u8 reserved_at_c0[0x10];
+ u8 max_qos_para_vport[0x10];
+ u8 max_tsar_bw_share[0x20];
+ u8 reserved_at_100[0x6e8];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 csum_cap[0x1];
+ u8 vlan_cap[0x1];
+ u8 lro_cap[0x1];
+ u8 lro_psh_flag[0x1];
+ u8 lro_time_stamp[0x1];
+ u8 lro_max_msg_sz_mode[0x2];
+ u8 wqe_vlan_insert[0x1];
+ u8 self_lb_en_modifiable[0x1];
+ u8 self_lb_mc[0x1];
+ u8 self_lb_uc[0x1];
+ u8 max_lso_cap[0x5];
+ u8 multi_pkt_send_wqe[0x2];
+ u8 wqe_inline_mode[0x2];
+ u8 rss_ind_tbl_cap[0x4];
+ u8 reg_umr_sq[0x1];
+ u8 scatter_fcs[0x1];
+ u8 enhanced_multi_pkt_send_wqe[0x1];
+ u8 tunnel_lso_const_out_ip_id[0x1];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
+ u8 tunnel_stateless_gre[0x1];
+ u8 tunnel_stateless_vxlan[0x1];
+ u8 swp[0x1];
+ u8 swp_csum[0x1];
+ u8 swp_lso[0x1];
+ u8 reserved_at_23[0x8];
+ u8 tunnel_stateless_gtp[0x1];
+ u8 reserved_at_25[0x4];
+ u8 max_vxlan_udp_ports[0x8];
+ u8 reserved_at_38[0x6];
+ u8 max_geneve_opt_len[0x1];
+ u8 tunnel_stateless_geneve_rx[0x1];
+ u8 reserved_at_40[0x10];
+ u8 lro_min_mss_size[0x10];
+ u8 reserved_at_60[0x120];
+ u8 lro_timer_supported_periods[4][0x20];
+ u8 reserved_at_200[0x600];
+};
+
+enum {
+ MLX5_VIRTQ_TYPE_SPLIT = 0,
+ MLX5_VIRTQ_TYPE_PACKED = 1,
+};
+
+enum {
+ MLX5_VIRTQ_EVENT_MODE_NO_MSIX = 0,
+ MLX5_VIRTQ_EVENT_MODE_QP = 1,
+ MLX5_VIRTQ_EVENT_MODE_MSIX = 2,
+};
+
+struct mlx5_ifc_virtio_emulation_cap_bits {
+ u8 desc_tunnel_offload_type[0x1];
+ u8 eth_frame_offload_type[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 tso_ipv4[0x1];
+ u8 tso_ipv6[0x1];
+ u8 tx_csum[0x1];
+ u8 rx_csum[0x1];
+ u8 reserved_at_7[0x1][0x9];
+ u8 event_mode[0x8];
+ u8 virtio_queue_type[0x8];
+ u8 reserved_at_20[0x13];
+ u8 log_doorbell_stride[0x5];
+ u8 reserved_at_3b[0x3];
+ u8 log_doorbell_bar_size[0x5];
+ u8 doorbell_bar_offset[0x40];
+ u8 reserved_at_80[0x8];
+ u8 max_num_virtio_queues[0x18];
+ u8 reserved_at_a0[0x60];
+ u8 umem_1_buffer_param_a[0x20];
+ u8 umem_1_buffer_param_b[0x20];
+ u8 umem_2_buffer_param_a[0x20];
+ u8 umem_2_buffer_param_b[0x20];
+ u8 umem_3_buffer_param_a[0x20];
+ u8 umem_3_buffer_param_b[0x20];
+ u8 reserved_at_1c0[0x620];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+ struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_per_protocol_networking_offload_caps_bits
+ per_protocol_networking_offload_caps;
+ struct mlx5_ifc_qos_cap_bits qos_cap;
+ struct mlx5_ifc_virtio_emulation_cap_bits vdpa_caps;
+ u8 reserved_at_0[0x8000];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+ u8 reserved_at_0[0x10];
+ u8 mac_addr_47_32[0x10];
+ u8 mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+ u8 reserved_at_0[0x5];
+ u8 min_wqe_inline_mode[0x3];
+ u8 reserved_at_8[0x15];
+ u8 disable_mc_local_lb[0x1];
+ u8 disable_uc_local_lb[0x1];
+ u8 roce_en[0x1];
+ u8 arm_change_event[0x1];
+ u8 reserved_at_21[0x1a];
+ u8 event_on_mtu[0x1];
+ u8 event_on_promisc_change[0x1];
+ u8 event_on_vlan_change[0x1];
+ u8 event_on_mc_address_change[0x1];
+ u8 event_on_uc_address_change[0x1];
+ u8 reserved_at_40[0xc];
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+ u8 reserved_at_60[0xd0];
+ u8 mtu[0x10];
+ u8 system_image_guid[0x40];
+ u8 port_guid[0x40];
+ u8 node_guid[0x40];
+ u8 reserved_at_200[0x140];
+ u8 qkey_violation_counter[0x10];
+ u8 reserved_at_350[0x430];
+ u8 promisc_uc[0x1];
+ u8 promisc_mc[0x1];
+ u8 promisc_all[0x1];
+ u8 reserved_at_783[0x2];
+ u8 allowed_list_type[0x3];
+ u8 reserved_at_788[0xc];
+ u8 allowed_list_size[0xc];
+ struct mlx5_ifc_mac_address_layout_bits permanent_address;
+ u8 reserved_at_7e0[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+ u8 reserved_at_60[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_at_68[0x18];
+};
+
+struct mlx5_ifc_tisc_bits {
+ u8 strict_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1[0x3];
+ u8 lag_tx_port_affinity[0x04];
+ u8 reserved_at_8[0x4];
+ u8 prio[0x4];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x100];
+ u8 reserved_at_120[0x8];
+ u8 transport_domain[0x18];
+ u8 reserved_at_140[0x8];
+ u8 underlay_qpn[0x18];
+ u8 reserved_at_160[0x3a0];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 tisn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 transport_domain[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_WQ_TYPE_LINKED_LIST = 0x0,
+ MLX5_WQ_TYPE_CYCLIC = 0x1,
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3,
+};
+
+enum {
+ MLX5_WQ_END_PAD_MODE_NONE = 0x0,
+ MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+ u8 wq_type[0x4];
+ u8 wq_signature[0x1];
+ u8 end_padding_mode[0x2];
+ u8 cd_slave[0x1];
+ u8 reserved_at_8[0x18];
+ u8 hds_skip_first_sge[0x1];
+ u8 log2_hds_buf_size[0x3];
+ u8 reserved_at_24[0x7];
+ u8 page_offset[0x5];
+ u8 lwm[0x10];
+ u8 reserved_at_40[0x8];
+ u8 pd[0x18];
+ u8 reserved_at_60[0x8];
+ u8 uar_page[0x18];
+ u8 dbr_addr[0x40];
+ u8 hw_counter[0x20];
+ u8 sw_counter[0x20];
+ u8 reserved_at_100[0xc];
+ u8 log_wq_stride[0x4];
+ u8 reserved_at_110[0x3];
+ u8 log_wq_pg_sz[0x5];
+ u8 reserved_at_118[0x3];
+ u8 log_wq_sz[0x5];
+ u8 dbr_umem_valid[0x1];
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_122[0x1];
+ u8 log_hairpin_num_packets[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_hairpin_data_sz[0x5];
+ u8 reserved_at_130[0x4];
+ u8 single_wqe_log_num_of_strides[0x4];
+ u8 two_byte_shift_en[0x1];
+ u8 reserved_at_139[0x4];
+ u8 single_stride_log_num_of_bytes[0x3];
+ u8 dbr_umem_id[0x20];
+ u8 wq_umem_id[0x20];
+ u8 wq_umem_offset[0x40];
+ u8 reserved_at_1c0[0x440];
+};
+
+enum {
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
+};
+
+enum {
+ MLX5_RQC_STATE_RST = 0x0,
+ MLX5_RQC_STATE_RDY = 0x1,
+ MLX5_RQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+ u8 rlky[0x1];
+ u8 delay_drop_en[0x1];
+ u8 scatter_fcs[0x1];
+ u8 vsd[0x1];
+ u8 mem_rq_type[0x4];
+ u8 state[0x4];
+ u8 reserved_at_c[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0x11];
+ u8 reserved_at_20[0x8];
+ u8 user_index[0x18];
+ u8 reserved_at_40[0x8];
+ u8 cqn[0x18];
+ u8 counter_set_id[0x8];
+ u8 reserved_at_68[0x18];
+ u8 reserved_at_80[0x8];
+ u8 rmpn[0x18];
+ u8 reserved_at_a0[0x8];
+ u8 hairpin_peer_sq[0x18];
+ u8 reserved_at_c0[0x10];
+ u8 hairpin_peer_vhca[0x10];
+ u8 reserved_at_e0[0xa0];
+ struct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. */
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 rqn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 tisn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+enum {
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM = 1ULL << 0,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 rq_state[0x4];
+ u8 reserved_at_44[0x4];
+ u8 rqn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 modify_bitmask[0x40];
+ u8 reserved_at_c0[0x40];
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+enum {
+ MLX5_L3_PROT_TYPE_IPV4 = 0,
+ MLX5_L3_PROT_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_L4_PROT_TYPE_TCP = 0,
+ MLX5_L4_PROT_TYPE_UDP = 1,
+};
+
+enum {
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+ u8 l3_prot_type[0x1];
+ u8 l4_prot_type[0x1];
+ u8 selected_fields[0x1e];
+};
+
+enum {
+ MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
+ MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
+};
+
+enum {
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+};
+
+enum {
+ MLX5_RX_HASH_FN_NONE = 0x0,
+ MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+ MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
+};
+
+enum {
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
+};
+
+enum {
+ MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 = 0x0,
+ MLX5_LRO_MAX_MSG_SIZE_START_FROM_L2 = 0x1,
+};
+
+struct mlx5_ifc_tirc_bits {
+ u8 reserved_at_0[0x20];
+ u8 disp_type[0x4];
+ u8 reserved_at_24[0x1c];
+ u8 reserved_at_40[0x40];
+ u8 reserved_at_80[0x4];
+ u8 lro_timeout_period_usecs[0x10];
+ u8 lro_enable_mask[0x4];
+ u8 lro_max_msg_sz[0x8];
+ u8 reserved_at_a0[0x40];
+ u8 reserved_at_e0[0x8];
+ u8 inline_rqn[0x18];
+ u8 rx_hash_symmetric[0x1];
+ u8 reserved_at_101[0x1];
+ u8 tunneled_offload_en[0x1];
+ u8 reserved_at_103[0x5];
+ u8 indirect_table[0x18];
+ u8 rx_hash_fn[0x4];
+ u8 reserved_at_124[0x2];
+ u8 self_lb_block[0x2];
+ u8 transport_domain[0x18];
+ u8 rx_hash_toeplitz_key[10][0x20];
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+ u8 reserved_at_2c0[0x4c0];
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 tirn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+enum {
+ MLX5_INLINE_Q_TYPE_RQ = 0x0,
+ MLX5_INLINE_Q_TYPE_VIRTQ = 0x1,
+};
+
+struct mlx5_ifc_rq_num_bits {
+ u8 reserved_at_0[0x8];
+ u8 rq_num[0x18];
+};
+
+struct mlx5_ifc_rqtc_bits {
+ u8 reserved_at_0[0xa5];
+ u8 list_q_type[0x3];
+ u8 reserved_at_a8[0x8];
+ u8 rqt_max_size[0x10];
+ u8 reserved_at_c0[0x10];
+ u8 rqt_actual_size[0x10];
+ u8 reserved_at_e0[0x6a0];
+ struct mlx5_ifc_rq_num_bits rq_num[];
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 rqtn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+struct mlx5_ifc_create_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 rqtn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 modify_bitmask[0x40];
+ u8 reserved_at_c0[0x40];
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+struct mlx5_ifc_modify_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_SQC_STATE_RST = 0x0,
+ MLX5_SQC_STATE_RDY = 0x1,
+ MLX5_SQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+ u8 rlky[0x1];
+ u8 cd_master[0x1];
+ u8 fre[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 allow_multi_pkt_send_wqe[0x1];
+ u8 min_wqe_inline_mode[0x3];
+ u8 state[0x4];
+ u8 reg_umr[0x1];
+ u8 allow_swp[0x1];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0x11];
+ u8 reserved_at_20[0x8];
+ u8 user_index[0x18];
+ u8 reserved_at_40[0x8];
+ u8 cqn[0x18];
+ u8 reserved_at_60[0x8];
+ u8 hairpin_peer_rq[0x18];
+ u8 reserved_at_80[0x10];
+ u8 hairpin_peer_vhca[0x10];
+ u8 reserved_at_a0[0x50];
+ u8 packet_pacing_rate_limit_index[0x10];
+ u8 tis_lst_sz[0x10];
+ u8 reserved_at_110[0x10];
+ u8 reserved_at_120[0x40];
+ u8 reserved_at_160[0x8];
+ u8 tis_num_0[0x18];
+ struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 sqn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 sq_state[0x4];
+ u8 reserved_at_44[0x4];
+ u8 sqn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 modify_bitmask[0x40];
+ u8 reserved_at_c0[0x40];
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 sqn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+enum {
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE = (1ULL << 0),
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS = (1ULL << 1),
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR = (1ULL << 2),
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS = (1ULL << 3),
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EIR = (1ULL << 4),
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+ u8 valid[0x1]; // 00h
+ u8 bucket_overflow[0x1];
+ u8 start_color[0x2];
+ u8 both_buckets_on_green[0x1];
+ u8 meter_mode[0x2];
+ u8 reserved_at_1[0x19];
+ u8 reserved_at_2[0x20]; //04h
+ u8 reserved_at_3[0x3];
+ u8 cbs_exponent[0x5]; // 08h
+ u8 cbs_mantissa[0x8];
+ u8 reserved_at_4[0x3];
+ u8 cir_exponent[0x5];
+ u8 cir_mantissa[0x8];
+ u8 reserved_at_5[0x20]; // 0Ch
+ u8 reserved_at_6[0x3];
+ u8 ebs_exponent[0x5]; // 10h
+ u8 ebs_mantissa[0x8];
+ u8 reserved_at_7[0x3];
+ u8 eir_exponent[0x5];
+ u8 eir_mantissa[0x8];
+ u8 reserved_at_8[0x60]; // 14h-1Ch
+};
+
+struct mlx5_ifc_cqc_bits {
+ u8 status[0x4];
+ u8 as_notify[0x1];
+ u8 initiator_src_dct[0x1];
+ u8 dbr_umem_valid[0x1];
+ u8 reserved_at_7[0x1];
+ u8 cqe_sz[0x3];
+ u8 cc[0x1];
+ u8 reserved_at_c[0x1];
+ u8 scqe_break_moderation_en[0x1];
+ u8 oi[0x1];
+ u8 cq_period_mode[0x2];
+ u8 cqe_comp_en[0x1];
+ u8 mini_cqe_res_format[0x2];
+ u8 st[0x4];
+ u8 reserved_at_18[0x8];
+ u8 dbr_umem_id[0x20];
+ u8 reserved_at_40[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_at_5a[0x6];
+ u8 reserved_at_60[0x3];
+ u8 log_cq_size[0x5];
+ u8 uar_page[0x18];
+ u8 reserved_at_80[0x4];
+ u8 cq_period[0xc];
+ u8 cq_max_count[0x10];
+ u8 reserved_at_a0[0x18];
+ u8 c_eqn[0x8];
+ u8 reserved_at_c0[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_at_c8[0x18];
+ u8 reserved_at_e0[0x20];
+ u8 reserved_at_100[0x8];
+ u8 last_notified_index[0x18];
+ u8 reserved_at_120[0x8];
+ u8 last_solicit_index[0x18];
+ u8 reserved_at_140[0x8];
+ u8 consumer_counter[0x18];
+ u8 reserved_at_160[0x8];
+ u8 producer_counter[0x18];
+ u8 local_partition_id[0xc];
+ u8 process_id[0x14];
+ u8 reserved_at_1A0[0x20];
+ u8 dbr_addr[0x40];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 cqn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_cqc_bits cq_context;
+ u8 cq_umem_offset[0x40];
+ u8 cq_umem_id[0x20];
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x1f];
+ u8 reserved_at_300[0x580];
+ u8 pas[];
+};
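+
+/*
+ * Illustrative sketch, not part of the PRM definition: command layouts
+ * such as mlx5_ifc_create_cq_in_bits are normally filled field by field
+ * through accessor macros. The MLX5_SET()/MLX5_ADDR_OF() helpers and the
+ * MLX5_CMD_OP_CREATE_CQ opcode used below are assumed to be the ones
+ * defined earlier in this header; "in" is a zeroed buffer sized for
+ * struct mlx5_ifc_create_cq_in_bits.
+ */
+static inline void
+mlx5_example_fill_create_cq(uint32_t *in, uint32_t log_cq_size,
+ uint32_t uar_page, uint32_t eqn)
+{
+ void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+ MLX5_SET(cqc, cqctx, log_cq_size, log_cq_size);
+ MLX5_SET(cqc, cqctx, uar_page, uar_page);
+ MLX5_SET(cqc, cqctx, c_eqn, eqn);
+}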
+
+enum {
+ MLX5_GENERAL_OBJ_TYPE_VIRTQ = 0x000d,
+};
+
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x20];
+ u8 obj_type[0x10];
+ u8 obj_id[0x20];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 obj_id[0x20];
+ u8 reserved_at_60[0x20];
+};
+
+enum {
+ MLX5_VIRTQ_STATE_INIT = 0,
+ MLX5_VIRTQ_STATE_RDY = 1,
+ MLX5_VIRTQ_STATE_SUSPEND = 2,
+ MLX5_VIRTQ_STATE_ERROR = 3,
+};
+
+enum {
+ MLX5_VIRTQ_MODIFY_TYPE_STATE = (1UL << 0),
+ MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS = (1UL << 3),
+ MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE = (1UL << 4),
+};
+
+struct mlx5_ifc_virtio_q_bits {
+ u8 virtio_q_type[0x8];
+ u8 reserved_at_8[0x5];
+ u8 event_mode[0x3];
+ u8 queue_index[0x10];
+ u8 full_emulation[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 reserved_at_22[0x2];
+ u8 offload_type[0x4];
+ u8 event_qpn_or_msix[0x18];
+ u8 doorbell_stride_idx[0x10];
+ u8 queue_size[0x10];
+ u8 device_emulation_id[0x20];
+ u8 desc_addr[0x40];
+ u8 used_addr[0x40];
+ u8 available_addr[0x40];
+ u8 virtio_q_mkey[0x20];
+ u8 reserved_at_160[0x20];
+ u8 umem_1_id[0x20];
+ u8 umem_1_size[0x20];
+ u8 umem_1_offset[0x40];
+ u8 umem_2_id[0x20];
+ u8 umem_2_size[0x20];
+ u8 umem_2_offset[0x40];
+ u8 umem_3_id[0x20];
+ u8 umem_3_size[0x20];
+ u8 umem_3_offset[0x40];
+ u8 reserved_at_300[0x100];
+};
+
+struct mlx5_ifc_virtio_net_q_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 tso_ipv4[0x1];
+ u8 tso_ipv6[0x1];
+ u8 tx_csum[0x1];
+ u8 rx_csum[0x1];
+ u8 reserved_at_84[0x6];
+ u8 dirty_bitmap_dump_enable[0x1];
+ u8 vhost_log_page[0x5];
+ u8 reserved_at_90[0xc];
+ u8 state[0x4];
+ u8 error_type[0x8];
+ u8 tisn_or_qpn[0x18];
+ u8 dirty_bitmap_mkey[0x20];
+ u8 dirty_bitmap_size[0x20];
+ u8 dirty_bitmap_addr[0x40];
+ u8 hw_available_index[0x10];
+ u8 hw_used_index[0x10];
+ u8 reserved_at_160[0xa0];
+ struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtq_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_net_q_bits virtq;
+};
+
+struct mlx5_ifc_query_virtq_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_net_q_bits virtq;
+};
+
+enum {
+ MLX5_QP_ST_RC = 0x0,
+};
+
+enum {
+ MLX5_QP_PM_MIGRATED = 0x3,
+};
+
+enum {
+ MLX5_NON_ZERO_RQ = 0x0,
+ MLX5_SRQ_RQ = 0x1,
+ MLX5_CRQ_RQ = 0x2,
+ MLX5_ZERO_LEN_RQ = 0x3,
+};
+
+struct mlx5_ifc_ads_bits {
+ u8 fl[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_at_2[0xe];
+ u8 pkey_index[0x10];
+ u8 reserved_at_20[0x8];
+ u8 grh[0x1];
+ u8 mlid[0x7];
+ u8 rlid[0x10];
+ u8 ack_timeout[0x5];
+ u8 reserved_at_45[0x3];
+ u8 src_addr_index[0x8];
+ u8 reserved_at_50[0x4];
+ u8 stat_rate[0x4];
+ u8 hop_limit[0x8];
+ u8 reserved_at_60[0x4];
+ u8 tclass[0x8];
+ u8 flow_label[0x14];
+ u8 rgid_rip[16][0x8];
+ u8 reserved_at_100[0x4];
+ u8 f_dscp[0x1];
+ u8 f_ecn[0x1];
+ u8 reserved_at_106[0x1];
+ u8 f_eth_prio[0x1];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+ u8 udp_sport[0x10];
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 sl[0x4];
+ u8 vhca_port_num[0x8];
+ u8 rmac_47_32[0x10];
+ u8 rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_qpc_bits {
+ u8 state[0x4];
+ u8 lag_tx_port_affinity[0x4];
+ u8 st[0x8];
+ u8 reserved_at_10[0x3];
+ u8 pm_state[0x2];
+ u8 reserved_at_15[0x1];
+ u8 req_e2e_credit_mode[0x2];
+ u8 offload_type[0x4];
+ u8 end_padding_mode[0x2];
+ u8 reserved_at_1e[0x2];
+ u8 wq_signature[0x1];
+ u8 block_lb_mc[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 reserved_at_24[0x1];
+ u8 drain_sigerr[0x1];
+ u8 reserved_at_26[0x2];
+ u8 pd[0x18];
+ u8 mtu[0x3];
+ u8 log_msg_max[0x5];
+ u8 reserved_at_48[0x1];
+ u8 log_rq_size[0x4];
+ u8 log_rq_stride[0x3];
+ u8 no_sq[0x1];
+ u8 log_sq_size[0x4];
+ u8 reserved_at_55[0x6];
+ u8 rlky[0x1];
+ u8 ulp_stateless_offload_mode[0x4];
+ u8 counter_set_id[0x8];
+ u8 uar_page[0x18];
+ u8 reserved_at_80[0x8];
+ u8 user_index[0x18];
+ u8 reserved_at_a0[0x3];
+ u8 log_page_size[0x5];
+ u8 remote_qpn[0x18];
+ struct mlx5_ifc_ads_bits primary_address_path;
+ struct mlx5_ifc_ads_bits secondary_address_path;
+ u8 log_ack_req_freq[0x4];
+ u8 reserved_at_384[0x4];
+ u8 log_sra_max[0x3];
+ u8 reserved_at_38b[0x2];
+ u8 retry_count[0x3];
+ u8 rnr_retry[0x3];
+ u8 reserved_at_393[0x1];
+ u8 fre[0x1];
+ u8 cur_rnr_retry[0x3];
+ u8 cur_retry_count[0x3];
+ u8 reserved_at_39b[0x5];
+ u8 reserved_at_3a0[0x20];
+ u8 reserved_at_3c0[0x8];
+ u8 next_send_psn[0x18];
+ u8 reserved_at_3e0[0x8];
+ u8 cqn_snd[0x18];
+ u8 reserved_at_400[0x8];
+ u8 deth_sqpn[0x18];
+ u8 reserved_at_420[0x20];
+ u8 reserved_at_440[0x8];
+ u8 last_acked_psn[0x18];
+ u8 reserved_at_460[0x8];
+ u8 ssn[0x18];
+ u8 reserved_at_480[0x8];
+ u8 log_rra_max[0x3];
+ u8 reserved_at_48b[0x1];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 reserved_at_493[0x1];
+ u8 page_offset[0x6];
+ u8 reserved_at_49a[0x3];
+ u8 cd_slave_receive[0x1];
+ u8 cd_slave_send[0x1];
+ u8 cd_master[0x1];
+ u8 reserved_at_4a0[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 next_rcv_psn[0x18];
+ u8 reserved_at_4c0[0x8];
+ u8 xrcd[0x18];
+ u8 reserved_at_4e0[0x8];
+ u8 cqn_rcv[0x18];
+ u8 dbr_addr[0x40];
+ u8 q_key[0x20];
+ u8 reserved_at_560[0x5];
+ u8 rq_type[0x3];
+ u8 srqn_rmpn_xrqn[0x18];
+ u8 reserved_at_580[0x8];
+ u8 rmsn[0x18];
+ u8 hw_sq_wqebb_counter[0x10];
+ u8 sw_sq_wqebb_counter[0x10];
+ u8 hw_rq_counter[0x20];
+ u8 sw_rq_counter[0x20];
+ u8 reserved_at_600[0x20];
+ u8 reserved_at_620[0xf];
+ u8 cgs[0x1];
+ u8 cs_req[0x8];
+ u8 cs_res[0x8];
+ u8 dc_access_key[0x40];
+ u8 reserved_at_680[0x3];
+ u8 dbr_umem_valid[0x1];
+ u8 reserved_at_684[0x9c];
+ u8 dbr_umem_id[0x20];
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+struct mlx5_ifc_create_qp_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x40];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 wq_umem_offset[0x40];
+ u8 wq_umem_id[0x20];
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_861[0x1f];
+ u8 pas[0][0x40];
+};
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_rts2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 reserved_at_800[0x80];
+};
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+struct mlx5_ifc_query_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ u8 opt_param_mask[0x20];
+ u8 reserved_at_a0[0x20];
+ struct mlx5_ifc_qpc_bits qpc;
+ u8 reserved_at_800[0x80];
+ u8 pas[0][0x40];
+};
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+struct mlx5_ifc_query_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+/* CQE format mask. */
+#define MLX5E_CQE_FORMAT_MASK 0xc
+
+/* MPW opcode. */
+#define MLX5_OPC_MOD_MPW 0x01
+
+/* Compressed Rx CQE structure. */
+struct mlx5_mini_cqe8 {
+ union {
+ uint32_t rx_hash_result;
+ struct {
+ uint16_t checksum;
+ uint16_t stride_idx;
+ };
+ struct {
+ uint16_t wqe_counter;
+ uint8_t s_wqe_opcode;
+ uint8_t reserved;
+ } s_wqe_info;
+ };
+ uint32_t byte_cnt;
+};
+
+/* srTCM PRM flow meter colors. */
+enum {
+ MLX5_FLOW_COLOR_RED = 0,
+ MLX5_FLOW_COLOR_YELLOW,
+ MLX5_FLOW_COLOR_GREEN,
+ MLX5_FLOW_COLOR_UNDEFINED,
+};
+
+/* Maximum value of srTCM metering parameters. */
+#define MLX5_SRTCM_CBS_MAX (0xFF * (1ULL << 0x1F))
+#define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF)
+#define MLX5_SRTCM_EBS_MAX 0
+
+/* Number of bits used by the meter color. */
+#define MLX5_MTR_COLOR_BITS 8
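+
+/*
+ * Illustrative sketch, drawn only from the limits above and not a PRM
+ * definition: MLX5_SRTCM_CBS_MAX == 0xFF << 0x1F suggests the burst
+ * budgets are encoded as an 8-bit mantissa scaled by a 5-bit power-of-two
+ * exponent, i.e. bytes = mantissa << exponent.
+ */
+static inline void
+mlx5_example_srtcm_bs_encode(uint64_t bytes, uint8_t *man, uint8_t *exp)
+{
+ uint8_t e = 0;
+
+ while (bytes > 0xFF && e < 0x1F) {
+ bytes >>= 1;
+ e++;
+ }
+ /* Budgets above MLX5_SRTCM_CBS_MAX saturate at the encodable maximum. */
+ *man = (uint8_t)RTE_MIN(bytes, (uint64_t)0xFF);
+ *exp = e;
+}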
+
+/**
+ * Convert a user mark to flow mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_set(uint32_t val)
+{
+ uint32_t ret;
+
+ /*
+ * Add one to the user value to differentiate un-marked flows from
+ * marked flows; if the ID equals MLX5_FLOW_MARK_DEFAULT, it is left
+ * untouched.
+ */
+ if (val != MLX5_FLOW_MARK_DEFAULT)
+ ++val;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /*
+ * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
+ * word, byte-swapped by the kernel on little-endian systems. In this
+ * case, left-shifting the resulting big-endian value ensures the
+ * least significant 24 bits are retained when converting it back.
+ */
+ ret = rte_cpu_to_be_32(val) >> 8;
+#else
+ ret = val;
+#endif
+ return ret;
+}
+
+/**
+ * Convert a mark to user mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_get(uint32_t val)
+{
+ /*
+ * Subtract one from the retrieved value. It was added by
+ * mlx5_flow_mark_set() to distinguish unmarked flows.
+ */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ return (val >> 8) - 1;
+#else
+ return val - 1;
+#endif
+}
+
+#endif /* RTE_PMD_MLX5_PRM_H_ */
diff --git a/src/spdk/dpdk/drivers/common/mlx5/rte_common_mlx5_version.map b/src/spdk/dpdk/drivers/common/mlx5/rte_common_mlx5_version.map
new file mode 100644
index 000000000..350e77140
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mlx5/rte_common_mlx5_version.map
@@ -0,0 +1,75 @@
+INTERNAL {
+ global:
+
+ mlx5_class_get;
+
+ mlx5_create_mr_ext;
+
+ mlx5_dev_to_pci_addr;
+
+ mlx5_devx_cmd_create_cq;
+ mlx5_devx_cmd_create_qp;
+ mlx5_devx_cmd_create_rq;
+ mlx5_devx_cmd_create_rqt;
+ mlx5_devx_cmd_create_sq;
+ mlx5_devx_cmd_create_tir;
+ mlx5_devx_cmd_create_td;
+ mlx5_devx_cmd_create_tis;
+ mlx5_devx_cmd_create_virtq;
+ mlx5_devx_cmd_destroy;
+ mlx5_devx_cmd_flow_counter_alloc;
+ mlx5_devx_cmd_flow_counter_query;
+ mlx5_devx_cmd_flow_dump;
+ mlx5_devx_cmd_mkey_create;
+ mlx5_devx_cmd_modify_qp_state;
+ mlx5_devx_cmd_modify_rq;
+ mlx5_devx_cmd_modify_rqt;
+ mlx5_devx_cmd_modify_sq;
+ mlx5_devx_cmd_modify_virtq;
+ mlx5_devx_cmd_qp_query_tis_td;
+ mlx5_devx_cmd_query_hca_attr;
+ mlx5_devx_cmd_query_virtq;
+ mlx5_devx_get_out_command_status;
+
+ mlx5_mp_init_primary;
+ mlx5_mp_uninit_primary;
+ mlx5_mp_init_secondary;
+ mlx5_mp_uninit_secondary;
+ mlx5_mp_req_mr_create;
+ mlx5_mp_req_queue_state_modify;
+ mlx5_mp_req_verbs_cmd_fd;
+
+ mlx5_mr_btree_init;
+ mlx5_mr_btree_free;
+ mlx5_mr_btree_dump;
+ mlx5_mr_addr2mr_bh;
+ mlx5_mr_release_cache;
+ mlx5_mr_dump_cache;
+ mlx5_mr_rebuild_cache;
+ mlx5_mr_insert_cache;
+ mlx5_mr_lookup_cache;
+ mlx5_mr_lookup_list;
+ mlx5_mr_create_primary;
+ mlx5_mr_flush_local_cache;
+
+ mlx5_nl_allmulti;
+ mlx5_nl_devlink_family_id_get;
+ mlx5_nl_driver_reload;
+ mlx5_nl_enable_roce_get;
+ mlx5_nl_enable_roce_set;
+ mlx5_nl_ifindex;
+ mlx5_nl_init;
+ mlx5_nl_mac_addr_add;
+ mlx5_nl_mac_addr_flush;
+ mlx5_nl_mac_addr_remove;
+ mlx5_nl_mac_addr_sync;
+ mlx5_nl_portnum;
+ mlx5_nl_promisc;
+ mlx5_nl_switch_info;
+ mlx5_nl_vf_mac_addr_modify;
+ mlx5_nl_vlan_vmwa_create;
+ mlx5_nl_vlan_vmwa_delete;
+
+ mlx5_translate_port_name;
+};
+
diff --git a/src/spdk/dpdk/drivers/common/mvep/Makefile b/src/spdk/dpdk/drivers/common/mvep/Makefile
new file mode 100644
index 000000000..f91d295e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mvep/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_common_mvep.a
+
+# versioning export map
+EXPORT_MAP := rte_common_mvep_version.map
+
+# external library dependencies
+CFLAGS += -I$(RTE_SDK)/drivers/common/mvep
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+LDLIBS += -L$(LIBMUSDK_PATH)/lib
+LDLIBS += -lmusdk
+LDLIBS += -lrte_eal -lrte_kvargs
+
+# library source files
+SRCS-y += mvep_common.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/mvep/meson.build b/src/spdk/dpdk/drivers/common/mvep/meson.build
new file mode 100644
index 000000000..8df4bc6e0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mvep/meson.build
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+#
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false)
+if not lib.found()
+ build = false
+ reason = 'missing dependency, "libmusdk"'
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files('mvep_common.c')
diff --git a/src/spdk/dpdk/drivers/common/mvep/mvep_common.c b/src/spdk/dpdk/drivers/common/mvep/mvep_common.c
new file mode 100644
index 000000000..67fa65b57
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mvep/mvep_common.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+
+#include <env/mv_autogen_comp_flags.h>
+#include <env/mv_sys_dma.h>
+
+#include "rte_mvep_common.h"
+
+/* Memory size (in bytes) for MUSDK DMA buffers */
+#define MRVL_MUSDK_DMA_MEMSIZE (40 * 1024 * 1024)
+
+struct mvep {
+ uint32_t ref_count;
+};
+
+static struct mvep mvep;
+
+int rte_mvep_init(enum mvep_module_type module __rte_unused,
+ struct rte_kvargs *kvlist __rte_unused)
+{
+ int ret;
+
+ if (!mvep.ref_count) {
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret)
+ return ret;
+ }
+
+ mvep.ref_count++;
+
+ return 0;
+}
+
+int rte_mvep_deinit(enum mvep_module_type module __rte_unused)
+{
+ mvep.ref_count--;
+
+ if (!mvep.ref_count)
+ mv_sys_dma_mem_destroy();
+
+ return 0;
+}
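+
+/*
+ * Illustrative usage, not part of this file: example_pp2_probe() is a
+ * hypothetical PMD probe path showing how the reference count above lets
+ * several Marvell modules share one MUSDK DMA region; each module simply
+ * brackets its lifetime with an init/deinit pair.
+ */
+static int __rte_unused
+example_pp2_probe(struct rte_kvargs *kvlist)
+{
+ int ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
+
+ if (ret)
+ return ret;
+ /* ... per-device setup would go here ... */
+ return rte_mvep_deinit(MVEP_MOD_T_PP2);
+}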
diff --git a/src/spdk/dpdk/drivers/common/mvep/rte_common_mvep_version.map b/src/spdk/dpdk/drivers/common/mvep/rte_common_mvep_version.map
new file mode 100644
index 000000000..030928439
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mvep/rte_common_mvep_version.map
@@ -0,0 +1,8 @@
+DPDK_20.0 {
+ global:
+
+ rte_mvep_deinit;
+ rte_mvep_init;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/common/mvep/rte_mvep_common.h b/src/spdk/dpdk/drivers/common/mvep/rte_mvep_common.h
new file mode 100644
index 000000000..0593cefcd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/mvep/rte_mvep_common.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ */
+
+#ifndef __RTE_MVEP_COMMON_H__
+#define __RTE_MVEP_COMMON_H__
+
+#include <rte_kvargs.h>
+
+enum mvep_module_type {
+ MVEP_MOD_T_NONE = 0,
+ MVEP_MOD_T_PP2,
+ MVEP_MOD_T_SAM,
+ MVEP_MOD_T_NETA,
+ MVEP_MOD_T_LAST
+};
+
+int rte_mvep_init(enum mvep_module_type module, struct rte_kvargs *kvlist);
+int rte_mvep_deinit(enum mvep_module_type module);
+
+#endif /* __RTE_MVEP_COMMON_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx/Makefile b/src/spdk/dpdk/drivers/common/octeontx/Makefile
new file mode 100644
index 000000000..5e67df058
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+EXPORT_MAP := rte_common_octeontx_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += octeontx_mbox.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/octeontx/meson.build b/src/spdk/dpdk/drivers/common/octeontx/meson.build
new file mode 100644
index 000000000..203d1ef49
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+sources = files('octeontx_mbox.c')
diff --git a/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.c b/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.c
new file mode 100644
index 000000000..effe0b267
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
+
+#include "octeontx_mbox.h"
+
+/* Mbox operation timeout in seconds */
+#define MBOX_WAIT_TIME_SEC 3
+#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
+
+/* Mbox channel state */
+enum {
+ MBOX_CHAN_STATE_REQ = 1,
+ MBOX_CHAN_STATE_RES = 0,
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+struct mbox {
+ int init_once;
+ uint8_t ready;
+ uint8_t *ram_mbox_base; /* Base address of the mbox message stored in RAM */
+ uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
+ uint16_t tag_own; /* Last tag which was written to own channel */
+ uint16_t domain; /* Domain */
+ rte_spinlock_t lock;
+};
+
+static struct mbox octeontx_mbox;
+
+/*
+ * Structure used for mbox synchronization.
+ * This structure sits at the beginning of the mbox RAM and is used as the
+ * main synchronization point for channel communication.
+ */
+struct mbox_ram_hdr {
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t chan_state : 1;
+ uint8_t coproc : 7;
+ uint8_t msg;
+ uint8_t vfid;
+ uint8_t res_code;
+ uint16_t tag;
+ uint16_t len;
+ };
+ };
+};
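+
+/*
+ * Handshake summary (derived from the code below): the requester writes
+ * this header with chan_state = MBOX_CHAN_STATE_REQ and an even tag, then
+ * rings the PF doorbell register; the PF serves the request, flips
+ * chan_state back to MBOX_CHAN_STATE_RES and answers with tag + 1, which
+ * mbox_wait_response() verifies before trusting the payload.
+ */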
+
+/* MBOX interface version message */
+struct mbox_intf_ver {
+ uint32_t platform:12;
+ uint32_t major:10;
+ uint32_t minor:10;
+};
+
+int octeontx_logtype_mbox;
+
+RTE_INIT(otx_init_log)
+{
+ octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
+ if (octeontx_logtype_mbox >= 0)
+ rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE);
+}
+
+static inline void
+mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
+{
+ uint16_t i;
+
+ for (i = 0; i < size; i++)
+ d[i] = s[i];
+}
+
+static inline void
+mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ const void *txmsg, uint16_t txsize)
+{
+ struct mbox_ram_hdr old_hdr;
+ struct mbox_ram_hdr new_hdr = { {0} };
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /*
+ * Initialize the channel with the tag left by the last send.
+ * On successful completion of a mbox send, the PF increments the tag by
+ * one; the sender can validate the integrity of the PF message with this
+ * scheme.
+ */
+ old_hdr.u64 = rte_read64(ram_mbox_hdr);
+ m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
+
+ /* Copy msg body */
+ if (txmsg)
+ mbox_msgcpy(ram_mbox_msg, txmsg, txsize);
+
+ /* Prepare new hdr */
+ new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
+ new_hdr.coproc = hdr->coproc;
+ new_hdr.msg = hdr->msg;
+ new_hdr.vfid = hdr->vfid;
+ new_hdr.tag = m->tag_own;
+ new_hdr.len = txsize;
+
+ /* Write the msg header */
+ rte_write64(new_hdr.u64, ram_mbox_hdr);
+ rte_smp_wmb();
+ /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
+ rte_write64(0, m->reg);
+}
+
+static inline int
+mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ void *rxmsg, uint16_t rxsize)
+{
+ int res = 0, wait;
+ uint16_t len;
+ struct mbox_ram_hdr rx_hdr;
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /* Wait for response */
+ wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
+ while (wait > 0) {
+ rte_delay_us(100);
+ rx_hdr.u64 = rte_read64(ram_mbox_hdr);
+ if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
+ break;
+ --wait;
+ }
+
+ hdr->res_code = rx_hdr.res_code;
+ m->tag_own++;
+
+ /* Timeout */
+ if (wait <= 0) {
+ res = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Tag mismatch */
+ if (m->tag_own != rx_hdr.tag) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ /* PF nacked the msg */
+ if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
+ res = -EBADMSG;
+ goto error;
+ }
+
+ len = RTE_MIN(rx_hdr.len, rxsize);
+ if (rxmsg)
+ mbox_msgcpy(rxmsg, ram_mbox_msg, len);
+
+ return len;
+
+error:
+ mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
+ m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
+ hdr->res_code);
+ return res;
+}
+
+static inline int
+mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
+ uint16_t txsize, void *rxmsg, uint16_t rxsize)
+{
+ int res = -EINVAL;
+
+ if (m->init_once == 0 || hdr == NULL ||
+ txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
+ mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
+ m->init_once, hdr, txsize, rxsize);
+ return res;
+ }
+
+ rte_spinlock_lock(&m->lock);
+
+ mbox_send_request(m, hdr, txmsg, txsize);
+ res = mbox_wait_response(m, hdr, rxmsg, rxsize);
+
+ rte_spinlock_unlock(&m->lock);
+ return res;
+}
+
+int
+octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base, uint16_t domain)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ if (m->init_once)
+ return -EALREADY;
+
+ if (ram_mbox_base == NULL) {
+ mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base);
+ return -EINVAL;
+ }
+
+ m->ram_mbox_base = ram_mbox_base;
+
+ if (m->reg != NULL) {
+ rte_spinlock_init(&m->lock);
+ m->init_once = 1;
+ m->domain = domain;
+ }
+
+ return 0;
+}
+
+int
+octeontx_mbox_set_reg(uint8_t *reg, uint16_t domain)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ if (m->init_once)
+ return -EALREADY;
+
+ if (reg == NULL) {
+ mbox_log_err("Invalid reg=%p", reg);
+ return -EINVAL;
+ }
+
+ m->reg = reg;
+
+ if (m->ram_mbox_base != NULL) {
+ rte_spinlock_init(&m->lock);
+ m->init_once = 1;
+ m->domain = domain;
+ }
+
+ return 0;
+}
+
+int
+octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
+ uint16_t txlen, void *rxdata, uint16_t rxlen)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EINVAL;
+
+ return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
+}
+
+static int
+octeontx_start_domain(void)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ int result = -EINVAL;
+
+ hdr.coproc = NO_COPROC;
+ hdr.msg = RM_START_APP;
+
+ result = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (result != 0) {
+ mbox_log_err("Could not start domain. Err=%d. FuncErr=%d\n",
+ result, hdr.res_code);
+ result = -EINVAL;
+ }
+
+ return result;
+}
+
+static int
+octeontx_check_mbox_version(struct mbox_intf_ver *app_intf_ver,
+ struct mbox_intf_ver *intf_ver)
+{
+ struct mbox_intf_ver kernel_intf_ver = {0};
+ struct octeontx_mbox_hdr hdr = {0};
+ int result = 0;
+
+ hdr.coproc = NO_COPROC;
+ hdr.msg = RM_INTERFACE_VERSION;
+
+ result = octeontx_mbox_send(&hdr, app_intf_ver,
+ sizeof(struct mbox_intf_ver),
+ &kernel_intf_ver, sizeof(kernel_intf_ver));
+ if (result != sizeof(kernel_intf_ver)) {
+ mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d\n",
+ result, hdr.res_code);
+ result = -EINVAL;
+ }
+
+ if (intf_ver)
+ *intf_ver = kernel_intf_ver;
+
+ if (app_intf_ver->platform != kernel_intf_ver.platform ||
+ app_intf_ver->major != kernel_intf_ver.major ||
+ app_intf_ver->minor != kernel_intf_ver.minor)
+ result = -EINVAL;
+
+ return result;
+}
+
+int
+octeontx_mbox_init(void)
+{
+ struct mbox_intf_ver MBOX_INTERFACE_VERSION = {
+ .platform = 0x01,
+ .major = 0x01,
+ .minor = 0x03
+ };
+ struct mbox_intf_ver rm_intf_ver = {0};
+ struct mbox *m = &octeontx_mbox;
+ int ret;
+
+ if (m->ready)
+ return 0;
+
+ ret = octeontx_start_domain();
+ if (ret < 0) {
+ m->init_once = 0;
+ return ret;
+ }
+
+ ret = octeontx_check_mbox_version(&MBOX_INTERFACE_VERSION,
+ &rm_intf_ver);
+ if (ret < 0) {
+ mbox_log_err("MBOX version: Kernel(%d.%d.%d) != DPDK(%d.%d.%d)",
+ rm_intf_ver.platform, rm_intf_ver.major,
+ rm_intf_ver.minor, MBOX_INTERFACE_VERSION.platform,
+ MBOX_INTERFACE_VERSION.major,
+ MBOX_INTERFACE_VERSION.minor);
+ m->init_once = 0;
+ return -EINVAL;
+ }
+
+ m->ready = 1;
+ rte_mb();
+
+ return 0;
+}
+
+uint16_t
+octeontx_get_global_domain(void)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ return m->domain;
+}
diff --git a/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.h b/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.h
new file mode 100644
index 000000000..e56719cb8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/octeontx_mbox.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_MBOX_H__
+#define __OCTEONTX_MBOX_H__
+
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+#define SSOW_BAR4_LEN (64 * 1024)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
+#define NO_COPROC 0x0
+#define RM_START_APP 0x1
+#define RM_INTERFACE_VERSION 0x2
+
+#define MBOX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\
+ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
+
+#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__)
+#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__)
+#define mbox_func_trace mbox_log_dbg
+
+extern int octeontx_logtype_mbox;
+
+struct octeontx_mbox_hdr {
+ uint16_t vfid; /* VF index or pf resource index local to the domain */
+ uint8_t coproc; /* Coprocessor id */
+ uint8_t msg; /* Message id */
+ uint8_t oob; /* out of band data */
+ uint8_t res_code; /* Functional layer response code */
+};
+
+int octeontx_mbox_init(void);
+void octeontx_set_global_domain(uint16_t global_domain);
+uint16_t octeontx_get_global_domain(void);
+int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base, uint16_t domain);
+int octeontx_mbox_set_reg(uint8_t *reg, uint16_t domain);
+int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
+ void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
+
+#endif /* __OCTEONTX_MBOX_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map b/src/spdk/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map
new file mode 100644
index 000000000..5f6aa8bd3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map
@@ -0,0 +1,12 @@
+DPDK_20.0 {
+ global:
+
+ octeontx_get_global_domain;
+ octeontx_logtype_mbox;
+ octeontx_mbox_init;
+ octeontx_mbox_send;
+ octeontx_mbox_set_ram_mbox_base;
+ octeontx_mbox_set_reg;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/Makefile b/src/spdk/dpdk/drivers/common/octeontx2/Makefile
new file mode 100644
index 000000000..260da8dd3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_octeontx2.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/bus/pci
+
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -diag-disable 2259
+endif
+endif
+
+EXPORT_MAP := rte_common_octeontx2_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += otx2_dev.c
+SRCS-y += otx2_irq.c
+SRCS-y += otx2_mbox.c
+SRCS-y += otx2_common.c
+SRCS-y += otx2_sec_idev.c
+
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_ethdev -lrte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_nix.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_nix.h
new file mode 100644
index 000000000..e3b68505b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_nix.h
@@ -0,0 +1,1391 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_NIX_HW_H__
+#define __OTX2_NIX_HW_H__
+
+/* Register offsets */
+
+#define NIX_AF_CFG (0x0ull)
+#define NIX_AF_STATUS (0x10ull)
+#define NIX_AF_NDC_CFG (0x18ull)
+#define NIX_AF_CONST (0x20ull)
+#define NIX_AF_CONST1 (0x28ull)
+#define NIX_AF_CONST2 (0x30ull)
+#define NIX_AF_CONST3 (0x38ull)
+#define NIX_AF_SQ_CONST (0x40ull)
+#define NIX_AF_CQ_CONST (0x48ull)
+#define NIX_AF_RQ_CONST (0x50ull)
+#define NIX_AF_PSE_CONST (0x60ull)
+#define NIX_AF_TL1_CONST (0x70ull)
+#define NIX_AF_TL2_CONST (0x78ull)
+#define NIX_AF_TL3_CONST (0x80ull)
+#define NIX_AF_TL4_CONST (0x88ull)
+#define NIX_AF_MDQ_CONST (0x90ull)
+#define NIX_AF_MC_MIRROR_CONST (0x98ull)
+#define NIX_AF_LSO_CFG (0xa8ull)
+#define NIX_AF_BLK_RST (0xb0ull)
+#define NIX_AF_TX_TSTMP_CFG (0xc0ull)
+#define NIX_AF_RX_CFG (0xd0ull)
+#define NIX_AF_AVG_DELAY (0xe0ull)
+#define NIX_AF_CINT_DELAY (0xf0ull)
+#define NIX_AF_RX_MCAST_BASE (0x100ull)
+#define NIX_AF_RX_MCAST_CFG (0x110ull)
+#define NIX_AF_RX_MCAST_BUF_BASE (0x120ull)
+#define NIX_AF_RX_MCAST_BUF_CFG (0x130ull)
+#define NIX_AF_RX_MIRROR_BUF_BASE (0x140ull)
+#define NIX_AF_RX_MIRROR_BUF_CFG (0x148ull)
+#define NIX_AF_LF_RST (0x150ull)
+#define NIX_AF_GEN_INT (0x160ull)
+#define NIX_AF_GEN_INT_W1S (0x168ull)
+#define NIX_AF_GEN_INT_ENA_W1S (0x170ull)
+#define NIX_AF_GEN_INT_ENA_W1C (0x178ull)
+#define NIX_AF_ERR_INT (0x180ull)
+#define NIX_AF_ERR_INT_W1S (0x188ull)
+#define NIX_AF_ERR_INT_ENA_W1S (0x190ull)
+#define NIX_AF_ERR_INT_ENA_W1C (0x198ull)
+#define NIX_AF_RAS (0x1a0ull)
+#define NIX_AF_RAS_W1S (0x1a8ull)
+#define NIX_AF_RAS_ENA_W1S (0x1b0ull)
+#define NIX_AF_RAS_ENA_W1C (0x1b8ull)
+#define NIX_AF_RVU_INT (0x1c0ull)
+#define NIX_AF_RVU_INT_W1S (0x1c8ull)
+#define NIX_AF_RVU_INT_ENA_W1S (0x1d0ull)
+#define NIX_AF_RVU_INT_ENA_W1C (0x1d8ull)
+#define NIX_AF_TCP_TIMER (0x1e0ull)
+#define NIX_AF_RX_DEF_OL2 (0x200ull)
+#define NIX_AF_RX_DEF_OIP4 (0x210ull)
+#define NIX_AF_RX_DEF_IIP4 (0x220ull)
+#define NIX_AF_RX_DEF_OIP6 (0x230ull)
+#define NIX_AF_RX_DEF_IIP6 (0x240ull)
+#define NIX_AF_RX_DEF_OTCP (0x250ull)
+#define NIX_AF_RX_DEF_ITCP (0x260ull)
+#define NIX_AF_RX_DEF_OUDP (0x270ull)
+#define NIX_AF_RX_DEF_IUDP (0x280ull)
+#define NIX_AF_RX_DEF_OSCTP (0x290ull)
+#define NIX_AF_RX_DEF_ISCTP (0x2a0ull)
+#define NIX_AF_RX_DEF_IPSECX(a) (0x2b0ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_IPSEC_GEN_CFG (0x300ull)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_RX_SYNC (0x3e0ull)
+#define NIX_AF_NDC_TX_SYNC (0x3f0ull)
+#define NIX_AF_AQ_CFG (0x400ull)
+#define NIX_AF_AQ_BASE (0x410ull)
+#define NIX_AF_AQ_STATUS (0x420ull)
+#define NIX_AF_AQ_DOOR (0x430ull)
+#define NIX_AF_AQ_DONE_WAIT (0x440ull)
+#define NIX_AF_AQ_DONE (0x450ull)
+#define NIX_AF_AQ_DONE_ACK (0x460ull)
+#define NIX_AF_AQ_DONE_TIMER (0x470ull)
+#define NIX_AF_AQ_DONE_ENA_W1S (0x490ull)
+#define NIX_AF_AQ_DONE_ENA_W1C (0x498ull)
+#define NIX_AF_RX_LINKX_CFG(a) (0x540ull | (uint64_t)(a) << 16)
+#define NIX_AF_RX_SW_SYNC (0x550ull)
+#define NIX_AF_RX_LINKX_WRR_CFG(a) (0x560ull | (uint64_t)(a) << 16)
+#define NIX_AF_EXPR_TX_FIFO_STATUS (0x640ull)
+#define NIX_AF_NORM_TX_FIFO_STATUS (0x648ull)
+#define NIX_AF_SDP_TX_FIFO_STATUS (0x650ull)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x660ull)
+#define NIX_AF_TX_NPC_CAPTURE_INFO (0x668ull)
+#define NIX_AF_TX_NPC_CAPTURE_RESPX(a) (0x680ull | (uint64_t)(a) << 3)
+#define NIX_AF_SEB_ACTIVE_CYCLES_PCX(a) (0x6c0ull | (uint64_t)(a) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700ull | (uint64_t)(a) << 16)
+#define NIX_AF_SMQX_HEAD(a) (0x710ull | (uint64_t)(a) << 16)
+#define NIX_AF_SMQX_TAIL(a) (0x720ull | (uint64_t)(a) << 16)
+#define NIX_AF_SMQX_STATUS(a) (0x730ull | (uint64_t)(a) << 16)
+#define NIX_AF_SMQX_NXT_HEAD(a) (0x740ull | (uint64_t)(a) << 16)
+#define NIX_AF_SQM_ACTIVE_CYCLES_PC (0x770ull)
+#define NIX_AF_PSE_CHANNEL_LEVEL (0x800ull)
+#define NIX_AF_PSE_SHAPER_CFG (0x810ull)
+#define NIX_AF_PSE_ACTIVE_CYCLES_PC (0x8c0ull)
+#define NIX_AF_MARK_FORMATX_CTL(a) (0x900ull | (uint64_t)(a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xa00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xa10ull | (uint64_t)(a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xa20ull | (uint64_t)(a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xa30ull | (uint64_t)(a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT (0xa40ull)
+#define NIX_AF_SDP_SW_XOFFX(a) (0xa60ull | (uint64_t)(a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a) (0xac0ull | (uint64_t)(a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a) (0xb00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xb10ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xc00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_SHAPE(a) (0xc10ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xc20ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a) (0xc50ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a) (0xc70ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xc80ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a) (0xcc0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a) (0xcc8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a) (0xcd0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a) (0xcd8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xd20ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xd30ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a) (0xd40ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a) (0xd50ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xd60ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xd70ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xd80ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a) (0xd90ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xe00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SHAPE(a) (0xe10ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xe20ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xe30ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a) (0xe40ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a) (0xe50ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a) (0xe70ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xe80ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xe88ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a) (0xec0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a) (0xec8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a) (0xed0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a) (0xed8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) \
+ (0x1000ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) \
+ (0x1010ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_CIR(a) \
+ (0x1020ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_PIR(a) \
+ (0x1030ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a) \
+ (0x1040ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a) \
+ (0x1050ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a) \
+ (0x1070ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) \
+ (0x1080ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_PARENT(a) \
+ (0x1088ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a) \
+ (0x10c0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a) \
+ (0x10c8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a) \
+ (0x10d0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a) \
+ (0x10d8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) \
+ (0x1200ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) \
+ (0x1210ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_CIR(a) \
+ (0x1220ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_PIR(a) \
+ (0x1230ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a) \
+ (0x1240ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a) \
+ (0x1250ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a) \
+ (0x1270ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) \
+ (0x1280ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_PARENT(a) \
+ (0x1288ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a) \
+ (0x12c0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a) \
+ (0x12c8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a) \
+ (0x12d0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a) \
+ (0x12d8ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) \
+ (0x1400ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) \
+ (0x1410ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_CIR(a) \
+ (0x1420ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_PIR(a) \
+ (0x1430ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a) \
+ (0x1440ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a) \
+ (0x1450ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a) \
+ (0x1470ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_PARENT(a) \
+ (0x1480ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a) \
+ (0x14c0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3_TL2X_CFG(a) \
+ (0x1600ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a) \
+ (0x1610ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) \
+ (0x1700ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) \
+ (0x1800ull | (uint64_t)(a) << 18 | (uint64_t)(b) << 3)
+#define NIX_AF_TX_MCASTX(a) \
+ (0x1900ull | (uint64_t)(a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a) \
+ (0x1a00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a) \
+ (0x1a10ull | (uint64_t)(a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a) \
+ (0x1a20ull | (uint64_t)(a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a) \
+ (0x1a30ull | (uint64_t)(a) << 15)
+#define NIX_AF_CINT_TIMERX(a) \
+ (0x1a40ull | (uint64_t)(a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b) \
+ (0x1b00ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_CFG(a) \
+ (0x4000ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a) \
+ (0x4020ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a) \
+ (0x4028ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a) \
+ (0x4030ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a) \
+ (0x4040ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a) \
+ (0x4050ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a) \
+ (0x4060ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a) \
+ (0x4070ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_TX_CFG(a) \
+ (0x4080ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a) \
+ (0x4090ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_CFG(a) \
+ (0x40a0ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a) \
+ (0x40c0ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a) \
+ (0x40d0ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a) \
+ (0x4100ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a) \
+ (0x4110ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a) \
+ (0x4120ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a) \
+ (0x4130ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a) \
+ (0x4140ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a) \
+ (0x4148ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) \
+ (0x4150ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) \
+ (0x4158ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) \
+ (0x4170ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a) \
+ (0x4180ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) \
+ (0x4200ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b) \
+ (0x4300ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b) \
+ (0x4400ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b) \
+ (0x4500ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b) \
+ (0x4600ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV (0x4700ull)
+#define NIX_AF_RX_NPC_MC_DROP (0x4710ull)
+#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720ull)
+#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730ull)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) \
+ (0x4800ull | (uint64_t)(a) << 16)
+#define NIX_PRIV_AF_INT_CFG (0x8000000ull)
+#define NIX_PRIV_LFX_CFG(a) \
+ (0x8000010ull | (uint64_t)(a) << 8)
+#define NIX_PRIV_LFX_INT_CFG(a) \
+ (0x8000020ull | (uint64_t)(a) << 8)
+#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030ull)
+
+#define NIX_LF_RX_SECRETX(a) (0x0ull | (uint64_t)(a) << 3)
+#define NIX_LF_CFG (0x100ull)
+#define NIX_LF_GINT (0x200ull)
+#define NIX_LF_GINT_W1S (0x208ull)
+#define NIX_LF_GINT_ENA_W1C (0x210ull)
+#define NIX_LF_GINT_ENA_W1S (0x218ull)
+#define NIX_LF_ERR_INT (0x220ull)
+#define NIX_LF_ERR_INT_W1S (0x228ull)
+#define NIX_LF_ERR_INT_ENA_W1C (0x230ull)
+#define NIX_LF_ERR_INT_ENA_W1S (0x238ull)
+#define NIX_LF_RAS (0x240ull)
+#define NIX_LF_RAS_W1S (0x248ull)
+#define NIX_LF_RAS_ENA_W1C (0x250ull)
+#define NIX_LF_RAS_ENA_W1S (0x258ull)
+#define NIX_LF_SQ_OP_ERR_DBG (0x260ull)
+#define NIX_LF_MNQ_ERR_DBG (0x270ull)
+#define NIX_LF_SEND_ERR_DBG (0x280ull)
+#define NIX_LF_TX_STATX(a) (0x300ull | (uint64_t)(a) << 3)
+#define NIX_LF_RX_STATX(a) (0x400ull | (uint64_t)(a) << 3)
+#define NIX_LF_OP_SENDX(a) (0x800ull | (uint64_t)(a) << 3)
+#define NIX_LF_RQ_OP_INT (0x900ull)
+#define NIX_LF_RQ_OP_OCTS (0x910ull)
+#define NIX_LF_RQ_OP_PKTS (0x920ull)
+#define NIX_LF_RQ_OP_DROP_OCTS (0x930ull)
+#define NIX_LF_RQ_OP_DROP_PKTS (0x940ull)
+#define NIX_LF_RQ_OP_RE_PKTS (0x950ull)
+#define NIX_LF_OP_IPSEC_DYNO_CNT (0x980ull)
+#define NIX_LF_SQ_OP_INT (0xa00ull)
+#define NIX_LF_SQ_OP_OCTS (0xa10ull)
+#define NIX_LF_SQ_OP_PKTS (0xa20ull)
+#define NIX_LF_SQ_OP_STATUS (0xa30ull)
+#define NIX_LF_SQ_OP_DROP_OCTS (0xa40ull)
+#define NIX_LF_SQ_OP_DROP_PKTS (0xa50ull)
+#define NIX_LF_CQ_OP_INT (0xb00ull)
+#define NIX_LF_CQ_OP_DOOR (0xb30ull)
+#define NIX_LF_CQ_OP_STATUS (0xb40ull)
+#define NIX_LF_QINTX_CNT(a) (0xc00ull | (uint64_t)(a) << 12)
+#define NIX_LF_QINTX_INT(a) (0xc10ull | (uint64_t)(a) << 12)
+#define NIX_LF_QINTX_ENA_W1S(a) (0xc20ull | (uint64_t)(a) << 12)
+#define NIX_LF_QINTX_ENA_W1C(a) (0xc30ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_CNT(a) (0xd00ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_WAIT(a) (0xd10ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_INT(a) (0xd20ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_INT_W1S(a) (0xd30ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_ENA_W1S(a) (0xd40ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_ENA_W1C(a) (0xd50ull | (uint64_t)(a) << 12)
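+
+/*
+ * Illustrative sketch, not part of the hardware definition: the
+ * parameterized macros above fold an index into the register offset, so a
+ * per-queue CSR access is a plain 64-bit MMIO load/store relative to the
+ * mapped LF BAR. lf_base is an assumed pointer to that mapping.
+ */
+static inline uint64_t
+example_nix_lf_qint_read(const volatile void *lf_base, uint8_t q)
+{
+ const volatile uint8_t *base = lf_base;
+
+ return *(const volatile uint64_t *)(base + NIX_LF_QINTX_INT(q));
+}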
+
+/* Enum offsets */
+
+#define NIX_TX_VTAGOP_NOP (0x0ull)
+#define NIX_TX_VTAGOP_INSERT (0x1ull)
+#define NIX_TX_VTAGOP_REPLACE (0x2ull)
+
+#define NIX_TX_ACTIONOP_DROP (0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull)
+#define NIX_TX_ACTIONOP_MCAST (0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull)
+
+#define NIX_INTF_RX (0x0ull)
+#define NIX_INTF_TX (0x1ull)
+
+#define NIX_TXLAYER_OL3 (0x0ull)
+#define NIX_TXLAYER_OL4 (0x1ull)
+#define NIX_TXLAYER_IL3 (0x2ull)
+#define NIX_TXLAYER_IL4 (0x3ull)
+
+#define NIX_SUBDC_NOP (0x0ull)
+#define NIX_SUBDC_EXT (0x1ull)
+#define NIX_SUBDC_CRC (0x2ull)
+#define NIX_SUBDC_IMM (0x3ull)
+#define NIX_SUBDC_SG (0x4ull)
+#define NIX_SUBDC_MEM (0x5ull)
+#define NIX_SUBDC_JUMP (0x6ull)
+#define NIX_SUBDC_WORK (0x7ull)
+#define NIX_SUBDC_SOD (0xfull)
+
+#define NIX_STYPE_STF (0x0ull)
+#define NIX_STYPE_STT (0x1ull)
+#define NIX_STYPE_STP (0x2ull)
+
+#define NIX_STAT_LF_TX_TX_UCAST (0x0ull)
+#define NIX_STAT_LF_TX_TX_BCAST (0x1ull)
+#define NIX_STAT_LF_TX_TX_MCAST (0x2ull)
+#define NIX_STAT_LF_TX_TX_DROP (0x3ull)
+#define NIX_STAT_LF_TX_TX_OCTS (0x4ull)
+
+#define NIX_STAT_LF_RX_RX_OCTS (0x0ull)
+#define NIX_STAT_LF_RX_RX_UCAST (0x1ull)
+#define NIX_STAT_LF_RX_RX_BCAST (0x2ull)
+#define NIX_STAT_LF_RX_RX_MCAST (0x3ull)
+#define NIX_STAT_LF_RX_RX_DROP (0x4ull)
+#define NIX_STAT_LF_RX_RX_DROP_OCTS (0x5ull)
+#define NIX_STAT_LF_RX_RX_FCS (0x6ull)
+#define NIX_STAT_LF_RX_RX_ERR (0x7ull)
+#define NIX_STAT_LF_RX_RX_DRP_BCAST (0x8ull)
+#define NIX_STAT_LF_RX_RX_DRP_MCAST (0x9ull)
+#define NIX_STAT_LF_RX_RX_DRP_L3BCAST (0xaull)
+#define NIX_STAT_LF_RX_RX_DRP_L3MCAST (0xbull)
+
+#define NIX_SQOPERR_SQ_OOR (0x0ull)
+#define NIX_SQOPERR_SQ_CTX_FAULT (0x1ull)
+#define NIX_SQOPERR_SQ_CTX_POISON (0x2ull)
+#define NIX_SQOPERR_SQ_DISABLED (0x3ull)
+#define NIX_SQOPERR_MAX_SQE_SIZE_ERR (0x4ull)
+#define NIX_SQOPERR_SQE_OFLOW (0x5ull)
+#define NIX_SQOPERR_SQB_NULL (0x6ull)
+#define NIX_SQOPERR_SQB_FAULT (0x7ull)
+
+#define NIX_XQESZ_W64 (0x0ull)
+#define NIX_XQESZ_W16 (0x1ull)
+
+#define NIX_VTAGSIZE_T4 (0x0ull)
+#define NIX_VTAGSIZE_T8 (0x1ull)
+
+#define NIX_RX_ACTIONOP_DROP (0x0ull)
+#define NIX_RX_ACTIONOP_UCAST (0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
+#define NIX_RX_ACTIONOP_MCAST (0x3ull)
+#define NIX_RX_ACTIONOP_RSS (0x4ull)
+#define NIX_RX_ACTIONOP_PF_FUNC_DROP (0x5ull)
+#define NIX_RX_ACTIONOP_MIRROR (0x6ull)
+
+#define NIX_RX_VTAGACTION_VTAG0_RELPTR (0x0ull)
+#define NIX_RX_VTAGACTION_VTAG1_RELPTR (0x4ull)
+#define NIX_RX_VTAGACTION_VTAG_VALID (0x1ull)
+#define NIX_TX_VTAGACTION_VTAG0_RELPTR \
+ (sizeof(struct nix_inst_hdr_s) + 2 * 6)
+#define NIX_TX_VTAGACTION_VTAG1_RELPTR \
+ (sizeof(struct nix_inst_hdr_s) + 2 * 6 + 4)
+#define NIX_RQINT_DROP (0x0ull)
+#define NIX_RQINT_RED (0x1ull)
+#define NIX_RQINT_R2 (0x2ull)
+#define NIX_RQINT_R3 (0x3ull)
+#define NIX_RQINT_R4 (0x4ull)
+#define NIX_RQINT_R5 (0x5ull)
+#define NIX_RQINT_R6 (0x6ull)
+#define NIX_RQINT_R7 (0x7ull)
+
+#define NIX_MAXSQESZ_W16 (0x0ull)
+#define NIX_MAXSQESZ_W8 (0x1ull)
+
+#define NIX_LSOALG_NOP (0x0ull)
+#define NIX_LSOALG_ADD_SEGNUM (0x1ull)
+#define NIX_LSOALG_ADD_PAYLEN (0x2ull)
+#define NIX_LSOALG_ADD_OFFSET (0x3ull)
+#define NIX_LSOALG_TCP_FLAGS (0x4ull)
+
+#define NIX_MNQERR_SQ_CTX_FAULT (0x0ull)
+#define NIX_MNQERR_SQ_CTX_POISON (0x1ull)
+#define NIX_MNQERR_SQB_FAULT (0x2ull)
+#define NIX_MNQERR_SQB_POISON (0x3ull)
+#define NIX_MNQERR_TOTAL_ERR (0x4ull)
+#define NIX_MNQERR_LSO_ERR (0x5ull)
+#define NIX_MNQERR_CQ_QUERY_ERR (0x6ull)
+#define NIX_MNQERR_MAX_SQE_SIZE_ERR (0x7ull)
+#define NIX_MNQERR_MAXLEN_ERR (0x8ull)
+#define NIX_MNQERR_SQE_SIZEM1_ZERO (0x9ull)
+
+#define NIX_MDTYPE_RSVD (0x0ull)
+#define NIX_MDTYPE_FLUSH (0x1ull)
+#define NIX_MDTYPE_PMD (0x2ull)
+
+#define NIX_NDC_TX_PORT_LMT (0x0ull)
+#define NIX_NDC_TX_PORT_ENQ (0x1ull)
+#define NIX_NDC_TX_PORT_MNQ (0x2ull)
+#define NIX_NDC_TX_PORT_DEQ (0x3ull)
+#define NIX_NDC_TX_PORT_DMA (0x4ull)
+#define NIX_NDC_TX_PORT_XQE (0x5ull)
+
+#define NIX_NDC_RX_PORT_AQ (0x0ull)
+#define NIX_NDC_RX_PORT_CQ (0x1ull)
+#define NIX_NDC_RX_PORT_CINT (0x2ull)
+#define NIX_NDC_RX_PORT_MC (0x3ull)
+#define NIX_NDC_RX_PORT_PKT (0x4ull)
+#define NIX_NDC_RX_PORT_RQ (0x5ull)
+
+#define NIX_RE_OPCODE_RE_NONE (0x0ull)
+#define NIX_RE_OPCODE_RE_PARTIAL (0x1ull)
+#define NIX_RE_OPCODE_RE_JABBER (0x2ull)
+#define NIX_RE_OPCODE_RE_FCS (0x7ull)
+#define NIX_RE_OPCODE_RE_FCS_RCV (0x8ull)
+#define NIX_RE_OPCODE_RE_TERMINATE (0x9ull)
+#define NIX_RE_OPCODE_RE_RX_CTL (0xbull)
+#define NIX_RE_OPCODE_RE_SKIP (0xcull)
+#define NIX_RE_OPCODE_RE_DMAPKT (0xfull)
+#define NIX_RE_OPCODE_UNDERSIZE (0x10ull)
+#define NIX_RE_OPCODE_OVERSIZE (0x11ull)
+#define NIX_RE_OPCODE_OL2_LENMISM (0x12ull)
+
+#define NIX_REDALG_STD (0x0ull)
+#define NIX_REDALG_SEND (0x1ull)
+#define NIX_REDALG_STALL (0x2ull)
+#define NIX_REDALG_DISCARD (0x3ull)
+
+#define NIX_RX_MCOP_RQ (0x0ull)
+#define NIX_RX_MCOP_RSS (0x1ull)
+
+#define NIX_RX_PERRCODE_NPC_RESULT_ERR (0x2ull)
+#define NIX_RX_PERRCODE_MCAST_FAULT (0x4ull)
+#define NIX_RX_PERRCODE_MIRROR_FAULT (0x5ull)
+#define NIX_RX_PERRCODE_MCAST_POISON (0x6ull)
+#define NIX_RX_PERRCODE_MIRROR_POISON (0x7ull)
+#define NIX_RX_PERRCODE_DATA_FAULT (0x8ull)
+#define NIX_RX_PERRCODE_MEMOUT (0x9ull)
+#define NIX_RX_PERRCODE_BUFS_OFLOW (0xaull)
+#define NIX_RX_PERRCODE_OL3_LEN (0x10ull)
+#define NIX_RX_PERRCODE_OL4_LEN (0x11ull)
+#define NIX_RX_PERRCODE_OL4_CHK (0x12ull)
+#define NIX_RX_PERRCODE_OL4_PORT (0x13ull)
+#define NIX_RX_PERRCODE_IL3_LEN (0x20ull)
+#define NIX_RX_PERRCODE_IL4_LEN (0x21ull)
+#define NIX_RX_PERRCODE_IL4_CHK (0x22ull)
+#define NIX_RX_PERRCODE_IL4_PORT (0x23ull)
+
+#define NIX_SENDCRCALG_CRC32 (0x0ull)
+#define NIX_SENDCRCALG_CRC32C (0x1ull)
+#define NIX_SENDCRCALG_ONES16 (0x2ull)
+
+#define NIX_SENDL3TYPE_NONE (0x0ull)
+#define NIX_SENDL3TYPE_IP4 (0x2ull)
+#define NIX_SENDL3TYPE_IP4_CKSUM (0x3ull)
+#define NIX_SENDL3TYPE_IP6 (0x4ull)
+
+#define NIX_SENDL4TYPE_NONE (0x0ull)
+#define NIX_SENDL4TYPE_TCP_CKSUM (0x1ull)
+#define NIX_SENDL4TYPE_SCTP_CKSUM (0x2ull)
+#define NIX_SENDL4TYPE_UDP_CKSUM (0x3ull)
+
+#define NIX_SENDLDTYPE_LDD (0x0ull)
+#define NIX_SENDLDTYPE_LDT (0x1ull)
+#define NIX_SENDLDTYPE_LDWB (0x2ull)
+
+#define NIX_SENDMEMALG_SET (0x0ull)
+#define NIX_SENDMEMALG_SETTSTMP (0x1ull)
+#define NIX_SENDMEMALG_SETRSLT (0x2ull)
+#define NIX_SENDMEMALG_ADD (0x8ull)
+#define NIX_SENDMEMALG_SUB (0x9ull)
+#define NIX_SENDMEMALG_ADDLEN (0xaull)
+#define NIX_SENDMEMALG_SUBLEN (0xbull)
+#define NIX_SENDMEMALG_ADDMBUF (0xcull)
+#define NIX_SENDMEMALG_SUBMBUF (0xdull)
+
+#define NIX_SENDMEMDSZ_B64 (0x0ull)
+#define NIX_SENDMEMDSZ_B32 (0x1ull)
+#define NIX_SENDMEMDSZ_B16 (0x2ull)
+#define NIX_SENDMEMDSZ_B8 (0x3ull)
+
+#define NIX_SEND_STATUS_GOOD (0x0ull)
+#define NIX_SEND_STATUS_SQ_CTX_FAULT (0x1ull)
+#define NIX_SEND_STATUS_SQ_CTX_POISON (0x2ull)
+#define NIX_SEND_STATUS_SQB_FAULT (0x3ull)
+#define NIX_SEND_STATUS_SQB_POISON (0x4ull)
+#define NIX_SEND_STATUS_SEND_HDR_ERR (0x5ull)
+#define NIX_SEND_STATUS_SEND_EXT_ERR (0x6ull)
+#define NIX_SEND_STATUS_JUMP_FAULT (0x7ull)
+#define NIX_SEND_STATUS_JUMP_POISON (0x8ull)
+#define NIX_SEND_STATUS_SEND_CRC_ERR (0x10ull)
+#define NIX_SEND_STATUS_SEND_IMM_ERR (0x11ull)
+#define NIX_SEND_STATUS_SEND_SG_ERR (0x12ull)
+#define NIX_SEND_STATUS_SEND_MEM_ERR (0x13ull)
+#define NIX_SEND_STATUS_INVALID_SUBDC (0x14ull)
+#define NIX_SEND_STATUS_SUBDC_ORDER_ERR (0x15ull)
+#define NIX_SEND_STATUS_DATA_FAULT (0x16ull)
+#define NIX_SEND_STATUS_DATA_POISON (0x17ull)
+#define NIX_SEND_STATUS_NPC_DROP_ACTION (0x20ull)
+#define NIX_SEND_STATUS_LOCK_VIOL (0x21ull)
+#define NIX_SEND_STATUS_NPC_UCAST_CHAN_ERR (0x22ull)
+#define NIX_SEND_STATUS_NPC_MCAST_CHAN_ERR (0x23ull)
+#define NIX_SEND_STATUS_NPC_MCAST_ABORT (0x24ull)
+#define NIX_SEND_STATUS_NPC_VTAG_PTR_ERR (0x25ull)
+#define NIX_SEND_STATUS_NPC_VTAG_SIZE_ERR (0x26ull)
+#define NIX_SEND_STATUS_SEND_MEM_FAULT (0x27ull)
+
+#define NIX_SQINT_LMT_ERR (0x0ull)
+#define NIX_SQINT_MNQ_ERR (0x1ull)
+#define NIX_SQINT_SEND_ERR (0x2ull)
+#define NIX_SQINT_SQB_ALLOC_FAIL (0x3ull)
+
+#define NIX_XQE_TYPE_INVALID (0x0ull)
+#define NIX_XQE_TYPE_RX (0x1ull)
+#define NIX_XQE_TYPE_RX_IPSECS (0x2ull)
+#define NIX_XQE_TYPE_RX_IPSECH (0x3ull)
+#define NIX_XQE_TYPE_RX_IPSECD (0x4ull)
+#define NIX_XQE_TYPE_SEND (0x8ull)
+
+#define NIX_AQ_COMP_NOTDONE (0x0ull)
+#define NIX_AQ_COMP_GOOD (0x1ull)
+#define NIX_AQ_COMP_SWERR (0x2ull)
+#define NIX_AQ_COMP_CTX_POISON (0x3ull)
+#define NIX_AQ_COMP_CTX_FAULT (0x4ull)
+#define NIX_AQ_COMP_LOCKERR (0x5ull)
+#define NIX_AQ_COMP_SQB_ALLOC_FAIL (0x6ull)
+
+#define NIX_AF_INT_VEC_RVU (0x0ull)
+#define NIX_AF_INT_VEC_GEN (0x1ull)
+#define NIX_AF_INT_VEC_AQ_DONE (0x2ull)
+#define NIX_AF_INT_VEC_AF_ERR (0x3ull)
+#define NIX_AF_INT_VEC_POISON (0x4ull)
+
+#define NIX_AQINT_GEN_RX_MCAST_DROP (0x0ull)
+#define NIX_AQINT_GEN_RX_MIRROR_DROP (0x1ull)
+#define NIX_AQINT_GEN_TL1_DRAIN (0x3ull)
+#define NIX_AQINT_GEN_SMQ_FLUSH_DONE (0x4ull)
+
+#define NIX_AQ_INSTOP_NOP (0x0ull)
+#define NIX_AQ_INSTOP_INIT (0x1ull)
+#define NIX_AQ_INSTOP_WRITE (0x2ull)
+#define NIX_AQ_INSTOP_READ (0x3ull)
+#define NIX_AQ_INSTOP_LOCK (0x4ull)
+#define NIX_AQ_INSTOP_UNLOCK (0x5ull)
+
+#define NIX_AQ_CTYPE_RQ (0x0ull)
+#define NIX_AQ_CTYPE_SQ (0x1ull)
+#define NIX_AQ_CTYPE_CQ (0x2ull)
+#define NIX_AQ_CTYPE_MCE (0x3ull)
+#define NIX_AQ_CTYPE_RSS (0x4ull)
+#define NIX_AQ_CTYPE_DYNO (0x5ull)
+
+#define NIX_COLORRESULT_GREEN (0x0ull)
+#define NIX_COLORRESULT_YELLOW (0x1ull)
+#define NIX_COLORRESULT_RED_SEND (0x2ull)
+#define NIX_COLORRESULT_RED_DROP (0x3ull)
+
+#define NIX_CHAN_LBKX_CHX(a, b) \
+ (0x000ull | ((uint64_t)(a) << 8) | (uint64_t)(b))
+#define NIX_CHAN_R4 (0x400ull)
+#define NIX_CHAN_R5 (0x500ull)
+#define NIX_CHAN_R6 (0x600ull)
+#define NIX_CHAN_SDP_CH_END (0x7ffull)
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+#define NIX_CHAN_CGXX_LMACX_CHX(a, b, c) \
+ (0x800ull | ((uint64_t)(a) << 8) | ((uint64_t)(b) << 4) | \
+ (uint64_t)(c))
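+
+/*
+ * Illustrative example (not part of the original header): the base receive
+ * channel of CGX1/LMAC2 is NIX_CHAN_CGXX_LMACX_CHX(1, 2, 0)
+ * = 0x800 | (1 << 8) | (2 << 4) | 0 = 0x920.
+ */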
+
+#define NIX_INTF_SDP (0x4ull)
+#define NIX_INTF_CGX0 (0x0ull)
+#define NIX_INTF_CGX1 (0x1ull)
+#define NIX_INTF_CGX2 (0x2ull)
+#define NIX_INTF_LBK0 (0x3ull)
+
+#define NIX_CQERRINT_DOOR_ERR (0x0ull)
+#define NIX_CQERRINT_WR_FULL (0x1ull)
+#define NIX_CQERRINT_CQE_FAULT (0x2ull)
+
+#define NIX_LF_INT_VEC_GINT (0x80ull)
+#define NIX_LF_INT_VEC_ERR_INT (0x81ull)
+#define NIX_LF_INT_VEC_POISON (0x82ull)
+#define NIX_LF_INT_VEC_QINT_END (0x3full)
+#define NIX_LF_INT_VEC_QINT_START (0x0ull)
+#define NIX_LF_INT_VEC_CINT_END (0x7full)
+#define NIX_LF_INT_VEC_CINT_START (0x40ull)
+
+/* Enum definitions */
+
+/* Structure definitions */
+
+/* NIX admin queue instruction structure */
+struct nix_aq_inst_s {
+ uint64_t op : 4;
+ uint64_t ctype : 4;
+ uint64_t lf : 7;
+ uint64_t rsvd_23_15 : 9;
+ uint64_t cindex : 20;
+ uint64_t rsvd_62_44 : 19;
+ uint64_t doneint : 1;
+ uint64_t res_addr : 64; /* W1 */
+};
+
+/* NIX admin queue result structure */
+struct nix_aq_res_s {
+ uint64_t op : 4;
+ uint64_t ctype : 4;
+ uint64_t compcode : 8;
+ uint64_t doneint : 1;
+ uint64_t rsvd_63_17 : 47;
+ uint64_t rsvd_127_64 : 64; /* W1 */
+};
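+
+/*
+ * Usage sketch (illustrative, not part of the original header): compose an
+ * admin queue instruction that initialises RQ context 5 of LF 0. The
+ * function name and the "res_iova" result-buffer address are assumptions
+ * made for the example.
+ */
+static inline void
+nix_aq_inst_init_rq_example(struct nix_aq_inst_s *inst, uint64_t res_iova)
+{
+	*inst = (struct nix_aq_inst_s) { 0 };	/* clear reserved fields */
+	inst->op = NIX_AQ_INSTOP_INIT;
+	inst->ctype = NIX_AQ_CTYPE_RQ;
+	inst->lf = 0;
+	inst->cindex = 5;
+	inst->res_addr = res_iova;	/* hardware writes the result here */
+}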
+
+/* NIX completion interrupt context hardware structure */
+struct nix_cint_hw_s {
+ uint64_t ecount : 32;
+ uint64_t qcount : 16;
+ uint64_t intr : 1;
+ uint64_t ena : 1;
+ uint64_t timer_idx : 8;
+ uint64_t rsvd_63_58 : 6;
+ uint64_t ecount_wait : 32;
+ uint64_t qcount_wait : 16;
+ uint64_t time_wait : 8;
+ uint64_t rsvd_127_120 : 8;
+};
+
+/* NIX completion queue entry header structure */
+struct nix_cqe_hdr_s {
+ uint64_t tag : 32;
+ uint64_t q : 20;
+ uint64_t rsvd_57_52 : 6;
+ uint64_t node : 2;
+ uint64_t cqe_type : 4;
+};
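+
+/*
+ * Usage sketch (illustrative, not part of the original header): classify a
+ * completion queue entry by its type field using the NIX_XQE_TYPE_* values
+ * defined above.
+ */
+static inline int
+nix_cqe_is_rx(const struct nix_cqe_hdr_s *hdr)
+{
+	return hdr->cqe_type == NIX_XQE_TYPE_RX;
+}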
+
+/* NIX completion queue context structure */
+struct nix_cq_ctx_s {
+ uint64_t base : 64;/* W0 */
+ uint64_t rsvd_67_64 : 4;
+ uint64_t bp_ena : 1;
+ uint64_t rsvd_71_69 : 3;
+ uint64_t bpid : 9;
+ uint64_t rsvd_83_81 : 3;
+ uint64_t qint_idx : 7;
+ uint64_t cq_err : 1;
+ uint64_t cint_idx : 7;
+ uint64_t avg_con : 9;
+ uint64_t wrptr : 20;
+ uint64_t tail : 20;
+ uint64_t head : 20;
+ uint64_t avg_level : 8;
+ uint64_t update_time : 16;
+ uint64_t bp : 8;
+ uint64_t drop : 8;
+ uint64_t drop_ena : 1;
+ uint64_t ena : 1;
+ uint64_t rsvd_211_210 : 2;
+ uint64_t substream : 20;
+ uint64_t caching : 1;
+ uint64_t rsvd_235_233 : 3;
+ uint64_t qsize : 4;
+ uint64_t cq_err_int : 8;
+ uint64_t cq_err_int_ena : 8;
+};
+
+/* NIX instruction header structure */
+struct nix_inst_hdr_s {
+ uint64_t pf_func : 16;
+ uint64_t sq : 20;
+ uint64_t rsvd_63_36 : 28;
+};
+
+/* NIX I/O virtual address structure */
+struct nix_iova_s {
+ uint64_t addr : 64; /* W0 */
+};
+
+/* NIX IPsec dynamic ordering counter structure */
+struct nix_ipsec_dyno_s {
+ uint32_t count : 32; /* W0 */
+};
+
+/* NIX memory result structure */
+struct nix_mem_result_s {
+ uint64_t v : 1;
+ uint64_t color : 2;
+ uint64_t rsvd_63_3 : 61;
+};
+
+/* NIX statistics operation write data structure */
+struct nix_op_q_wdata_s {
+ uint64_t rsvd_31_0 : 32;
+ uint64_t q : 20;
+ uint64_t rsvd_63_52 : 12;
+};
+
+/* NIX queue interrupt context hardware structure */
+struct nix_qint_hw_s {
+ uint32_t count : 22;
+ uint32_t rsvd_30_22 : 9;
+ uint32_t ena : 1;
+};
+
+/* NIX receive queue context hardware structure */
+struct nix_rq_ctx_hw_s {
+ uint64_t ena : 1;
+ uint64_t sso_ena : 1;
+ uint64_t ipsech_ena : 1;
+ uint64_t ena_wqwd : 1;
+ uint64_t cq : 20;
+ uint64_t substream : 20;
+ uint64_t wqe_aura : 20;
+ uint64_t spb_aura : 20;
+ uint64_t lpb_aura : 20;
+ uint64_t sso_grp : 10;
+ uint64_t sso_tt : 2;
+ uint64_t pb_caching : 2;
+ uint64_t wqe_caching : 1;
+ uint64_t xqe_drop_ena : 1;
+ uint64_t spb_drop_ena : 1;
+ uint64_t lpb_drop_ena : 1;
+ uint64_t wqe_skip : 2;
+ uint64_t rsvd_127_124 : 4;
+ uint64_t rsvd_139_128 : 12;
+ uint64_t spb_sizem1 : 6;
+ uint64_t rsvd_150_146 : 5;
+ uint64_t spb_ena : 1;
+ uint64_t lpb_sizem1 : 12;
+ uint64_t first_skip : 7;
+ uint64_t rsvd_171 : 1;
+ uint64_t later_skip : 6;
+ uint64_t xqe_imm_size : 6;
+ uint64_t rsvd_189_184 : 6;
+ uint64_t xqe_imm_copy : 1;
+ uint64_t xqe_hdr_split : 1;
+ uint64_t xqe_drop : 8;
+ uint64_t xqe_pass : 8;
+ uint64_t wqe_pool_drop : 8;
+ uint64_t wqe_pool_pass : 8;
+ uint64_t spb_aura_drop : 8;
+ uint64_t spb_aura_pass : 8;
+ uint64_t spb_pool_drop : 8;
+ uint64_t spb_pool_pass : 8;
+ uint64_t lpb_aura_drop : 8;
+ uint64_t lpb_aura_pass : 8;
+ uint64_t lpb_pool_drop : 8;
+ uint64_t lpb_pool_pass : 8;
+ uint64_t rsvd_319_288 : 32;
+ uint64_t ltag : 24;
+ uint64_t good_utag : 8;
+ uint64_t bad_utag : 8;
+ uint64_t flow_tagw : 6;
+ uint64_t rsvd_383_366 : 18;
+ uint64_t octs : 48;
+ uint64_t rsvd_447_432 : 16;
+ uint64_t pkts : 48;
+ uint64_t rsvd_511_496 : 16;
+ uint64_t drop_octs : 48;
+ uint64_t rsvd_575_560 : 16;
+ uint64_t drop_pkts : 48;
+ uint64_t rsvd_639_624 : 16;
+ uint64_t re_pkts : 48;
+ uint64_t rsvd_702_688 : 15;
+ uint64_t ena_copy : 1;
+ uint64_t rsvd_739_704 : 36;
+ uint64_t rq_int : 8;
+ uint64_t rq_int_ena : 8;
+ uint64_t qint_idx : 7;
+ uint64_t rsvd_767_763 : 5;
+ uint64_t rsvd_831_768 : 64;/* W12 */
+ uint64_t rsvd_895_832 : 64;/* W13 */
+ uint64_t rsvd_959_896 : 64;/* W14 */
+ uint64_t rsvd_1023_960 : 64;/* W15 */
+};
+
+/* NIX receive queue context structure */
+struct nix_rq_ctx_s {
+ uint64_t ena : 1;
+ uint64_t sso_ena : 1;
+ uint64_t ipsech_ena : 1;
+ uint64_t ena_wqwd : 1;
+ uint64_t cq : 20;
+ uint64_t substream : 20;
+ uint64_t wqe_aura : 20;
+ uint64_t spb_aura : 20;
+ uint64_t lpb_aura : 20;
+ uint64_t sso_grp : 10;
+ uint64_t sso_tt : 2;
+ uint64_t pb_caching : 2;
+ uint64_t wqe_caching : 1;
+ uint64_t xqe_drop_ena : 1;
+ uint64_t spb_drop_ena : 1;
+ uint64_t lpb_drop_ena : 1;
+ uint64_t rsvd_127_122 : 6;
+ uint64_t rsvd_139_128 : 12;
+ uint64_t spb_sizem1 : 6;
+ uint64_t wqe_skip : 2;
+ uint64_t rsvd_150_148 : 3;
+ uint64_t spb_ena : 1;
+ uint64_t lpb_sizem1 : 12;
+ uint64_t first_skip : 7;
+ uint64_t rsvd_171 : 1;
+ uint64_t later_skip : 6;
+ uint64_t xqe_imm_size : 6;
+ uint64_t rsvd_189_184 : 6;
+ uint64_t xqe_imm_copy : 1;
+ uint64_t xqe_hdr_split : 1;
+ uint64_t xqe_drop : 8;
+ uint64_t xqe_pass : 8;
+ uint64_t wqe_pool_drop : 8;
+ uint64_t wqe_pool_pass : 8;
+ uint64_t spb_aura_drop : 8;
+ uint64_t spb_aura_pass : 8;
+ uint64_t spb_pool_drop : 8;
+ uint64_t spb_pool_pass : 8;
+ uint64_t lpb_aura_drop : 8;
+ uint64_t lpb_aura_pass : 8;
+ uint64_t lpb_pool_drop : 8;
+ uint64_t lpb_pool_pass : 8;
+ uint64_t rsvd_291_288 : 4;
+ uint64_t rq_int : 8;
+ uint64_t rq_int_ena : 8;
+ uint64_t qint_idx : 7;
+ uint64_t rsvd_319_315 : 5;
+ uint64_t ltag : 24;
+ uint64_t good_utag : 8;
+ uint64_t bad_utag : 8;
+ uint64_t flow_tagw : 6;
+ uint64_t rsvd_383_366 : 18;
+ uint64_t octs : 48;
+ uint64_t rsvd_447_432 : 16;
+ uint64_t pkts : 48;
+ uint64_t rsvd_511_496 : 16;
+ uint64_t drop_octs : 48;
+ uint64_t rsvd_575_560 : 16;
+ uint64_t drop_pkts : 48;
+ uint64_t rsvd_639_624 : 16;
+ uint64_t re_pkts : 48;
+ uint64_t rsvd_703_688 : 16;
+ uint64_t rsvd_767_704 : 64;/* W11 */
+ uint64_t rsvd_831_768 : 64;/* W12 */
+ uint64_t rsvd_895_832 : 64;/* W13 */
+ uint64_t rsvd_959_896 : 64;/* W14 */
+ uint64_t rsvd_1023_960 : 64;/* W15 */
+};
+
+/* NIX receive side scaling entry structure */
+struct nix_rsse_s {
+ uint32_t rq : 20;
+ uint32_t rsvd_31_20 : 12;
+};
+
+/* NIX receive action structure */
+struct nix_rx_action_s {
+ uint64_t op : 4;
+ uint64_t pf_func : 16;
+ uint64_t index : 20;
+ uint64_t match_id : 16;
+ uint64_t flow_key_alg : 5;
+ uint64_t rsvd_63_61 : 3;
+};
+
+/* NIX receive immediate sub descriptor structure */
+struct nix_rx_imm_s {
+ uint64_t size : 16;
+ uint64_t apad : 3;
+ uint64_t rsvd_59_19 : 41;
+ uint64_t subdc : 4;
+};
+
+/* NIX receive multicast/mirror entry structure */
+struct nix_rx_mce_s {
+ uint64_t op : 2;
+ uint64_t rsvd_2 : 1;
+ uint64_t eol : 1;
+ uint64_t index : 20;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t pf_func : 16;
+ uint64_t next : 16;
+};
+
+/* NIX receive parse structure */
+struct nix_rx_parse_s {
+ uint64_t chan : 12;
+ uint64_t desc_sizem1 : 5;
+ uint64_t imm_copy : 1;
+ uint64_t express : 1;
+ uint64_t wqwd : 1;
+ uint64_t errlev : 4;
+ uint64_t errcode : 8;
+ uint64_t latype : 4;
+ uint64_t lbtype : 4;
+ uint64_t lctype : 4;
+ uint64_t ldtype : 4;
+ uint64_t letype : 4;
+ uint64_t lftype : 4;
+ uint64_t lgtype : 4;
+ uint64_t lhtype : 4;
+ uint64_t pkt_lenm1 : 16;
+ uint64_t l2m : 1;
+ uint64_t l2b : 1;
+ uint64_t l3m : 1;
+ uint64_t l3b : 1;
+ uint64_t vtag0_valid : 1;
+ uint64_t vtag0_gone : 1;
+ uint64_t vtag1_valid : 1;
+ uint64_t vtag1_gone : 1;
+ uint64_t pkind : 6;
+ uint64_t rsvd_95_94 : 2;
+ uint64_t vtag0_tci : 16;
+ uint64_t vtag1_tci : 16;
+ uint64_t laflags : 8;
+ uint64_t lbflags : 8;
+ uint64_t lcflags : 8;
+ uint64_t ldflags : 8;
+ uint64_t leflags : 8;
+ uint64_t lfflags : 8;
+ uint64_t lgflags : 8;
+ uint64_t lhflags : 8;
+ uint64_t eoh_ptr : 8;
+ uint64_t wqe_aura : 20;
+ uint64_t pb_aura : 20;
+ uint64_t match_id : 16;
+ uint64_t laptr : 8;
+ uint64_t lbptr : 8;
+ uint64_t lcptr : 8;
+ uint64_t ldptr : 8;
+ uint64_t leptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t lgptr : 8;
+ uint64_t lhptr : 8;
+ uint64_t vtag0_ptr : 8;
+ uint64_t vtag1_ptr : 8;
+ uint64_t flow_key_alg : 5;
+ uint64_t rsvd_383_341 : 43;
+ uint64_t rsvd_447_384 : 64; /* W6 */
+};
+
+/* NIX receive scatter/gather sub descriptor structure */
+struct nix_rx_sg_s {
+ uint64_t seg1_size : 16;
+ uint64_t seg2_size : 16;
+ uint64_t seg3_size : 16;
+ uint64_t segs : 2;
+ uint64_t rsvd_59_50 : 10;
+ uint64_t subdc : 4;
+};
+
+/* NIX receive vtag action structure */
+struct nix_rx_vtag_action_s {
+ uint64_t vtag0_relptr : 8;
+ uint64_t vtag0_lid : 3;
+ uint64_t rsvd_11 : 1;
+ uint64_t vtag0_type : 3;
+ uint64_t vtag0_valid : 1;
+ uint64_t rsvd_31_16 : 16;
+ uint64_t vtag1_relptr : 8;
+ uint64_t vtag1_lid : 3;
+ uint64_t rsvd_43 : 1;
+ uint64_t vtag1_type : 3;
+ uint64_t vtag1_valid : 1;
+ uint64_t rsvd_63_48 : 16;
+};
+
+/* NIX send completion structure */
+struct nix_send_comp_s {
+ uint64_t status : 8;
+ uint64_t sqe_id : 16;
+ uint64_t rsvd_63_24 : 40;
+};
+
+/* NIX send CRC sub descriptor structure */
+struct nix_send_crc_s {
+ uint64_t size : 16;
+ uint64_t start : 16;
+ uint64_t insert : 16;
+ uint64_t rsvd_57_48 : 10;
+ uint64_t alg : 2;
+ uint64_t subdc : 4;
+ uint64_t iv : 32;
+ uint64_t rsvd_127_96 : 32;
+};
+
+/* NIX send extended header sub descriptor structure */
+RTE_STD_C11
+union nix_send_ext_w0_u {
+ uint64_t u;
+ struct {
+ uint64_t lso_mps : 14;
+ uint64_t lso : 1;
+ uint64_t tstmp : 1;
+ uint64_t lso_sb : 8;
+ uint64_t lso_format : 5;
+ uint64_t rsvd_31_29 : 3;
+ uint64_t shp_chg : 9;
+ uint64_t shp_dis : 1;
+ uint64_t shp_ra : 2;
+ uint64_t markptr : 8;
+ uint64_t markform : 7;
+ uint64_t mark_en : 1;
+ uint64_t subdc : 4;
+ };
+};
+
+RTE_STD_C11
+union nix_send_ext_w1_u {
+ uint64_t u;
+ struct {
+ uint64_t vlan0_ins_ptr : 8;
+ uint64_t vlan0_ins_tci : 16;
+ uint64_t vlan1_ins_ptr : 8;
+ uint64_t vlan1_ins_tci : 16;
+ uint64_t vlan0_ins_ena : 1;
+ uint64_t vlan1_ins_ena : 1;
+ uint64_t rsvd_127_114 : 14;
+ };
+};
+
+struct nix_send_ext_s {
+ union nix_send_ext_w0_u w0;
+ union nix_send_ext_w1_u w1;
+};
+
+/* NIX send header sub descriptor structure */
+RTE_STD_C11
+union nix_send_hdr_w0_u {
+ uint64_t u;
+ struct {
+ uint64_t total : 18;
+ uint64_t rsvd_18 : 1;
+ uint64_t df : 1;
+ uint64_t aura : 20;
+ uint64_t sizem1 : 3;
+ uint64_t pnc : 1;
+ uint64_t sq : 20;
+ };
+};
+
+RTE_STD_C11
+union nix_send_hdr_w1_u {
+ uint64_t u;
+ struct {
+ uint64_t ol3ptr : 8;
+ uint64_t ol4ptr : 8;
+ uint64_t il3ptr : 8;
+ uint64_t il4ptr : 8;
+ uint64_t ol3type : 4;
+ uint64_t ol4type : 4;
+ uint64_t il3type : 4;
+ uint64_t il4type : 4;
+ uint64_t sqe_id : 16;
+ };
+};
+
+struct nix_send_hdr_s {
+ union nix_send_hdr_w0_u w0;
+ union nix_send_hdr_w1_u w1;
+};
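+
+/*
+ * Usage sketch (illustrative, not part of the original header): fill send
+ * header word 0 for a packet of "len" bytes queued on send queue "sq";
+ * all other fields keep their reset value of zero.
+ */
+static inline void
+nix_send_hdr_w0_example(union nix_send_hdr_w0_u *w0, uint32_t len, uint32_t sq)
+{
+	w0->u = 0;
+	w0->total = len & 0x3ffff;	/* 18-bit total byte count */
+	w0->sq = sq & 0xfffff;		/* 20-bit send queue index */
+}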
+
+/* NIX send immediate sub descriptor structure */
+struct nix_send_imm_s {
+ uint64_t size : 16;
+ uint64_t apad : 3;
+ uint64_t rsvd_59_19 : 41;
+ uint64_t subdc : 4;
+};
+
+/* NIX send jump sub descriptor structure */
+struct nix_send_jump_s {
+ uint64_t sizem1 : 7;
+ uint64_t rsvd_13_7 : 7;
+ uint64_t ld_type : 2;
+ uint64_t aura : 20;
+ uint64_t rsvd_58_36 : 23;
+ uint64_t f : 1;
+ uint64_t subdc : 4;
+ uint64_t addr : 64; /* W1 */
+};
+
+/* NIX send memory sub descriptor structure */
+struct nix_send_mem_s {
+ uint64_t offset : 16;
+ uint64_t rsvd_52_16 : 37;
+ uint64_t wmem : 1;
+ uint64_t dsz : 2;
+ uint64_t alg : 4;
+ uint64_t subdc : 4;
+ uint64_t addr : 64; /* W1 */
+};
+
+/* NIX send scatter/gather sub descriptor structure */
+RTE_STD_C11
+union nix_send_sg_s {
+ uint64_t u;
+ struct {
+ uint64_t seg1_size : 16;
+ uint64_t seg2_size : 16;
+ uint64_t seg3_size : 16;
+ uint64_t segs : 2;
+ uint64_t rsvd_54_50 : 5;
+ uint64_t i1 : 1;
+ uint64_t i2 : 1;
+ uint64_t i3 : 1;
+ uint64_t ld_type : 2;
+ uint64_t subdc : 4;
+ };
+};
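+
+/*
+ * Usage sketch (illustrative, not part of the original header): describe a
+ * single buffer segment in a scatter/gather sub descriptor, using the
+ * NIX_SENDLDTYPE_* and NIX_SUBDC_* values defined above.
+ */
+static inline void
+nix_send_sg_one_seg_example(union nix_send_sg_s *sg, uint16_t len)
+{
+	sg->u = 0;
+	sg->seg1_size = len;
+	sg->segs = 1;
+	sg->ld_type = NIX_SENDLDTYPE_LDD;
+	sg->subdc = NIX_SUBDC_SG;
+}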
+
+/* NIX send work sub descriptor structure */
+struct nix_send_work_s {
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t rsvd_59_44 : 16;
+ uint64_t subdc : 4;
+ uint64_t addr : 64; /* W1 */
+};
+
+/* NIX send queue context hardware structure */
+struct nix_sq_ctx_hw_s {
+ uint64_t ena : 1;
+ uint64_t substream : 20;
+ uint64_t max_sqe_size : 2;
+ uint64_t sqe_way_mask : 16;
+ uint64_t sqb_aura : 20;
+ uint64_t gbl_rsvd1 : 5;
+ uint64_t cq_id : 20;
+ uint64_t cq_ena : 1;
+ uint64_t qint_idx : 6;
+ uint64_t gbl_rsvd2 : 1;
+ uint64_t sq_int : 8;
+ uint64_t sq_int_ena : 8;
+ uint64_t xoff : 1;
+ uint64_t sqe_stype : 2;
+ uint64_t gbl_rsvd : 17;
+ uint64_t head_sqb : 64;/* W2 */
+ uint64_t head_offset : 6;
+ uint64_t sqb_dequeue_count : 16;
+ uint64_t default_chan : 12;
+ uint64_t sdp_mcast : 1;
+ uint64_t sso_ena : 1;
+ uint64_t dse_rsvd1 : 28;
+ uint64_t sqb_enqueue_count : 16;
+ uint64_t tail_offset : 6;
+ uint64_t lmt_dis : 1;
+ uint64_t smq_rr_quantum : 24;
+ uint64_t dnq_rsvd1 : 17;
+ uint64_t tail_sqb : 64;/* W5 */
+ uint64_t next_sqb : 64;/* W6 */
+ uint64_t mnq_dis : 1;
+ uint64_t smq : 9;
+ uint64_t smq_pend : 1;
+ uint64_t smq_next_sq : 20;
+ uint64_t smq_next_sq_vld : 1;
+ uint64_t scm1_rsvd2 : 32;
+ uint64_t smenq_sqb : 64;/* W8 */
+ uint64_t smenq_offset : 6;
+ uint64_t cq_limit : 8;
+ uint64_t smq_rr_count : 25;
+ uint64_t scm_lso_rem : 18;
+ uint64_t scm_dq_rsvd0 : 7;
+ uint64_t smq_lso_segnum : 8;
+ uint64_t vfi_lso_total : 18;
+ uint64_t vfi_lso_sizem1 : 3;
+ uint64_t vfi_lso_sb : 8;
+ uint64_t vfi_lso_mps : 14;
+ uint64_t vfi_lso_vlan0_ins_ena : 1;
+ uint64_t vfi_lso_vlan1_ins_ena : 1;
+ uint64_t vfi_lso_vld : 1;
+ uint64_t smenq_next_sqb_vld : 1;
+ uint64_t scm_dq_rsvd1 : 9;
+ uint64_t smenq_next_sqb : 64;/* W11 */
+ uint64_t seb_rsvd1 : 64;/* W12 */
+ uint64_t drop_pkts : 48;
+ uint64_t drop_octs_lsw : 16;
+ uint64_t drop_octs_msw : 32;
+ uint64_t pkts_lsw : 32;
+ uint64_t pkts_msw : 16;
+ uint64_t octs : 48;
+};
+
+/* NIX send queue context structure */
+struct nix_sq_ctx_s {
+ uint64_t ena : 1;
+ uint64_t qint_idx : 6;
+ uint64_t substream : 20;
+ uint64_t sdp_mcast : 1;
+ uint64_t cq : 20;
+ uint64_t sqe_way_mask : 16;
+ uint64_t smq : 9;
+ uint64_t cq_ena : 1;
+ uint64_t xoff : 1;
+ uint64_t sso_ena : 1;
+ uint64_t smq_rr_quantum : 24;
+ uint64_t default_chan : 12;
+ uint64_t sqb_count : 16;
+ uint64_t smq_rr_count : 25;
+ uint64_t sqb_aura : 20;
+ uint64_t sq_int : 8;
+ uint64_t sq_int_ena : 8;
+ uint64_t sqe_stype : 2;
+ uint64_t rsvd_191 : 1;
+ uint64_t max_sqe_size : 2;
+ uint64_t cq_limit : 8;
+ uint64_t lmt_dis : 1;
+ uint64_t mnq_dis : 1;
+ uint64_t smq_next_sq : 20;
+ uint64_t smq_lso_segnum : 8;
+ uint64_t tail_offset : 6;
+ uint64_t smenq_offset : 6;
+ uint64_t head_offset : 6;
+ uint64_t smenq_next_sqb_vld : 1;
+ uint64_t smq_pend : 1;
+ uint64_t smq_next_sq_vld : 1;
+ uint64_t rsvd_255_253 : 3;
+ uint64_t next_sqb : 64;/* W4 */
+ uint64_t tail_sqb : 64;/* W5 */
+ uint64_t smenq_sqb : 64;/* W6 */
+ uint64_t smenq_next_sqb : 64;/* W7 */
+ uint64_t head_sqb : 64;/* W8 */
+ uint64_t rsvd_583_576 : 8;
+ uint64_t vfi_lso_total : 18;
+ uint64_t vfi_lso_sizem1 : 3;
+ uint64_t vfi_lso_sb : 8;
+ uint64_t vfi_lso_mps : 14;
+ uint64_t vfi_lso_vlan0_ins_ena : 1;
+ uint64_t vfi_lso_vlan1_ins_ena : 1;
+ uint64_t vfi_lso_vld : 1;
+ uint64_t rsvd_639_630 : 10;
+ uint64_t scm_lso_rem : 18;
+ uint64_t rsvd_703_658 : 46;
+ uint64_t octs : 48;
+ uint64_t rsvd_767_752 : 16;
+ uint64_t pkts : 48;
+ uint64_t rsvd_831_816 : 16;
+ uint64_t rsvd_895_832 : 64;/* W13 */
+ uint64_t drop_octs : 48;
+ uint64_t rsvd_959_944 : 16;
+ uint64_t drop_pkts : 48;
+ uint64_t rsvd_1023_1008 : 16;
+};
+
+/* NIX transmit action structure */
+struct nix_tx_action_s {
+ uint64_t op : 4;
+ uint64_t rsvd_11_4 : 8;
+ uint64_t index : 20;
+ uint64_t match_id : 16;
+ uint64_t rsvd_63_48 : 16;
+};
+
+/* NIX transmit vtag action structure */
+struct nix_tx_vtag_action_s {
+ uint64_t vtag0_relptr : 8;
+ uint64_t vtag0_lid : 3;
+ uint64_t rsvd_11 : 1;
+ uint64_t vtag0_op : 2;
+ uint64_t rsvd_15_14 : 2;
+ uint64_t vtag0_def : 10;
+ uint64_t rsvd_31_26 : 6;
+ uint64_t vtag1_relptr : 8;
+ uint64_t vtag1_lid : 3;
+ uint64_t rsvd_43 : 1;
+ uint64_t vtag1_op : 2;
+ uint64_t rsvd_47_46 : 2;
+ uint64_t vtag1_def : 10;
+ uint64_t rsvd_63_58 : 6;
+};
+
+/* NIX work queue entry header structure */
+struct nix_wqe_hdr_s {
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t node : 2;
+ uint64_t q : 14;
+ uint64_t wqe_type : 4;
+};
+
+/* NIX Rx flow key algorithm field structure */
+struct nix_rx_flowkey_alg {
+	uint64_t key_offset       : 6;
+	uint64_t ln_mask          : 1;
+	uint64_t fn_mask          : 1;
+	uint64_t hdr_offset       : 8;
+	uint64_t bytesm1          : 5;
+	uint64_t lid              : 3;
+	uint64_t reserved_24_24   : 1;
+	uint64_t ena              : 1;
+	uint64_t sel_chan         : 1;
+	uint64_t ltype_mask       : 4;
+	uint64_t ltype_match      : 4;
+	uint64_t reserved_35_63   : 29;
+};
+
+/* NIX LSO format field structure */
+struct nix_lso_format {
+ uint64_t offset : 8;
+ uint64_t layer : 2;
+ uint64_t rsvd_10_11 : 2;
+ uint64_t sizem1 : 2;
+ uint64_t rsvd_14_15 : 2;
+ uint64_t alg : 3;
+ uint64_t rsvd_19_63 : 45;
+};
+
+#define NIX_LSO_FIELD_MAX (8)
+#define NIX_LSO_FIELD_ALG_MASK GENMASK(18, 16)
+#define NIX_LSO_FIELD_SZ_MASK GENMASK(13, 12)
+#define NIX_LSO_FIELD_LY_MASK GENMASK(9, 8)
+#define NIX_LSO_FIELD_OFF_MASK GENMASK(7, 0)
+
+#define NIX_LSO_FIELD_MASK \
+ (NIX_LSO_FIELD_OFF_MASK | \
+ NIX_LSO_FIELD_LY_MASK | \
+ NIX_LSO_FIELD_SZ_MASK | \
+ NIX_LSO_FIELD_ALG_MASK)
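+
+/*
+ * Illustrative example (not part of the original header): compose one LSO
+ * format field word. The bit positions follow the NIX_LSO_FIELD_*_MASK
+ * definitions above; the offset and layer values are hypothetical.
+ */
+static inline uint64_t
+nix_lso_field_example(void)
+{
+	uint64_t offset = 2;			/* byte offset within the layer */
+	uint64_t layer = 2;			/* hypothetical layer selector */
+	uint64_t sizem1 = 1;			/* 2-byte field (size minus one) */
+	uint64_t alg = NIX_LSOALG_ADD_PAYLEN;	/* add payload length */
+
+	return offset | (layer << 8) | (sizem1 << 12) | (alg << 16);
+}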
+
+#endif /* __OTX2_NIX_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npa.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npa.h
new file mode 100644
index 000000000..2224216c9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npa.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_NPA_HW_H__
+#define __OTX2_NPA_HW_H__
+
+/* Register offsets */
+
+#define NPA_AF_BLK_RST (0x0ull)
+#define NPA_AF_CONST (0x10ull)
+#define NPA_AF_CONST1 (0x18ull)
+#define NPA_AF_LF_RST (0x20ull)
+#define NPA_AF_GEN_CFG (0x30ull)
+#define NPA_AF_NDC_CFG (0x40ull)
+#define NPA_AF_NDC_SYNC (0x50ull)
+#define NPA_AF_INP_CTL (0xd0ull)
+#define NPA_AF_ACTIVE_CYCLES_PC (0xf0ull)
+#define NPA_AF_AVG_DELAY (0x100ull)
+#define NPA_AF_GEN_INT (0x140ull)
+#define NPA_AF_GEN_INT_W1S (0x148ull)
+#define NPA_AF_GEN_INT_ENA_W1S (0x150ull)
+#define NPA_AF_GEN_INT_ENA_W1C (0x158ull)
+#define NPA_AF_RVU_INT (0x160ull)
+#define NPA_AF_RVU_INT_W1S (0x168ull)
+#define NPA_AF_RVU_INT_ENA_W1S (0x170ull)
+#define NPA_AF_RVU_INT_ENA_W1C (0x178ull)
+#define NPA_AF_ERR_INT (0x180ull)
+#define NPA_AF_ERR_INT_W1S (0x188ull)
+#define NPA_AF_ERR_INT_ENA_W1S (0x190ull)
+#define NPA_AF_ERR_INT_ENA_W1C (0x198ull)
+#define NPA_AF_RAS (0x1a0ull)
+#define NPA_AF_RAS_W1S (0x1a8ull)
+#define NPA_AF_RAS_ENA_W1S (0x1b0ull)
+#define NPA_AF_RAS_ENA_W1C (0x1b8ull)
+#define NPA_AF_AQ_CFG (0x600ull)
+#define NPA_AF_AQ_BASE (0x610ull)
+#define NPA_AF_AQ_STATUS (0x620ull)
+#define NPA_AF_AQ_DOOR (0x630ull)
+#define NPA_AF_AQ_DONE_WAIT (0x640ull)
+#define NPA_AF_AQ_DONE (0x650ull)
+#define NPA_AF_AQ_DONE_ACK (0x660ull)
+#define NPA_AF_AQ_DONE_TIMER (0x670ull)
+#define NPA_AF_AQ_DONE_INT (0x680ull)
+#define NPA_AF_AQ_DONE_ENA_W1S (0x690ull)
+#define NPA_AF_AQ_DONE_ENA_W1C (0x698ull)
+#define NPA_AF_LFX_AURAS_CFG(a) (0x4000ull | (uint64_t)(a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010ull | (uint64_t)(a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a) (0x4100ull | (uint64_t)(a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a) (0x4110ull | (uint64_t)(a) << 18)
+#define NPA_PRIV_AF_INT_CFG (0x10000ull)
+#define NPA_PRIV_LFX_CFG(a) (0x10010ull | (uint64_t)(a) << 8)
+#define NPA_PRIV_LFX_INT_CFG(a) (0x10020ull | (uint64_t)(a) << 8)
+#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030ull)
+#define NPA_AF_DTX_FILTER_CTL (0x10040ull)
+
+#define NPA_LF_AURA_OP_ALLOCX(a) (0x10ull | (uint64_t)(a) << 3)
+#define NPA_LF_AURA_OP_FREE0 (0x20ull)
+#define NPA_LF_AURA_OP_FREE1 (0x28ull)
+#define NPA_LF_AURA_OP_CNT (0x30ull)
+#define NPA_LF_AURA_OP_LIMIT (0x50ull)
+#define NPA_LF_AURA_OP_INT (0x60ull)
+#define NPA_LF_AURA_OP_THRESH (0x70ull)
+#define NPA_LF_POOL_OP_PC (0x100ull)
+#define NPA_LF_POOL_OP_AVAILABLE (0x110ull)
+#define NPA_LF_POOL_OP_PTR_START0 (0x120ull)
+#define NPA_LF_POOL_OP_PTR_START1 (0x128ull)
+#define NPA_LF_POOL_OP_PTR_END0 (0x130ull)
+#define NPA_LF_POOL_OP_PTR_END1 (0x138ull)
+#define NPA_LF_POOL_OP_INT (0x160ull)
+#define NPA_LF_POOL_OP_THRESH (0x170ull)
+#define NPA_LF_ERR_INT (0x200ull)
+#define NPA_LF_ERR_INT_W1S (0x208ull)
+#define NPA_LF_ERR_INT_ENA_W1C (0x210ull)
+#define NPA_LF_ERR_INT_ENA_W1S (0x218ull)
+#define NPA_LF_RAS (0x220ull)
+#define NPA_LF_RAS_W1S (0x228ull)
+#define NPA_LF_RAS_ENA_W1C (0x230ull)
+#define NPA_LF_RAS_ENA_W1S (0x238ull)
+#define NPA_LF_QINTX_CNT(a) (0x300ull | (uint64_t)(a) << 12)
+#define NPA_LF_QINTX_INT(a) (0x310ull | (uint64_t)(a) << 12)
+#define NPA_LF_QINTX_ENA_W1S(a) (0x320ull | (uint64_t)(a) << 12)
+#define NPA_LF_QINTX_ENA_W1C(a) (0x330ull | (uint64_t)(a) << 12)
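+
+/*
+ * Illustrative example (not part of the original header): per-queue offsets
+ * compose by shifting the queue index, e.g. NPA_LF_QINTX_INT(2)
+ * = 0x310 | (2 << 12) = 0x2310.
+ */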
+
+
+/* Enum offsets */
+
+#define NPA_AQ_COMP_NOTDONE (0x0ull)
+#define NPA_AQ_COMP_GOOD (0x1ull)
+#define NPA_AQ_COMP_SWERR (0x2ull)
+#define NPA_AQ_COMP_CTX_POISON (0x3ull)
+#define NPA_AQ_COMP_CTX_FAULT (0x4ull)
+#define NPA_AQ_COMP_LOCKERR (0x5ull)
+
+#define NPA_AF_INT_VEC_RVU (0x0ull)
+#define NPA_AF_INT_VEC_GEN (0x1ull)
+#define NPA_AF_INT_VEC_AQ_DONE (0x2ull)
+#define NPA_AF_INT_VEC_AF_ERR (0x3ull)
+#define NPA_AF_INT_VEC_POISON (0x4ull)
+
+#define NPA_AQ_INSTOP_NOP (0x0ull)
+#define NPA_AQ_INSTOP_INIT (0x1ull)
+#define NPA_AQ_INSTOP_WRITE (0x2ull)
+#define NPA_AQ_INSTOP_READ (0x3ull)
+#define NPA_AQ_INSTOP_LOCK (0x4ull)
+#define NPA_AQ_INSTOP_UNLOCK (0x5ull)
+
+#define NPA_AQ_CTYPE_AURA (0x0ull)
+#define NPA_AQ_CTYPE_POOL (0x1ull)
+
+#define NPA_BPINTF_NIX0_RX (0x0ull)
+#define NPA_BPINTF_NIX1_RX (0x1ull)
+
+#define NPA_AURA_ERR_INT_AURA_FREE_UNDER (0x0ull)
+#define NPA_AURA_ERR_INT_AURA_ADD_OVER (0x1ull)
+#define NPA_AURA_ERR_INT_AURA_ADD_UNDER (0x2ull)
+#define NPA_AURA_ERR_INT_POOL_DIS (0x3ull)
+#define NPA_AURA_ERR_INT_R4 (0x4ull)
+#define NPA_AURA_ERR_INT_R5 (0x5ull)
+#define NPA_AURA_ERR_INT_R6 (0x6ull)
+#define NPA_AURA_ERR_INT_R7 (0x7ull)
+
+#define NPA_LF_INT_VEC_ERR_INT (0x40ull)
+#define NPA_LF_INT_VEC_POISON (0x41ull)
+#define NPA_LF_INT_VEC_QINT_END (0x3full)
+#define NPA_LF_INT_VEC_QINT_START (0x0ull)
+
+#define NPA_INPQ_SSO (0x4ull)
+#define NPA_INPQ_TIM (0x5ull)
+#define NPA_INPQ_DPI (0x6ull)
+#define NPA_INPQ_AURA_OP (0xeull)
+#define NPA_INPQ_INTERNAL_RSV (0xfull)
+#define NPA_INPQ_NIX0_RX (0x0ull)
+#define NPA_INPQ_NIX1_RX (0x2ull)
+#define NPA_INPQ_NIX0_TX (0x1ull)
+#define NPA_INPQ_NIX1_TX (0x3ull)
+#define NPA_INPQ_R_END (0xdull)
+#define NPA_INPQ_R_START (0x7ull)
+
+#define NPA_POOL_ERR_INT_OVFLS (0x0ull)
+#define NPA_POOL_ERR_INT_RANGE (0x1ull)
+#define NPA_POOL_ERR_INT_PERR (0x2ull)
+#define NPA_POOL_ERR_INT_R3 (0x3ull)
+#define NPA_POOL_ERR_INT_R4 (0x4ull)
+#define NPA_POOL_ERR_INT_R5 (0x5ull)
+#define NPA_POOL_ERR_INT_R6 (0x6ull)
+#define NPA_POOL_ERR_INT_R7 (0x7ull)
+
+#define NPA_NDC0_PORT_AURA0 (0x0ull)
+#define NPA_NDC0_PORT_AURA1 (0x1ull)
+#define NPA_NDC0_PORT_POOL0 (0x2ull)
+#define NPA_NDC0_PORT_POOL1 (0x3ull)
+#define NPA_NDC0_PORT_STACK0 (0x4ull)
+#define NPA_NDC0_PORT_STACK1 (0x5ull)
+
+#define NPA_LF_ERR_INT_AURA_DIS (0x0ull)
+#define NPA_LF_ERR_INT_AURA_OOR (0x1ull)
+#define NPA_LF_ERR_INT_AURA_FAULT (0xcull)
+#define NPA_LF_ERR_INT_POOL_FAULT (0xdull)
+#define NPA_LF_ERR_INT_STACK_FAULT (0xeull)
+#define NPA_LF_ERR_INT_QINT_FAULT (0xfull)
+
+/* Structures definitions */
+
+/* NPA admin queue instruction structure */
+struct npa_aq_inst_s {
+ uint64_t op : 4;
+ uint64_t ctype : 4;
+ uint64_t lf : 9;
+ uint64_t rsvd_23_17 : 7;
+ uint64_t cindex : 20;
+ uint64_t rsvd_62_44 : 19;
+ uint64_t doneint : 1;
+ uint64_t res_addr : 64; /* W1 */
+};
+
+/* NPA admin queue result structure */
+struct npa_aq_res_s {
+ uint64_t op : 4;
+ uint64_t ctype : 4;
+ uint64_t compcode : 8;
+ uint64_t doneint : 1;
+ uint64_t rsvd_63_17 : 47;
+ uint64_t rsvd_127_64 : 64; /* W1 */
+};
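+
+/*
+ * Usage sketch (illustrative, not part of the original header): test whether
+ * hardware has completed an admin queue instruction, using the NPA_AQ_COMP_*
+ * codes defined above.
+ */
+static inline int
+npa_aq_res_is_done(const volatile struct npa_aq_res_s *res)
+{
+	return res->compcode != NPA_AQ_COMP_NOTDONE;
+}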
+
+/* NPA aura operation write data structure */
+struct npa_aura_op_wdata_s {
+ uint64_t aura : 20;
+ uint64_t rsvd_62_20 : 43;
+ uint64_t drop : 1;
+};
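+
+/*
+ * Usage sketch (illustrative, not part of the original header): pack the
+ * 64-bit write data used with the NPA_LF_AURA_OP_* registers, selecting
+ * aura "aura"; the drop bit maps to the structure's drop field, whose exact
+ * semantics follow the hardware manual.
+ */
+static inline uint64_t
+npa_aura_op_wdata_example(uint32_t aura, int drop)
+{
+	return (uint64_t)(aura & 0xfffff) | ((uint64_t)!!drop << 63);
+}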
+
+/* NPA aura context structure */
+struct npa_aura_s {
+ uint64_t pool_addr : 64;/* W0 */
+ uint64_t ena : 1;
+ uint64_t rsvd_66_65 : 2;
+ uint64_t pool_caching : 1;
+ uint64_t pool_way_mask : 16;
+ uint64_t avg_con : 9;
+ uint64_t rsvd_93 : 1;
+ uint64_t pool_drop_ena : 1;
+ uint64_t aura_drop_ena : 1;
+ uint64_t bp_ena : 2;
+ uint64_t rsvd_103_98 : 6;
+ uint64_t aura_drop : 8;
+ uint64_t shift : 6;
+ uint64_t rsvd_119_118 : 2;
+ uint64_t avg_level : 8;
+ uint64_t count : 36;
+ uint64_t rsvd_167_164 : 4;
+ uint64_t nix0_bpid : 9;
+ uint64_t rsvd_179_177 : 3;
+ uint64_t nix1_bpid : 9;
+ uint64_t rsvd_191_189 : 3;
+ uint64_t limit : 36;
+ uint64_t rsvd_231_228 : 4;
+ uint64_t bp : 8;
+ uint64_t rsvd_243_240 : 4;
+ uint64_t fc_ena : 1;
+ uint64_t fc_up_crossing : 1;
+ uint64_t fc_stype : 2;
+ uint64_t fc_hyst_bits : 4;
+ uint64_t rsvd_255_252 : 4;
+ uint64_t fc_addr : 64;/* W4 */
+ uint64_t pool_drop : 8;
+ uint64_t update_time : 16;
+ uint64_t err_int : 8;
+ uint64_t err_int_ena : 8;
+ uint64_t thresh_int : 1;
+ uint64_t thresh_int_ena : 1;
+ uint64_t thresh_up : 1;
+ uint64_t rsvd_363 : 1;
+ uint64_t thresh_qint_idx : 7;
+ uint64_t rsvd_371 : 1;
+ uint64_t err_qint_idx : 7;
+ uint64_t rsvd_383_379 : 5;
+ uint64_t thresh : 36;
+ uint64_t rsvd_447_420 : 28;
+ uint64_t rsvd_511_448 : 64;/* W7 */
+};
+
+/* NPA pool context structure */
+struct npa_pool_s {
+ uint64_t stack_base : 64;/* W0 */
+ uint64_t ena : 1;
+ uint64_t nat_align : 1;
+ uint64_t rsvd_67_66 : 2;
+ uint64_t stack_caching : 1;
+ uint64_t rsvd_71_69 : 3;
+ uint64_t stack_way_mask : 16;
+ uint64_t buf_offset : 12;
+ uint64_t rsvd_103_100 : 4;
+ uint64_t buf_size : 11;
+ uint64_t rsvd_127_115 : 13;
+ uint64_t stack_max_pages : 32;
+ uint64_t stack_pages : 32;
+ uint64_t op_pc : 48;
+ uint64_t rsvd_255_240 : 16;
+ uint64_t stack_offset : 4;
+ uint64_t rsvd_263_260 : 4;
+ uint64_t shift : 6;
+ uint64_t rsvd_271_270 : 2;
+ uint64_t avg_level : 8;
+ uint64_t avg_con : 9;
+ uint64_t fc_ena : 1;
+ uint64_t fc_stype : 2;
+ uint64_t fc_hyst_bits : 4;
+ uint64_t fc_up_crossing : 1;
+ uint64_t rsvd_299_297 : 3;
+ uint64_t update_time : 16;
+ uint64_t rsvd_319_316 : 4;
+ uint64_t fc_addr : 64;/* W5 */
+ uint64_t ptr_start : 64;/* W6 */
+ uint64_t ptr_end : 64;/* W7 */
+ uint64_t rsvd_535_512 : 24;
+ uint64_t err_int : 8;
+ uint64_t err_int_ena : 8;
+ uint64_t thresh_int : 1;
+ uint64_t thresh_int_ena : 1;
+ uint64_t thresh_up : 1;
+ uint64_t rsvd_555 : 1;
+ uint64_t thresh_qint_idx : 7;
+ uint64_t rsvd_563 : 1;
+ uint64_t err_qint_idx : 7;
+ uint64_t rsvd_575_571 : 5;
+ uint64_t thresh : 36;
+ uint64_t rsvd_639_612 : 28;
+ uint64_t rsvd_703_640 : 64;/* W10 */
+ uint64_t rsvd_767_704 : 64;/* W11 */
+ uint64_t rsvd_831_768 : 64;/* W12 */
+ uint64_t rsvd_895_832 : 64;/* W13 */
+ uint64_t rsvd_959_896 : 64;/* W14 */
+ uint64_t rsvd_1023_960 : 64;/* W15 */
+};
+
+/* NPA queue interrupt context hardware structure */
+struct npa_qint_hw_s {
+ uint32_t count : 22;
+ uint32_t rsvd_30_22 : 9;
+ uint32_t ena : 1;
+};
+
+#endif /* __OTX2_NPA_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npc.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npc.h
new file mode 100644
index 000000000..efde1e214
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npc.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_NPC_HW_H__
+#define __OTX2_NPC_HW_H__
+
+/* Register offsets */
+
+#define NPC_AF_CFG (0x0ull)
+#define NPC_AF_ACTIVE_PC (0x10ull)
+#define NPC_AF_CONST (0x20ull)
+#define NPC_AF_CONST1 (0x30ull)
+#define NPC_AF_BLK_RST (0x40ull)
+#define NPC_AF_MCAM_SCRUB_CTL (0xa0ull)
+#define NPC_AF_KCAM_SCRUB_CTL (0xb0ull)
+#define NPC_AF_KPUX_CFG(a) \
+ (0x500ull | (uint64_t)(a) << 3)
+#define NPC_AF_PCK_CFG (0x600ull)
+#define NPC_AF_PCK_DEF_OL2 (0x610ull)
+#define NPC_AF_PCK_DEF_OIP4 (0x620ull)
+#define NPC_AF_PCK_DEF_OIP6 (0x630ull)
+#define NPC_AF_PCK_DEF_IIP4 (0x640ull)
+#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) \
+ (0x800ull | (uint64_t)(a) << 3)
+#define NPC_AF_INTFX_KEX_CFG(a) \
+ (0x1010ull | (uint64_t)(a) << 8)
+#define NPC_AF_PKINDX_ACTION0(a) \
+ (0x80000ull | (uint64_t)(a) << 6)
+#define NPC_AF_PKINDX_ACTION1(a) \
+ (0x80008ull | (uint64_t)(a) << 6)
+#define NPC_AF_PKINDX_CPI_DEFX(a, b) \
+ (0x80020ull | (uint64_t)(a) << 6 | (uint64_t)(b) << 3)
+#define NPC_AF_CHLEN90B_PKIND (0x3bull)
+#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
+ (0x100000ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
+ (0x100020ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6)
+#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
+ (0x100028ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6)
+#define NPC_AF_KPUX_ENTRY_DISX(a, b) \
+ (0x180000ull | (uint64_t)(a) << 6 | (uint64_t)(b) << 3)
+#define NPC_AF_CPIX_CFG(a) \
+ (0x200000ull | (uint64_t)(a) << 3)
+#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
+ (0x900000ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 12 | \
+ (uint64_t)(c) << 5 | (uint64_t)(d) << 3)
+#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
+ (0x980000ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 12 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
+ (0x1000000ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
+ (0x1000010ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
+ (0x1000020ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) \
+ (0x1800000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
+ (0x1880000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_MATCH_STATX(a) \
+ (0x1880008ull | (uint64_t)(a) << 8)
+#define NPC_AF_INTFX_MISS_STAT_ACT(a) \
+ (0x1880040ull + (uint64_t)(a) * 0x8)
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) \
+ (0x1900000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
+ (0x1900008ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_INTFX_MISS_ACT(a) \
+ (0x1a00000ull | (uint64_t)(a) << 4)
+#define NPC_AF_INTFX_MISS_TAG_ACT(a) \
+ (0x1b00008ull | (uint64_t)(a) << 4)
+#define NPC_AF_MCAM_BANKX_HITX(a, b) \
+ (0x1c80000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_LKUP_CTL (0x2000000ull)
+#define NPC_AF_LKUP_DATAX(a) \
+ (0x2000200ull | (uint64_t)(a) << 4)
+#define NPC_AF_LKUP_RESULTX(a) \
+ (0x2000400ull | (uint64_t)(a) << 4)
+#define NPC_AF_INTFX_STAT(a) \
+ (0x2000800ull | (uint64_t)(a) << 4)
+#define NPC_AF_DBG_CTL (0x3000000ull)
+#define NPC_AF_DBG_STATUS (0x3000010ull)
+#define NPC_AF_KPUX_DBG(a) \
+ (0x3000020ull | (uint64_t)(a) << 8)
+#define NPC_AF_IKPU_ERR_CTL (0x3000080ull)
+#define NPC_AF_KPUX_ERR_CTL(a) \
+ (0x30000a0ull | (uint64_t)(a) << 8)
+#define NPC_AF_MCAM_DBG (0x3001000ull)
+#define NPC_AF_DBG_DATAX(a) \
+ (0x3001400ull | (uint64_t)(a) << 4)
+#define NPC_AF_DBG_RESULTX(a) \
+ (0x3001800ull | (uint64_t)(a) << 4)
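+
+/*
+ * Illustrative example (not part of the original header): indexed offsets
+ * compose by shifting each index, e.g. NPC_AF_KPUX_CFG(3)
+ * = 0x500 | (3 << 3) = 0x518.
+ */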
+
+
+/* Enum offsets */
+
+#define NPC_INTF_NIX0_RX (0x0ull)
+#define NPC_INTF_NIX0_TX (0x1ull)
+
+#define NPC_LKUPOP_PKT (0x0ull)
+#define NPC_LKUPOP_KEY (0x1ull)
+
+#define NPC_MCAM_KEY_X1 (0x0ull)
+#define NPC_MCAM_KEY_X2 (0x1ull)
+#define NPC_MCAM_KEY_X4 (0x2ull)
+
+enum NPC_ERRLEV_E {
+ NPC_ERRLEV_RE = 0,
+ NPC_ERRLEV_LA = 1,
+ NPC_ERRLEV_LB = 2,
+ NPC_ERRLEV_LC = 3,
+ NPC_ERRLEV_LD = 4,
+ NPC_ERRLEV_LE = 5,
+ NPC_ERRLEV_LF = 6,
+ NPC_ERRLEV_LG = 7,
+ NPC_ERRLEV_LH = 8,
+ NPC_ERRLEV_R9 = 9,
+ NPC_ERRLEV_R10 = 10,
+ NPC_ERRLEV_R11 = 11,
+ NPC_ERRLEV_R12 = 12,
+ NPC_ERRLEV_R13 = 13,
+ NPC_ERRLEV_R14 = 14,
+ NPC_ERRLEV_NIX = 15,
+ NPC_ERRLEV_ENUM_LAST = 16,
+};
+
+enum npc_kpu_err_code {
+ NPC_EC_NOERR = 0, /* has to be zero */
+ NPC_EC_UNK,
+ NPC_EC_IH_LENGTH,
+ NPC_EC_EDSA_UNK,
+ NPC_EC_L2_K1,
+ NPC_EC_L2_K2,
+ NPC_EC_L2_K3,
+ NPC_EC_L2_K3_ETYPE_UNK,
+ NPC_EC_L2_K4,
+ NPC_EC_MPLS_2MANY,
+ NPC_EC_MPLS_UNK,
+ NPC_EC_NSH_UNK,
+ NPC_EC_IP_TTL_0,
+ NPC_EC_IP_FRAG_OFFSET_1,
+ NPC_EC_IP_VER,
+ NPC_EC_IP6_HOP_0,
+ NPC_EC_IP6_VER,
+ NPC_EC_TCP_FLAGS_FIN_ONLY,
+ NPC_EC_TCP_FLAGS_ZERO,
+ NPC_EC_TCP_FLAGS_RST_FIN,
+ NPC_EC_TCP_FLAGS_URG_SYN,
+ NPC_EC_TCP_FLAGS_RST_SYN,
+ NPC_EC_TCP_FLAGS_SYN_FIN,
+ NPC_EC_VXLAN,
+ NPC_EC_NVGRE,
+ NPC_EC_GRE,
+ NPC_EC_GRE_VER1,
+ NPC_EC_L4,
+ NPC_EC_OIP4_CSUM,
+ NPC_EC_IIP4_CSUM,
+ NPC_EC_LAST /* has to be the last item */
+};
+
+enum NPC_LID_E {
+ NPC_LID_LA = 0,
+ NPC_LID_LB,
+ NPC_LID_LC,
+ NPC_LID_LD,
+ NPC_LID_LE,
+ NPC_LID_LF,
+ NPC_LID_LG,
+ NPC_LID_LH,
+};
+
+#define NPC_LT_NA 0
+
+enum npc_kpu_la_ltype {
+ NPC_LT_LA_8023 = 1,
+ NPC_LT_LA_ETHER,
+ NPC_LT_LA_IH_NIX_ETHER,
+ NPC_LT_LA_IH_8_ETHER,
+ NPC_LT_LA_IH_4_ETHER,
+ NPC_LT_LA_IH_2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CH_LEN_90B_ETHER, /* Custom L2 header of length 90 bytes */
+};
+
+enum npc_kpu_lb_ltype {
+ NPC_LT_LB_ETAG = 1,
+ NPC_LT_LB_CTAG,
+ NPC_LT_LB_STAG_QINQ,
+ NPC_LT_LB_BTAG,
+ NPC_LT_LB_ITAG,
+ NPC_LT_LB_DSA,
+ NPC_LT_LB_DSA_VLAN,
+ NPC_LT_LB_EDSA,
+ NPC_LT_LB_EDSA_VLAN,
+ NPC_LT_LB_EXDSA,
+ NPC_LT_LB_EXDSA_VLAN,
+};
+
+enum npc_kpu_lc_ltype {
+ NPC_LT_LC_PTP = 1,
+ NPC_LT_LC_IP,
+ NPC_LT_LC_IP_OPT,
+ NPC_LT_LC_IP6,
+ NPC_LT_LC_IP6_EXT,
+ NPC_LT_LC_ARP,
+ NPC_LT_LC_RARP,
+ NPC_LT_LC_MPLS,
+ NPC_LT_LC_NSH,
+ NPC_LT_LC_FCOE,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_ld_ltype {
+ NPC_LT_LD_TCP = 1,
+ NPC_LT_LD_UDP,
+ NPC_LT_LD_ICMP,
+ NPC_LT_LD_SCTP,
+ NPC_LT_LD_ICMP6,
+ NPC_LT_LD_IGMP = 8,
+ NPC_LT_LD_ESP,
+ NPC_LT_LD_AH,
+ NPC_LT_LD_GRE,
+ NPC_LT_LD_NVGRE,
+ NPC_LT_LD_NSH,
+ NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_LT_LD_TU_MPLS_IN_IP,
+};
+
+enum npc_kpu_le_ltype {
+ NPC_LT_LE_VXLAN = 1,
+ NPC_LT_LE_GENEVE,
+ NPC_LT_LE_GTPU = 4,
+ NPC_LT_LE_VXLANGPE,
+ NPC_LT_LE_GTPC,
+ NPC_LT_LE_NSH,
+ NPC_LT_LE_TU_MPLS_IN_GRE,
+ NPC_LT_LE_TU_NSH_IN_GRE,
+ NPC_LT_LE_TU_MPLS_IN_UDP,
+};
+
+enum npc_kpu_lf_ltype {
+ NPC_LT_LF_TU_ETHER = 1,
+ NPC_LT_LF_TU_PPP,
+ NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ NPC_LT_LF_TU_MPLS_IN_NSH,
+ NPC_LT_LF_TU_3RD_NSH,
+};
+
+enum npc_kpu_lg_ltype {
+ NPC_LT_LG_TU_IP = 1,
+ NPC_LT_LG_TU_IP6,
+ NPC_LT_LG_TU_ARP,
+ NPC_LT_LG_TU_ETHER_IN_NSH,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_lh_ltype {
+ NPC_LT_LH_TU_TCP = 1,
+ NPC_LT_LH_TU_UDP,
+ NPC_LT_LH_TU_ICMP,
+ NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_TU_IGMP = 8,
+ NPC_LT_LH_TU_ESP,
+ NPC_LT_LH_TU_AH,
+};
+
+/* Structure definitions */
+struct npc_kpu_profile_cam {
+ uint8_t state;
+ uint8_t state_mask;
+ uint16_t dp0;
+ uint16_t dp0_mask;
+ uint16_t dp1;
+ uint16_t dp1_mask;
+ uint16_t dp2;
+ uint16_t dp2_mask;
+};
+
+struct npc_kpu_profile_action {
+ uint8_t errlev;
+ uint8_t errcode;
+ uint8_t dp0_offset;
+ uint8_t dp1_offset;
+ uint8_t dp2_offset;
+ uint8_t bypass_count;
+ uint8_t parse_done;
+ uint8_t next_state;
+ uint8_t ptr_advance;
+ uint8_t cap_ena;
+ uint8_t lid;
+ uint8_t ltype;
+ uint8_t flags;
+ uint8_t offset;
+ uint8_t mask;
+ uint8_t right;
+ uint8_t shift;
+};
+
+struct npc_kpu_profile {
+ int cam_entries;
+ int action_entries;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
+};
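+
+/*
+ * Usage sketch (illustrative, not part of the original header): count how
+ * many actions of a KPU profile terminate parsing; a well-formed profile is
+ * expected to pair each CAM entry with one action entry.
+ */
+static inline int
+npc_kpu_profile_parse_done_count(const struct npc_kpu_profile *prof)
+{
+	int i, n = 0;
+
+	for (i = 0; i < prof->action_entries; i++)
+		n += !!prof->action[i].parse_done;
+	return n;
+}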
+
+/* NPC KPU register formats */
+struct npc_kpu_cam {
+ uint64_t dp0_data : 16;
+ uint64_t dp1_data : 16;
+ uint64_t dp2_data : 16;
+ uint64_t state : 8;
+ uint64_t rsvd_63_56 : 8;
+};
+
+struct npc_kpu_action0 {
+ uint64_t var_len_shift : 3;
+ uint64_t var_len_right : 1;
+ uint64_t var_len_mask : 8;
+ uint64_t var_len_offset : 8;
+ uint64_t ptr_advance : 8;
+ uint64_t capture_flags : 8;
+ uint64_t capture_ltype : 4;
+ uint64_t capture_lid : 3;
+ uint64_t rsvd_43 : 1;
+ uint64_t next_state : 8;
+ uint64_t parse_done : 1;
+ uint64_t capture_ena : 1;
+ uint64_t byp_count : 3;
+ uint64_t rsvd_63_57 : 7;
+};
+
+struct npc_kpu_action1 {
+ uint64_t dp0_offset : 8;
+ uint64_t dp1_offset : 8;
+ uint64_t dp2_offset : 8;
+ uint64_t errcode : 8;
+ uint64_t errlev : 4;
+ uint64_t rsvd_63_36 : 28;
+};
+
+struct npc_kpu_pkind_cpi_def {
+ uint64_t cpi_base : 10;
+ uint64_t rsvd_11_10 : 2;
+ uint64_t add_shift : 3;
+ uint64_t rsvd_15 : 1;
+ uint64_t add_mask : 8;
+ uint64_t add_offset : 8;
+ uint64_t flags_mask : 8;
+ uint64_t flags_match : 8;
+ uint64_t ltype_mask : 4;
+ uint64_t ltype_match : 4;
+ uint64_t lid : 3;
+ uint64_t rsvd_62_59 : 4;
+ uint64_t ena : 1;
+};
+
+struct nix_rx_action {
+	uint64_t op           : 4;
+	uint64_t pf_func      : 16;
+	uint64_t index        : 20;
+	uint64_t match_id     : 16;
+	uint64_t flow_key_alg : 5;
+	uint64_t rsvd_63_61   : 3;
+};
+
+struct nix_tx_action {
+	uint64_t op           : 4;
+	uint64_t rsvd_11_4    : 8;
+	uint64_t index        : 20;
+	uint64_t match_id     : 16;
+	uint64_t rsvd_63_48   : 16;
+};
+
+/* NPC layer parse information structure */
+struct npc_layer_info_s {
+ uint32_t lptr : 8;
+ uint32_t flags : 8;
+ uint32_t ltype : 4;
+ uint32_t rsvd_31_20 : 12;
+};
+
+/* NPC layer MCAM search key extract structure */
+struct npc_layer_kex_s {
+ uint16_t flags : 8;
+ uint16_t ltype : 4;
+ uint16_t rsvd_15_12 : 4;
+};
+
+/* NPC MCAM search key X1 structure */
+struct npc_mcam_key_x1_s {
+ uint64_t intf : 2;
+ uint64_t rsvd_63_2 : 62;
+ uint64_t kw0 : 64; /* W1 */
+ uint64_t kw1 : 48;
+ uint64_t rsvd_191_176 : 16;
+};
+
+/* NPC MCAM search key X2 structure */
+struct npc_mcam_key_x2_s {
+ uint64_t intf : 2;
+ uint64_t rsvd_63_2 : 62;
+ uint64_t kw0 : 64; /* W1 */
+ uint64_t kw1 : 64; /* W2 */
+ uint64_t kw2 : 64; /* W3 */
+ uint64_t kw3 : 32;
+ uint64_t rsvd_319_288 : 32;
+};
+
+/* NPC MCAM search key X4 structure */
+struct npc_mcam_key_x4_s {
+ uint64_t intf : 2;
+ uint64_t rsvd_63_2 : 62;
+ uint64_t kw0 : 64; /* W1 */
+ uint64_t kw1 : 64; /* W2 */
+ uint64_t kw2 : 64; /* W3 */
+ uint64_t kw3 : 64; /* W4 */
+ uint64_t kw4 : 64; /* W5 */
+ uint64_t kw5 : 64; /* W6 */
+ uint64_t kw6 : 64; /* W7 */
+};
+
+/* NPC parse key extract structure */
+struct npc_parse_kex_s {
+ uint64_t chan : 12;
+ uint64_t errlev : 4;
+ uint64_t errcode : 8;
+ uint64_t l2m : 1;
+ uint64_t l2b : 1;
+ uint64_t l3m : 1;
+ uint64_t l3b : 1;
+ uint64_t la : 12;
+ uint64_t lb : 12;
+ uint64_t lc : 12;
+ uint64_t ld : 12;
+ uint64_t le : 12;
+ uint64_t lf : 12;
+ uint64_t lg : 12;
+ uint64_t lh : 12;
+ uint64_t rsvd_127_124 : 4;
+};
+
+/* NPC result structure */
+struct npc_result_s {
+ uint64_t intf : 2;
+ uint64_t pkind : 6;
+ uint64_t chan : 12;
+ uint64_t errlev : 4;
+ uint64_t errcode : 8;
+ uint64_t l2m : 1;
+ uint64_t l2b : 1;
+ uint64_t l3m : 1;
+ uint64_t l3b : 1;
+ uint64_t eoh_ptr : 8;
+ uint64_t rsvd_63_44 : 20;
+ uint64_t action : 64; /* W1 */
+ uint64_t vtag_action : 64; /* W2 */
+ uint64_t la : 20;
+ uint64_t lb : 20;
+ uint64_t lc : 20;
+ uint64_t rsvd_255_252 : 4;
+ uint64_t ld : 20;
+ uint64_t le : 20;
+ uint64_t lf : 20;
+ uint64_t rsvd_319_316 : 4;
+ uint64_t lg : 20;
+ uint64_t lh : 20;
+ uint64_t rsvd_383_360 : 24;
+};
+
+#endif /* __OTX2_NPC_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_rvu.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_rvu.h
new file mode 100644
index 000000000..f2037ec57
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_rvu.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_RVU_HW_H__
+#define __OTX2_RVU_HW_H__
+
+/* Register offsets */
+
+#define RVU_AF_MSIXTR_BASE (0x10ull)
+#define RVU_AF_BLK_RST (0x30ull)
+#define RVU_AF_PF_BAR4_ADDR (0x40ull)
+#define RVU_AF_RAS (0x100ull)
+#define RVU_AF_RAS_W1S (0x108ull)
+#define RVU_AF_RAS_ENA_W1S (0x110ull)
+#define RVU_AF_RAS_ENA_W1C (0x118ull)
+#define RVU_AF_GEN_INT (0x120ull)
+#define RVU_AF_GEN_INT_W1S (0x128ull)
+#define RVU_AF_GEN_INT_ENA_W1S (0x130ull)
+#define RVU_AF_GEN_INT_ENA_W1C (0x138ull)
+#define RVU_AF_AFPFX_MBOXX(a, b) \
+ (0x2000ull | (uint64_t)(a) << 4 | (uint64_t)(b) << 3)
+#define RVU_AF_PFME_STATUS (0x2800ull)
+#define RVU_AF_PFTRPEND (0x2810ull)
+#define RVU_AF_PFTRPEND_W1S (0x2820ull)
+#define RVU_AF_PF_RST (0x2840ull)
+#define RVU_AF_HWVF_RST (0x2850ull)
+#define RVU_AF_PFAF_MBOX_INT (0x2880ull)
+#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888ull)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890ull)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898ull)
+#define RVU_AF_PFFLR_INT (0x28a0ull)
+#define RVU_AF_PFFLR_INT_W1S (0x28a8ull)
+#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0ull)
+#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8ull)
+#define RVU_AF_PFME_INT (0x28c0ull)
+#define RVU_AF_PFME_INT_W1S (0x28c8ull)
+#define RVU_AF_PFME_INT_ENA_W1S (0x28d0ull)
+#define RVU_AF_PFME_INT_ENA_W1C (0x28d8ull)
+#define RVU_PRIV_CONST (0x8000000ull)
+#define RVU_PRIV_GEN_CFG (0x8000010ull)
+#define RVU_PRIV_CLK_CFG (0x8000020ull)
+#define RVU_PRIV_ACTIVE_PC (0x8000030ull)
+#define RVU_PRIV_PFX_CFG(a) (0x8000100ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_NIXX_CFG(a, b) \
+ (0x8000300ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define RVU_PRIV_PFX_NPA_CFG(a) (0x8000310ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_SSO_CFG(a) (0x8000320ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_SSOW_CFG(a) (0x8000330ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_TIM_CFG(a) (0x8000340ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_CPTX_CFG(a, b) \
+ (0x8000350ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400ull | (uint64_t)(a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_NIXX_CFG(a, b) \
+ (0x8001300ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define RVU_PRIV_HWVFX_NPA_CFG(a) (0x8001310ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_SSO_CFG(a) (0x8001320ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_SSOW_CFG(a) (0x8001330ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_TIM_CFG(a) (0x8001340ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_CPTX_CFG(a, b) \
+ (0x8001350ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) \
+ (0x0ull | (uint64_t)(a) << 12 | (uint64_t)(b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10ull)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8a0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8c0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8e0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9a0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9c0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9e0ull | (uint64_t)(a) << 3)
+#define RVU_PF_PFAF_MBOXX(a) (0xc00ull | (uint64_t)(a) << 3)
+#define RVU_PF_INT (0xc20ull)
+#define RVU_PF_INT_W1S (0xc28ull)
+#define RVU_PF_INT_ENA_W1S (0xc30ull)
+#define RVU_PF_INT_ENA_W1C (0xc38ull)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x80000ull | (uint64_t)(a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x80008ull | (uint64_t)(a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xf0000ull | (uint64_t)(a) << 3)
+#define RVU_VF_VFPF_MBOXX(a) (0x0ull | (uint64_t)(a) << 3)
+#define RVU_VF_INT (0x20ull)
+#define RVU_VF_INT_W1S (0x28ull)
+#define RVU_VF_INT_ENA_W1S (0x30ull)
+#define RVU_VF_INT_ENA_W1C (0x38ull)
+#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200ull | (uint64_t)(a) << 3)
+#define RVU_VF_MSIX_VECX_ADDR(a) (0x80000ull | (uint64_t)(a) << 4)
+#define RVU_VF_MSIX_VECX_CTL(a) (0x80008ull | (uint64_t)(a) << 4)
+#define RVU_VF_MSIX_PBAX(a) (0xf0000ull | (uint64_t)(a) << 3)
+
+
+/* Enum offsets */
+
+#define RVU_BAR_RVU_PF_END_BAR0 (0x84f000000000ull)
+#define RVU_BAR_RVU_PF_START_BAR0 (0x840000000000ull)
+#define RVU_BAR_RVU_PFX_FUNCX_BAR2(a, b) \
+ (0x840200000000ull | ((uint64_t)(a) << 36) | ((uint64_t)(b) << 25))
+
+#define RVU_AF_INT_VEC_POISON (0x0ull)
+#define RVU_AF_INT_VEC_PFFLR (0x1ull)
+#define RVU_AF_INT_VEC_PFME (0x2ull)
+#define RVU_AF_INT_VEC_GEN (0x3ull)
+#define RVU_AF_INT_VEC_MBOX (0x4ull)
+
+#define RVU_BLOCK_TYPE_RVUM (0x0ull)
+#define RVU_BLOCK_TYPE_LMT (0x2ull)
+#define RVU_BLOCK_TYPE_NIX (0x3ull)
+#define RVU_BLOCK_TYPE_NPA (0x4ull)
+#define RVU_BLOCK_TYPE_NPC (0x5ull)
+#define RVU_BLOCK_TYPE_SSO (0x6ull)
+#define RVU_BLOCK_TYPE_SSOW (0x7ull)
+#define RVU_BLOCK_TYPE_TIM (0x8ull)
+#define RVU_BLOCK_TYPE_CPT (0x9ull)
+#define RVU_BLOCK_TYPE_NDC (0xaull)
+#define RVU_BLOCK_TYPE_DDF (0xbull)
+#define RVU_BLOCK_TYPE_ZIP (0xcull)
+#define RVU_BLOCK_TYPE_RAD (0xdull)
+#define RVU_BLOCK_TYPE_DFA (0xeull)
+#define RVU_BLOCK_TYPE_HNA (0xfull)
+
+#define RVU_BLOCK_ADDR_RVUM (0x0ull)
+#define RVU_BLOCK_ADDR_LMT (0x1ull)
+#define RVU_BLOCK_ADDR_NPA (0x3ull)
+#define RVU_BLOCK_ADDR_NPC (0x6ull)
+#define RVU_BLOCK_ADDR_SSO (0x7ull)
+#define RVU_BLOCK_ADDR_SSOW (0x8ull)
+#define RVU_BLOCK_ADDR_TIM (0x9ull)
+#define RVU_BLOCK_ADDR_NIX0 (0x4ull)
+#define RVU_BLOCK_ADDR_CPT0 (0xaull)
+#define RVU_BLOCK_ADDR_NDC0 (0xcull)
+#define RVU_BLOCK_ADDR_NDC1 (0xdull)
+#define RVU_BLOCK_ADDR_NDC2 (0xeull)
+#define RVU_BLOCK_ADDR_R_END (0x1full)
+#define RVU_BLOCK_ADDR_R_START (0x14ull)
+
+#define RVU_VF_INT_VEC_MBOX (0x0ull)
+
+#define RVU_PF_INT_VEC_AFPF_MBOX (0x6ull)
+#define RVU_PF_INT_VEC_VFFLR0 (0x0ull)
+#define RVU_PF_INT_VEC_VFFLR1 (0x1ull)
+#define RVU_PF_INT_VEC_VFME0 (0x2ull)
+#define RVU_PF_INT_VEC_VFME1 (0x3ull)
+#define RVU_PF_INT_VEC_VFPF_MBOX0 (0x4ull)
+#define RVU_PF_INT_VEC_VFPF_MBOX1 (0x5ull)
+
+
+#define AF_BAR2_ALIASX_SIZE (0x100000ull)
+
+#define TIM_AF_BAR2_SEL (0x9000000ull)
+#define SSO_AF_BAR2_SEL (0x9000000ull)
+#define NIX_AF_BAR2_SEL (0x9000000ull)
+#define SSOW_AF_BAR2_SEL (0x9000000ull)
+#define NPA_AF_BAR2_SEL (0x9000000ull)
+#define CPT_AF_BAR2_SEL (0x9000000ull)
+#define RVU_AF_BAR2_SEL (0x9000000ull)
+
+#define AF_BAR2_ALIASX(a, b) \
+ (0x9100000ull | (uint64_t)(a) << 12 | (uint64_t)(b))
+#define TIM_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define SSO_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define NIX_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(0, b)
+#define SSOW_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define NPA_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(0, b)
+#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define RVU_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
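+
+/*
+ * Illustrative example (not part of the original header):
+ * AF_BAR2_ALIASX(1, 0x10) = 0x9100000 | (1 << 12) | 0x10 = 0x9101010.
+ */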
+
+/* Structure definitions */
+
+/* RVU admin function register address structure */
+struct rvu_af_addr_s {
+ uint64_t addr : 28;
+ uint64_t block : 5;
+ uint64_t rsvd_63_33 : 31;
+};
+
+/* RVU function-unique address structure */
+struct rvu_func_addr_s {
+ uint32_t addr : 12;
+ uint32_t lf_slot : 8;
+ uint32_t block : 5;
+ uint32_t rsvd_31_25 : 7;
+};
+
+/* RVU MSI-X vector structure */
+struct rvu_msix_vec_s {
+ uint64_t addr : 64; /* W0 */
+ uint64_t data : 32;
+ uint64_t mask : 1;
+ uint64_t pend : 1;
+ uint64_t rsvd_127_98 : 30;
+};
+
+/* RVU PF function identification structure */
+struct rvu_pf_func_s {
+ uint16_t func : 10;
+ uint16_t pf : 6;
+};
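+
+/*
+ * Usage sketch (illustrative, not part of the original header): pack a
+ * PF/function pair into the 16-bit pf_func encoding described above.
+ */
+static inline uint16_t
+rvu_make_pf_func_example(uint16_t pf, uint16_t func)
+{
+	return (uint16_t)(((pf & 0x3f) << 10) | (func & 0x3ff));
+}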
+
+#endif /* __OTX2_RVU_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sdp.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sdp.h
new file mode 100644
index 000000000..1e690f8b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sdp.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_SDP_HW_H_
+#define __OTX2_SDP_HW_H_
+
+/* SDP VF IOQs */
+#define SDP_MIN_RINGS_PER_VF (1)
+#define SDP_MAX_RINGS_PER_VF (8)
+
+/* SDP VF IQ configuration */
+#define SDP_VF_MAX_IQ_DESCRIPTORS (512)
+#define SDP_VF_MIN_IQ_DESCRIPTORS (128)
+
+#define SDP_VF_DB_MIN (1)
+#define SDP_VF_DB_TIMEOUT (1)
+#define SDP_VF_INTR_THRESHOLD (0xFFFFFFFF)
+
+#define SDP_VF_64BYTE_INSTR (64)
+#define SDP_VF_32BYTE_INSTR (32)
+
+/* SDP VF OQ configuration */
+#define SDP_VF_MAX_OQ_DESCRIPTORS (512)
+#define SDP_VF_MIN_OQ_DESCRIPTORS (128)
+#define SDP_VF_OQ_BUF_SIZE (2048)
+#define SDP_VF_OQ_REFIL_THRESHOLD (16)
+
+#define SDP_VF_OQ_INFOPTR_MODE (1)
+#define SDP_VF_OQ_BUFPTR_MODE (0)
+
+#define SDP_VF_OQ_INTR_PKT (1)
+#define SDP_VF_OQ_INTR_TIME (10)
+#define SDP_VF_CFG_IO_QUEUES SDP_MAX_RINGS_PER_VF
+
+/* Wait time in milliseconds for FLR */
+#define SDP_VF_PCI_FLR_WAIT (100)
+#define SDP_VF_BUSY_LOOP_COUNT (10000)
+
+#define SDP_VF_MAX_IO_QUEUES SDP_MAX_RINGS_PER_VF
+#define SDP_VF_MIN_IO_QUEUES SDP_MIN_RINGS_PER_VF
+
+/* SDP VF IOQs per rawdev */
+#define SDP_VF_MAX_IOQS_PER_RAWDEV SDP_VF_MAX_IO_QUEUES
+#define SDP_VF_DEFAULT_IOQS_PER_RAWDEV SDP_VF_MIN_IO_QUEUES
+
+/* SDP VF Register definitions */
+#define SDP_VF_RING_OFFSET (0x1ull << 17)
+
+/* SDP VF IQ Registers */
+#define SDP_VF_R_IN_CONTROL_START (0x10000)
+#define SDP_VF_R_IN_ENABLE_START (0x10010)
+#define SDP_VF_R_IN_INSTR_BADDR_START (0x10020)
+#define SDP_VF_R_IN_INSTR_RSIZE_START (0x10030)
+#define SDP_VF_R_IN_INSTR_DBELL_START (0x10040)
+#define SDP_VF_R_IN_CNTS_START (0x10050)
+#define SDP_VF_R_IN_INT_LEVELS_START (0x10060)
+#define SDP_VF_R_IN_PKT_CNT_START (0x10080)
+#define SDP_VF_R_IN_BYTE_CNT_START (0x10090)
+
+#define SDP_VF_R_IN_CONTROL(ring) \
+ (SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_ENABLE(ring) \
+ (SDP_VF_R_IN_ENABLE_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_INSTR_BADDR(ring) \
+ (SDP_VF_R_IN_INSTR_BADDR_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_INSTR_RSIZE(ring) \
+ (SDP_VF_R_IN_INSTR_RSIZE_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_INSTR_DBELL(ring) \
+ (SDP_VF_R_IN_INSTR_DBELL_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_CNTS(ring) \
+ (SDP_VF_R_IN_CNTS_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_INT_LEVELS(ring) \
+ (SDP_VF_R_IN_INT_LEVELS_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_PKT_CNT(ring) \
+ (SDP_VF_R_IN_PKT_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_BYTE_CNT(ring) \
+ (SDP_VF_R_IN_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+
+/* SDP VF IQ Masks */
+#define SDP_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define SDP_VF_R_IN_CTL_RPVF_POS (48)
+
+#define SDP_VF_R_IN_CTL_IDLE (0x1ull << 28)
+#define SDP_VF_R_IN_CTL_RDSIZE (0x3ull << 25) /* Setting to max(4) */
+#define SDP_VF_R_IN_CTL_IS_64B (0x1ull << 24)
+#define SDP_VF_R_IN_CTL_D_NSR (0x1ull << 8)
+#define SDP_VF_R_IN_CTL_D_ESR (0x1ull << 6)
+#define SDP_VF_R_IN_CTL_D_ROR (0x1ull << 5)
+#define SDP_VF_R_IN_CTL_NSR (0x1ull << 3)
+#define SDP_VF_R_IN_CTL_ESR (0x1ull << 1)
+#define SDP_VF_R_IN_CTL_ROR (0x1ull << 0)
+
+#define SDP_VF_R_IN_CTL_MASK \
+ (SDP_VF_R_IN_CTL_RDSIZE | SDP_VF_R_IN_CTL_IS_64B)
+
+/* SDP VF OQ Registers */
+#define SDP_VF_R_OUT_CNTS_START (0x10100)
+#define SDP_VF_R_OUT_INT_LEVELS_START (0x10110)
+#define SDP_VF_R_OUT_SLIST_BADDR_START (0x10120)
+#define SDP_VF_R_OUT_SLIST_RSIZE_START (0x10130)
+#define SDP_VF_R_OUT_SLIST_DBELL_START (0x10140)
+#define SDP_VF_R_OUT_CONTROL_START (0x10150)
+#define SDP_VF_R_OUT_ENABLE_START (0x10160)
+#define SDP_VF_R_OUT_PKT_CNT_START (0x10180)
+#define SDP_VF_R_OUT_BYTE_CNT_START (0x10190)
+
+#define SDP_VF_R_OUT_CONTROL(ring) \
+ (SDP_VF_R_OUT_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_ENABLE(ring) \
+ (SDP_VF_R_OUT_ENABLE_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_SLIST_BADDR(ring) \
+ (SDP_VF_R_OUT_SLIST_BADDR_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_SLIST_RSIZE(ring) \
+ (SDP_VF_R_OUT_SLIST_RSIZE_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_SLIST_DBELL(ring) \
+ (SDP_VF_R_OUT_SLIST_DBELL_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_CNTS(ring) \
+ (SDP_VF_R_OUT_CNTS_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_INT_LEVELS(ring) \
+ (SDP_VF_R_OUT_INT_LEVELS_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_PKT_CNT(ring) \
+ (SDP_VF_R_OUT_PKT_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_BYTE_CNT(ring) \
+ (SDP_VF_R_OUT_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+
+/* SDP VF OQ Masks */
+#define SDP_VF_R_OUT_CTL_IDLE (1ull << 40)
+#define SDP_VF_R_OUT_CTL_ES_I (1ull << 34)
+#define SDP_VF_R_OUT_CTL_NSR_I (1ull << 33)
+#define SDP_VF_R_OUT_CTL_ROR_I (1ull << 32)
+#define SDP_VF_R_OUT_CTL_ES_D (1ull << 30)
+#define SDP_VF_R_OUT_CTL_NSR_D (1ull << 29)
+#define SDP_VF_R_OUT_CTL_ROR_D (1ull << 28)
+#define SDP_VF_R_OUT_CTL_ES_P (1ull << 26)
+#define SDP_VF_R_OUT_CTL_NSR_P (1ull << 25)
+#define SDP_VF_R_OUT_CTL_ROR_P (1ull << 24)
+#define SDP_VF_R_OUT_CTL_IMODE (1ull << 23)
+
+#define SDP_VF_R_OUT_INT_LEVELS_BMODE (1ull << 63)
+#define SDP_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+/* SDP Instruction Header */
+struct sdp_instr_ih {
+ /* Data Len */
+ uint64_t tlen:16;
+
+ /* Reserved1 */
+ uint64_t rsvd1:20;
+
+ /* PKIND for SDP */
+ uint64_t pkind:6;
+
+ /* Front Data size */
+ uint64_t fsz:6;
+
+ /* No. of entries in gather list */
+ uint64_t gsz:14;
+
+ /* Gather indicator */
+ uint64_t gather:1;
+
+ /* Reserved2 */
+ uint64_t rsvd2:1;
+} __rte_packed;
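+
+/* The field widths above sum to exactly 64 bits, so the header packs
+ * into a single 64-bit instruction word.
+ */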
+
+#endif /* __OTX2_SDP_HW_H_ */
+
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sso.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sso.h
new file mode 100644
index 000000000..98a8130b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sso.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_SSO_HW_H__
+#define __OTX2_SSO_HW_H__
+
+/* Register offsets */
+
+#define SSO_AF_CONST (0x1000ull)
+#define SSO_AF_CONST1 (0x1008ull)
+#define SSO_AF_WQ_INT_PC (0x1020ull)
+#define SSO_AF_NOS_CNT (0x1050ull)
+#define SSO_AF_AW_WE (0x1080ull)
+#define SSO_AF_WS_CFG (0x1088ull)
+#define SSO_AF_GWE_CFG (0x1098ull)
+#define SSO_AF_GWE_RANDOM (0x10b0ull)
+#define SSO_AF_LF_HWGRP_RST (0x10e0ull)
+#define SSO_AF_AW_CFG (0x10f0ull)
+#define SSO_AF_BLK_RST (0x10f8ull)
+#define SSO_AF_ACTIVE_CYCLES0 (0x1100ull)
+#define SSO_AF_ACTIVE_CYCLES1 (0x1108ull)
+#define SSO_AF_ACTIVE_CYCLES2 (0x1110ull)
+#define SSO_AF_ERR0 (0x1220ull)
+#define SSO_AF_ERR0_W1S (0x1228ull)
+#define SSO_AF_ERR0_ENA_W1C (0x1230ull)
+#define SSO_AF_ERR0_ENA_W1S (0x1238ull)
+#define SSO_AF_ERR2 (0x1260ull)
+#define SSO_AF_ERR2_W1S (0x1268ull)
+#define SSO_AF_ERR2_ENA_W1C (0x1270ull)
+#define SSO_AF_ERR2_ENA_W1S (0x1278ull)
+#define SSO_AF_UNMAP_INFO (0x12f0ull)
+#define SSO_AF_UNMAP_INFO2 (0x1300ull)
+#define SSO_AF_UNMAP_INFO3 (0x1310ull)
+#define SSO_AF_RAS (0x1420ull)
+#define SSO_AF_RAS_W1S (0x1430ull)
+#define SSO_AF_RAS_ENA_W1C (0x1460ull)
+#define SSO_AF_RAS_ENA_W1S (0x1470ull)
+#define SSO_AF_AW_INP_CTL (0x2070ull)
+#define SSO_AF_AW_ADD (0x2080ull)
+#define SSO_AF_AW_READ_ARB (0x2090ull)
+#define SSO_AF_XAQ_REQ_PC (0x20b0ull)
+#define SSO_AF_XAQ_LATENCY_PC (0x20b8ull)
+#define SSO_AF_TAQ_CNT (0x20c0ull)
+#define SSO_AF_TAQ_ADD (0x20e0ull)
+#define SSO_AF_POISONX(a) (0x2100ull | (uint64_t)(a) << 3)
+#define SSO_AF_POISONX_W1S(a) (0x2200ull | (uint64_t)(a) << 3)
+#define SSO_PRIV_AF_INT_CFG (0x3000ull)
+#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800ull)
+#define SSO_PRIV_LFX_HWGRP_CFG(a) (0x10000ull | (uint64_t)(a) << 3)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG(a) (0x20000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IU_ACCNTX_CFG(a) (0x50000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IU_ACCNTX_RST(a) (0x60000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_HEAD_PTR(a) (0x80000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_TAIL_PTR(a) (0x90000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_HEAD_NEXT(a) (0xa0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_TAIL_NEXT(a) (0xb0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_TIAQX_STATUS(a) (0xc0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_TOAQX_STATUS(a) (0xd0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_GMCTL(a) (0xe0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_HWGRPX_IAQ_THR(a) (0x200000ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_TAQ_THR(a) (0x200010ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_PRI(a) (0x200020ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_WS_PC(a) (0x200050ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_EXT_PC(a) (0x200060ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_WA_PC(a) (0x200070ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_TS_PC(a) (0x200080ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_DS_PC(a) (0x200090ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_DQ_PC(a)		(0x2000a0ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_PAGE_CNT(a) (0x200100ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_AW_STATUS(a) (0x200110ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_AW_CFG(a) (0x200120ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_AW_TAGSPACE(a) (0x200130ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_XAQ_AURA(a) (0x200140ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_XAQ_LIMIT(a) (0x200220ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_IU_ACCNT(a) (0x200230ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWSX_ARB(a) (0x400100ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWSX_INV(a) (0x400180ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWSX_GMCTL(a) (0x400200ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWSX_SX_GRPMSKX(a, b, c) \
+ (0x400400ull | (uint64_t)(a) << 12 | (uint64_t)(b) << 5 | \
+ (uint64_t)(c) << 3)
+#define SSO_AF_IPL_FREEX(a) (0x800000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IPL_IAQX(a) (0x840000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IPL_DESCHEDX(a) (0x860000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IPL_CONFX(a) (0x880000ull | (uint64_t)(a) << 3)
+#define SSO_AF_NPA_DIGESTX(a) (0x900000ull | (uint64_t)(a) << 3)
+#define SSO_AF_NPA_DIGESTX_W1S(a) (0x900100ull | (uint64_t)(a) << 3)
+#define SSO_AF_BFP_DIGESTX(a) (0x900200ull | (uint64_t)(a) << 3)
+#define SSO_AF_BFP_DIGESTX_W1S(a) (0x900300ull | (uint64_t)(a) << 3)
+#define SSO_AF_BFPN_DIGESTX(a) (0x900400ull | (uint64_t)(a) << 3)
+#define SSO_AF_BFPN_DIGESTX_W1S(a) (0x900500ull | (uint64_t)(a) << 3)
+#define SSO_AF_GRPDIS_DIGESTX(a) (0x900600ull | (uint64_t)(a) << 3)
+#define SSO_AF_GRPDIS_DIGESTX_W1S(a) (0x900700ull | (uint64_t)(a) << 3)
+#define SSO_AF_AWEMPTY_DIGESTX(a) (0x900800ull | (uint64_t)(a) << 3)
+#define SSO_AF_AWEMPTY_DIGESTX_W1S(a) (0x900900ull | (uint64_t)(a) << 3)
+#define SSO_AF_WQP0_DIGESTX(a) (0x900a00ull | (uint64_t)(a) << 3)
+#define SSO_AF_WQP0_DIGESTX_W1S(a) (0x900b00ull | (uint64_t)(a) << 3)
+#define SSO_AF_AW_DROPPED_DIGESTX(a) (0x900c00ull | (uint64_t)(a) << 3)
+#define SSO_AF_AW_DROPPED_DIGESTX_W1S(a) (0x900d00ull | (uint64_t)(a) << 3)
+#define SSO_AF_QCTLDIS_DIGESTX(a) (0x900e00ull | (uint64_t)(a) << 3)
+#define SSO_AF_QCTLDIS_DIGESTX_W1S(a) (0x900f00ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQDIS_DIGESTX(a) (0x901000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQDIS_DIGESTX_W1S(a) (0x901100ull | (uint64_t)(a) << 3)
+#define SSO_AF_FLR_AQ_DIGESTX(a) (0x901200ull | (uint64_t)(a) << 3)
+#define SSO_AF_FLR_AQ_DIGESTX_W1S(a) (0x901300ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GMULTI_DIGESTX(a) (0x902000ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GMULTI_DIGESTX_W1S(a) (0x902100ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GUNMAP_DIGESTX(a) (0x902200ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GUNMAP_DIGESTX_W1S(a) (0x902300ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_AWE_DIGESTX(a) (0x902400ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_AWE_DIGESTX_W1S(a) (0x902500ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GWI_DIGESTX(a) (0x902600ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GWI_DIGESTX_W1S(a) (0x902700ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_NE_DIGESTX(a) (0x902800ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_NE_DIGESTX_W1S(a) (0x902900ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_TAG(a) (0xa00000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_GRP(a) (0xa20000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_PENDTAG(a) (0xa40000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_LINKS(a) (0xa60000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_QLINKS(a) (0xa80000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_WQP(a) (0xaa0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_TAQX_LINK(a) (0xc00000ull | (uint64_t)(a) << 3)
+#define SSO_AF_TAQX_WAEX_TAG(a, b) \
+ (0xe00000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define SSO_AF_TAQX_WAEX_WQP(a, b) \
+ (0xe00008ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+
+#define SSO_LF_GGRP_OP_ADD_WORK0 (0x0ull)
+#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
+#define SSO_LF_GGRP_QCTL (0x20ull)
+#define SSO_LF_GGRP_EXE_DIS (0x80ull)
+#define SSO_LF_GGRP_INT (0x100ull)
+#define SSO_LF_GGRP_INT_W1S (0x108ull)
+#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
+#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
+#define SSO_LF_GGRP_INT_THR (0x140ull)
+#define SSO_LF_GGRP_INT_CNT (0x180ull)
+#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
+#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
+#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
+#define SSO_LF_GGRP_MISC_CNT (0x200ull)
+
+#define SSO_AF_IAQ_FREE_CNT_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_IAQ_FREE_CNT_MAX SSO_AF_IAQ_FREE_CNT_MASK
+#define SSO_AF_AW_ADD_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_AW_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_IAQ_MAX_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_RSVD_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_IAQ_RSVD_THR 0x2
+
+#define SSO_AF_TAQ_FREE_CNT_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_TAQ_FREE_CNT_MAX SSO_AF_TAQ_FREE_CNT_MASK
+#define SSO_AF_TAQ_ADD_RSVD_FREE_MASK 0x1FFFull
+#define SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_TAQ_MAX_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_RSVD_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_TAQ_RSVD_THR 0x3
+
+#define SSO_HWGRP_PRI_AFF_MASK 0xFull
+#define SSO_HWGRP_PRI_AFF_SHIFT 8
+#define SSO_HWGRP_PRI_WGT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_SHIFT 16
+#define SSO_HWGRP_PRI_WGT_LEFT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_LEFT_SHIFT 24
+
+#define SSO_HWGRP_AW_CFG_RWEN BIT_ULL(0)
+#define SSO_HWGRP_AW_CFG_LDWB BIT_ULL(1)
+#define SSO_HWGRP_AW_CFG_LDT BIT_ULL(2)
+#define SSO_HWGRP_AW_CFG_STT BIT_ULL(3)
+#define SSO_HWGRP_AW_CFG_XAQ_BYP_DIS BIT_ULL(4)
+
+#define SSO_HWGRP_AW_STS_TPTR_VLD BIT_ULL(8)
+#define SSO_HWGRP_AW_STS_NPA_FETCH BIT_ULL(9)
+#define SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK 0x7ull
+#define SSO_HWGRP_AW_STS_INIT_STS 0x18ull
+
+/* Enum offsets */
+
+#define SSO_LF_INT_VEC_GRP (0x0ull)
+
+#define SSO_AF_INT_VEC_ERR0 (0x0ull)
+#define SSO_AF_INT_VEC_ERR2 (0x1ull)
+#define SSO_AF_INT_VEC_RAS (0x2ull)
+
+#define SSO_WA_IOBN (0x0ull)
+#define SSO_WA_NIXRX (0x1ull)
+#define SSO_WA_CPT (0x2ull)
+#define SSO_WA_ADDWQ (0x3ull)
+#define SSO_WA_DPI (0x4ull)
+#define SSO_WA_NIXTX (0x5ull)
+#define SSO_WA_TIM (0x6ull)
+#define SSO_WA_ZIP (0x7ull)
+
+#define SSO_TT_ORDERED (0x0ull)
+#define SSO_TT_ATOMIC (0x1ull)
+#define SSO_TT_UNTAGGED (0x2ull)
+#define SSO_TT_EMPTY (0x3ull)
+
+
+/* Structure definitions */
+
+#endif /* __OTX2_SSO_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_ssow.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_ssow.h
new file mode 100644
index 000000000..8a4457803
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_ssow.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_SSOW_HW_H__
+#define __OTX2_SSOW_HW_H__
+
+/* Register offsets */
+
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x10ull)
+#define SSOW_AF_LF_HWS_RST (0x30ull)
+#define SSOW_PRIV_LFX_HWS_CFG(a) (0x1000ull | (uint64_t)(a) << 3)
+#define SSOW_PRIV_LFX_HWS_INT_CFG(a) (0x2000ull | (uint64_t)(a) << 3)
+#define SSOW_AF_SCRATCH_WS (0x100000ull)
+#define SSOW_AF_SCRATCH_GW (0x200000ull)
+#define SSOW_AF_SCRATCH_AW (0x300000ull)
+
+#define SSOW_LF_GWS_LINKS (0x10ull)
+#define SSOW_LF_GWS_PENDWQP (0x40ull)
+#define SSOW_LF_GWS_PENDSTATE (0x50ull)
+#define SSOW_LF_GWS_NW_TIM (0x70ull)
+#define SSOW_LF_GWS_GRPMSK_CHG (0x80ull)
+#define SSOW_LF_GWS_INT (0x100ull)
+#define SSOW_LF_GWS_INT_W1S (0x108ull)
+#define SSOW_LF_GWS_INT_ENA_W1S (0x110ull)
+#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
+#define SSOW_LF_GWS_TAG (0x200ull)
+#define SSOW_LF_GWS_WQP (0x210ull)
+#define SSOW_LF_GWS_SWTP (0x220ull)
+#define SSOW_LF_GWS_PENDTAG (0x230ull)
+#define SSOW_LF_GWS_OP_ALLOC_WE (0x400ull)
+#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
+#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
+#define SSOW_LF_GWS_OP_SWTAG_UNTAG (0x810ull)
+#define SSOW_LF_GWS_OP_SWTP_CLR (0x820ull)
+#define SSOW_LF_GWS_OP_UPD_WQP_GRP0 (0x830ull)
+#define SSOW_LF_GWS_OP_UPD_WQP_GRP1 (0x838ull)
+#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
+#define SSOW_LF_GWS_OP_DESCHED_NOSCH (0x8c0ull)
+#define SSOW_LF_GWS_OP_SWTAG_DESCHED (0x980ull)
+#define SSOW_LF_GWS_OP_SWTAG_NOSCHED (0x9c0ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xa00ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED1 (0xa08ull)
+#define SSOW_LF_GWS_OP_SWTP_SET (0xc00ull)
+#define SSOW_LF_GWS_OP_SWTAG_NORM (0xc10ull)
+#define SSOW_LF_GWS_OP_SWTAG_FULL0 (0xc20ull)
+#define SSOW_LF_GWS_OP_SWTAG_FULL1 (0xc28ull)
+#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
+
+
+/* Enum offsets */
+
+#define SSOW_LF_INT_VEC_IOP (0x0ull)
+
+
+#endif /* __OTX2_SSOW_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_tim.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_tim.h
new file mode 100644
index 000000000..41442ad0a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_tim.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_TIM_HW_H__
+#define __OTX2_TIM_HW_H__
+
+/* TIM */
+#define TIM_AF_CONST (0x90)
+#define TIM_PRIV_LFX_CFG(a) (0x20000 | (a) << 3)
+#define TIM_PRIV_LFX_INT_CFG(a) (0x24000 | (a) << 3)
+#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
+#define TIM_AF_BLK_RST (0x10)
+#define TIM_AF_LF_RST (0x20)
+#define TIM_AF_RINGX_GMCTL(a) (0x2000 | (a) << 3)
+#define TIM_AF_RINGX_CTL0(a) (0x4000 | (a) << 3)
+#define TIM_AF_RINGX_CTL1(a) (0x6000 | (a) << 3)
+#define TIM_AF_RINGX_CTL2(a) (0x8000 | (a) << 3)
+#define TIM_AF_FLAGS_REG (0x80)
+#define TIM_AF_FLAGS_REG_ENA_TIM BIT_ULL(0)
+#define TIM_AF_RINGX_CTL1_ENA BIT_ULL(47)
+#define TIM_AF_RINGX_CTL1_RCF_BUSY BIT_ULL(50)
+#define TIM_AF_RINGX_CLT1_CLK_10NS (0)
+#define TIM_AF_RINGX_CLT1_CLK_GPIO (1)
+#define TIM_AF_RINGX_CLT1_CLK_GTI (2)
+#define TIM_AF_RINGX_CLT1_CLK_PTP (3)
+
+/* ENUMS */
+
+#define TIM_LF_INT_VEC_NRSPERR_INT (0x0ull)
+#define TIM_LF_INT_VEC_RAS_INT (0x1ull)
+
+#endif /* __OTX2_TIM_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/meson.build b/src/spdk/dpdk/drivers/common/octeontx2/meson.build
new file mode 100644
index 000000000..f2c04342e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/meson.build
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+sources = files('otx2_dev.c',
+ 'otx2_irq.c',
+ 'otx2_mbox.c',
+ 'otx2_common.c',
+ 'otx2_sec_idev.c',
+ )
+
+extra_flags = []
+# This integrated controller runs only on an arm64 machine; suppress 32-bit pointer-cast warnings
+if not dpdk_conf.get('RTE_ARCH_64')
+ extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
+endif
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+deps = ['eal', 'pci', 'ethdev', 'kvargs']
+includes += include_directories('../../common/octeontx2',
+ '../../mempool/octeontx2', '../../bus/pci')
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.c
new file mode 100644
index 000000000..5e7272f69
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "otx2_common.h"
+#include "otx2_dev.h"
+#include "otx2_mbox.h"
+
+/**
+ * @internal
+ * Set default NPA configuration.
+ */
+void
+otx2_npa_set_defaults(struct otx2_idev_cfg *idev)
+{
+ idev->npa_pf_func = 0;
+ rte_atomic16_set(&idev->npa_refcnt, 0);
+}
+
+/**
+ * @internal
+ * Get intra device config structure.
+ */
+struct otx2_idev_cfg *
+otx2_intra_dev_get_cfg(void)
+{
+ const char name[] = "octeontx2_intra_device_conf";
+ const struct rte_memzone *mz;
+ struct otx2_idev_cfg *idev;
+
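+	/* The named memzone keeps this config shared by all otx2 drivers
+	 * (and secondary processes) in the application.
+	 */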
+ mz = rte_memzone_lookup(name);
+ if (mz != NULL)
+ return mz->addr;
+
+ /* Request for the first time */
+ mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_cfg),
+ SOCKET_ID_ANY, 0, OTX2_ALIGN);
+ if (mz != NULL) {
+ idev = mz->addr;
+ idev->sso_pf_func = 0;
+ idev->npa_lf = NULL;
+ otx2_npa_set_defaults(idev);
+ return idev;
+ }
+ return NULL;
+}
+
+/**
+ * @internal
+ * Get SSO PF_FUNC.
+ */
+uint16_t
+otx2_sso_pf_func_get(void)
+{
+ struct otx2_idev_cfg *idev;
+ uint16_t sso_pf_func;
+
+ sso_pf_func = 0;
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev != NULL)
+ sso_pf_func = idev->sso_pf_func;
+
+ return sso_pf_func;
+}
+
+/**
+ * @internal
+ * Set SSO PF_FUNC.
+ */
+void
+otx2_sso_pf_func_set(uint16_t sso_pf_func)
+{
+ struct otx2_idev_cfg *idev;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev != NULL) {
+ idev->sso_pf_func = sso_pf_func;
+ rte_smp_wmb();
+ }
+}
+
+/**
+ * @internal
+ * Get NPA PF_FUNC.
+ */
+uint16_t
+otx2_npa_pf_func_get(void)
+{
+ struct otx2_idev_cfg *idev;
+ uint16_t npa_pf_func;
+
+ npa_pf_func = 0;
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev != NULL)
+ npa_pf_func = idev->npa_pf_func;
+
+ return npa_pf_func;
+}
+
+/**
+ * @internal
+ * Get NPA lf object.
+ */
+struct otx2_npa_lf *
+otx2_npa_lf_obj_get(void)
+{
+ struct otx2_idev_cfg *idev;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev != NULL && rte_atomic16_read(&idev->npa_refcnt))
+ return idev->npa_lf;
+
+ return NULL;
+}
+
+/**
+ * @internal
+ * Is NPA lf active for the given device?.
+ */
+int
+otx2_npa_lf_active(void *otx2_dev)
+{
+ struct otx2_dev *dev = otx2_dev;
+ struct otx2_idev_cfg *idev;
+
+ /* Check if npalf is actively used on this dev */
+ idev = otx2_intra_dev_get_cfg();
+ if (!idev || !idev->npa_lf || idev->npa_lf->mbox != dev->mbox)
+ return 0;
+
+ return rte_atomic16_read(&idev->npa_refcnt);
+}
+
+/**
+ * @internal
+ * Take a reference on the existing NPA LF object, if any.
+ */
+int
+otx2_npa_lf_obj_ref(void)
+{
+ struct otx2_idev_cfg *idev;
+ uint16_t cnt;
+ int rc;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ /* Check if ref not possible */
+ if (idev == NULL)
+ return -EINVAL;
+
+ /* Get ref only if > 0 */
+ cnt = rte_atomic16_read(&idev->npa_refcnt);
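+	/* CAS loop: rte_atomic16_cmpset() returns non-zero on success, so
+	 * retry with a refreshed count only when another thread raced us.
+	 */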
+ while (cnt != 0) {
+ rc = rte_atomic16_cmpset(&idev->npa_refcnt_u16, cnt, cnt + 1);
+ if (rc)
+ break;
+
+ cnt = rte_atomic16_read(&idev->npa_refcnt);
+ }
+
+ return cnt ? 0 : -EINVAL;
+}
+
+static int
+parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint64_t val;
+
+ val = strtoull(value, NULL, 16);
+
+ *(uint64_t *)extra_args = val;
+
+ return 0;
+}
+
+/**
+ * @internal
+ * Parse common device arguments.
+ */
+void
+otx2_parse_common_devargs(struct rte_kvargs *kvlist)
+{
+	struct otx2_idev_cfg *idev;
+ uint64_t npa_lock_mask = 0;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev == NULL)
+ return;
+
+ rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
+ &parse_npa_lock_mask, &npa_lock_mask);
+
+ idev->npa_lock_mask = npa_lock_mask;
+}
+
+/**
+ * @internal
+ */
+int otx2_logtype_base;
+/**
+ * @internal
+ */
+int otx2_logtype_mbox;
+/**
+ * @internal
+ */
+int otx2_logtype_npa;
+/**
+ * @internal
+ */
+int otx2_logtype_nix;
+/**
+ * @internal
+ */
+int otx2_logtype_npc;
+/**
+ * @internal
+ */
+int otx2_logtype_tm;
+/**
+ * @internal
+ */
+int otx2_logtype_sso;
+/**
+ * @internal
+ */
+int otx2_logtype_tim;
+/**
+ * @internal
+ */
+int otx2_logtype_dpi;
+/**
+ * @internal
+ */
+int otx2_logtype_ep;
+
+RTE_INIT(otx2_log_init);
+static void
+otx2_log_init(void)
+{
+ otx2_logtype_base = rte_log_register("pmd.octeontx2.base");
+ if (otx2_logtype_base >= 0)
+ rte_log_set_level(otx2_logtype_base, RTE_LOG_NOTICE);
+
+ otx2_logtype_mbox = rte_log_register("pmd.octeontx2.mbox");
+ if (otx2_logtype_mbox >= 0)
+ rte_log_set_level(otx2_logtype_mbox, RTE_LOG_NOTICE);
+
+ otx2_logtype_npa = rte_log_register("pmd.mempool.octeontx2");
+ if (otx2_logtype_npa >= 0)
+ rte_log_set_level(otx2_logtype_npa, RTE_LOG_NOTICE);
+
+ otx2_logtype_nix = rte_log_register("pmd.net.octeontx2");
+ if (otx2_logtype_nix >= 0)
+ rte_log_set_level(otx2_logtype_nix, RTE_LOG_NOTICE);
+
+ otx2_logtype_npc = rte_log_register("pmd.net.octeontx2.flow");
+ if (otx2_logtype_npc >= 0)
+ rte_log_set_level(otx2_logtype_npc, RTE_LOG_NOTICE);
+
+ otx2_logtype_tm = rte_log_register("pmd.net.octeontx2.tm");
+ if (otx2_logtype_tm >= 0)
+ rte_log_set_level(otx2_logtype_tm, RTE_LOG_NOTICE);
+
+ otx2_logtype_sso = rte_log_register("pmd.event.octeontx2");
+ if (otx2_logtype_sso >= 0)
+ rte_log_set_level(otx2_logtype_sso, RTE_LOG_NOTICE);
+
+ otx2_logtype_tim = rte_log_register("pmd.event.octeontx2.timer");
+ if (otx2_logtype_tim >= 0)
+ rte_log_set_level(otx2_logtype_tim, RTE_LOG_NOTICE);
+
+ otx2_logtype_dpi = rte_log_register("pmd.raw.octeontx2.dpi");
+ if (otx2_logtype_dpi >= 0)
+ rte_log_set_level(otx2_logtype_dpi, RTE_LOG_NOTICE);
+
+ otx2_logtype_ep = rte_log_register("pmd.raw.octeontx2.ep");
+ if (otx2_logtype_ep >= 0)
+ rte_log_set_level(otx2_logtype_ep, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.h
new file mode 100644
index 000000000..2168cde4d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_COMMON_H_
+#define _OTX2_COMMON_H_
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_io.h>
+
+#include "hw/otx2_rvu.h"
+#include "hw/otx2_nix.h"
+#include "hw/otx2_npc.h"
+#include "hw/otx2_npa.h"
+#include "hw/otx2_sdp.h"
+#include "hw/otx2_sso.h"
+#include "hw/otx2_ssow.h"
+#include "hw/otx2_tim.h"
+
+/* Alignment */
+#define OTX2_ALIGN 128
+
+/* Bits manipulation */
+#ifndef BIT_ULL
+#define BIT_ULL(nr) (1ULL << (nr))
+#endif
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
+#endif
+
+#ifndef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+#endif
+
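+/* Both masks are inclusive of bit positions h and l; e.g. GENMASK(7, 4)
+ * evaluates to 0xF0 and GENMASK_ULL(63, 0) to all ones (illustrative).
+ */
+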
+#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
+
+/* Intra device related functions */
+struct otx2_npa_lf;
+struct otx2_idev_cfg {
+ uint16_t sso_pf_func;
+ uint16_t npa_pf_func;
+ struct otx2_npa_lf *npa_lf;
+ RTE_STD_C11
+ union {
+ rte_atomic16_t npa_refcnt;
+ uint16_t npa_refcnt_u16;
+ };
+ uint64_t npa_lock_mask;
+};
+
+__rte_internal
+struct otx2_idev_cfg *otx2_intra_dev_get_cfg(void);
+__rte_internal
+void otx2_sso_pf_func_set(uint16_t sso_pf_func);
+__rte_internal
+uint16_t otx2_sso_pf_func_get(void);
+__rte_internal
+uint16_t otx2_npa_pf_func_get(void);
+__rte_internal
+struct otx2_npa_lf *otx2_npa_lf_obj_get(void);
+__rte_internal
+void otx2_npa_set_defaults(struct otx2_idev_cfg *idev);
+__rte_internal
+int otx2_npa_lf_active(void *dev);
+__rte_internal
+int otx2_npa_lf_obj_ref(void);
+__rte_internal
+void otx2_parse_common_devargs(struct rte_kvargs *kvlist);
+
+/* Log */
+extern int otx2_logtype_base;
+extern int otx2_logtype_mbox;
+extern int otx2_logtype_npa;
+extern int otx2_logtype_nix;
+extern int otx2_logtype_sso;
+extern int otx2_logtype_npc;
+extern int otx2_logtype_tm;
+extern int otx2_logtype_tim;
+extern int otx2_logtype_dpi;
+extern int otx2_logtype_ep;
+
+#define otx2_err(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s():%u " fmt "\n", \
+ __func__, __LINE__, ## args)
+
+#define otx2_info(fmt, args...) \
+	RTE_LOG(INFO, PMD, fmt "\n", ## args)
+
+#define otx2_dbg(subsystem, fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, otx2_logtype_ ## subsystem, \
+ "[%s] %s():%u " fmt "\n", \
+ #subsystem, __func__, __LINE__, ##args)
+
+#define otx2_base_dbg(fmt, ...) otx2_dbg(base, fmt, ##__VA_ARGS__)
+#define otx2_mbox_dbg(fmt, ...) otx2_dbg(mbox, fmt, ##__VA_ARGS__)
+#define otx2_npa_dbg(fmt, ...) otx2_dbg(npa, fmt, ##__VA_ARGS__)
+#define otx2_nix_dbg(fmt, ...) otx2_dbg(nix, fmt, ##__VA_ARGS__)
+#define otx2_sso_dbg(fmt, ...) otx2_dbg(sso, fmt, ##__VA_ARGS__)
+#define otx2_npc_dbg(fmt, ...) otx2_dbg(npc, fmt, ##__VA_ARGS__)
+#define otx2_tm_dbg(fmt, ...) otx2_dbg(tm, fmt, ##__VA_ARGS__)
+#define otx2_tim_dbg(fmt, ...) otx2_dbg(tim, fmt, ##__VA_ARGS__)
+#define otx2_dpi_dbg(fmt, ...) otx2_dbg(dpi, fmt, ##__VA_ARGS__)
+#define otx2_sdp_dbg(fmt, ...) otx2_dbg(ep, fmt, ##__VA_ARGS__)
+
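+/* The prints above are gated by the logtypes registered in
+ * otx2_common.c; e.g. passing --log-level=pmd.net.octeontx2,debug to
+ * EAL is expected to enable otx2_nix_dbg() output.
+ */
+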
+/* PCI IDs */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+#define PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF 0xA0F9
+#define PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF 0xA0FA
+#define PCI_DEVID_OCTEONTX2_RVU_NPA_PF 0xA0FB
+#define PCI_DEVID_OCTEONTX2_RVU_NPA_VF 0xA0FC
+#define PCI_DEVID_OCTEONTX2_RVU_CPT_PF 0xA0FD
+#define PCI_DEVID_OCTEONTX2_RVU_CPT_VF 0xA0FE
+#define PCI_DEVID_OCTEONTX2_RVU_AF_VF 0xA0f8
+#define PCI_DEVID_OCTEONTX2_DPI_VF 0xA081
+#define PCI_DEVID_OCTEONTX2_EP_VF 0xB203 /* OCTEON TX2 EP mode */
+#define PCI_DEVID_OCTEONTX2_RVU_SDP_PF 0xA0f6
+#define PCI_DEVID_OCTEONTX2_RVU_SDP_VF 0xA0f7
+
+/*
+ * REVID for RVU PCIe devices.
+ * Bits 1..0: minor pass
+ * Bits 3..2: major pass
+ * Bits 7..4: midr id, 0:96xx, 1:95xx, 2:loki, f:unknown
+ */
+
+#define RVU_PCI_REV_MIDR_ID(rev_id) (rev_id >> 4)
+#define RVU_PCI_REV_MAJOR(rev_id) ((rev_id >> 2) & 0x3)
+#define RVU_PCI_REV_MINOR(rev_id) (rev_id & 0x3)
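+
+/* Illustrative decode: rev_id 0x14 carries midr id 0x1 (CNF95XX),
+ * major pass 0x1 and minor pass 0x0.
+ */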
+
+#define RVU_PCI_CN96XX_MIDR_ID 0x0
+#define RVU_PCI_CNF95XX_MIDR_ID 0x1
+
+/* PCI Config offsets */
+#define RVU_PCI_REVISION_ID 0x08
+
+/* IO Access */
+#define otx2_read64(addr) rte_read64_relaxed((void *)(addr))
+#define otx2_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
+
+#if defined(RTE_ARCH_ARM64)
+#include "otx2_io_arm64.h"
+#else
+#include "otx2_io_generic.h"
+#endif
+
+/* Fastpath lookup */
+#define OTX2_NIX_FASTPATH_LOOKUP_MEM "otx2_nix_fastpath_lookup_mem"
+#define OTX2_NIX_SA_TBL_START (4096*4 + 69632*2)
+
+#endif /* _OTX2_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.c
new file mode 100644
index 000000000..d61c712fa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.c
@@ -0,0 +1,1043 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_memcpy.h>
+
+#include "otx2_dev.h"
+#include "otx2_mbox.h"
+
+#define RVU_MAX_VF 64 /* RVU_PF_VFPF_MBOX_INT(0..1) */
+#define RVU_MAX_INT_RETRY 3
+
+/* PF/VF message handling timer */
+#define VF_PF_MBOX_TIMER_MS (20 * 1000)
+
+static void *
+mbox_mem_map(off_t off, size_t size)
+{
+ void *va = MAP_FAILED;
+ int mem_fd;
+
+	if (size == 0)
+ goto error;
+
+ mem_fd = open("/dev/mem", O_RDWR);
+ if (mem_fd < 0)
+ goto error;
+
+ va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, off);
+ close(mem_fd);
+
+ if (va == MAP_FAILED)
+ otx2_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd",
+ size, mem_fd, (intmax_t)off);
+error:
+ return va;
+}
+
+static void
+mbox_mem_unmap(void *va, size_t size)
+{
+ if (va)
+ munmap(va, size);
+}
+
+static int
+pf_af_sync_msg(struct otx2_dev *dev, struct mbox_msghdr **rsp)
+{
+	uint32_t timeout = 0, sleep = 1;
+	struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ volatile uint64_t int_status;
+ struct mbox_msghdr *msghdr;
+ uint64_t off;
+ int rc = 0;
+
+ /* We need to disable PF interrupts. We are in timer interrupt */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+ /* Send message */
+ otx2_mbox_msg_send(mbox, 0);
+
+ do {
+ rte_delay_ms(sleep);
+ timeout += sleep;
+ if (timeout >= MBOX_RSP_TIMEOUT) {
+ otx2_err("Message timeout: %dms", MBOX_RSP_TIMEOUT);
+ rc = -EIO;
+ break;
+ }
+ int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
+ } while ((int_status & 0x1) != 0x1);
+
+ /* Clear */
+ otx2_write64(int_status, dev->bar2 + RVU_PF_INT);
+
+ /* Enable interrupts */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
+
+ if (rc == 0) {
+ /* Get message */
+ off = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
+ if (rsp)
+ *rsp = msghdr;
+ rc = msghdr->rc;
+ }
+
+ return rc;
+}
+
+static int
+af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
+{
+	uint32_t timeout = 0, sleep = 1;
+	struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ volatile uint64_t int_status;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct mbox_msghdr *rsp;
+ uint64_t offset;
+ size_t size;
+ int i;
+
+ /* We need to disable PF interrupts. We are in timer interrupt */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+ /* Send message */
+ otx2_mbox_msg_send(mbox, 0);
+
+ do {
+ rte_delay_ms(sleep);
+		timeout += sleep;
+ if (timeout >= MBOX_RSP_TIMEOUT) {
+ otx2_err("Routed messages %d timeout: %dms",
+ num_msg, MBOX_RSP_TIMEOUT);
+ break;
+ }
+ int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
+ } while ((int_status & 0x1) != 0x1);
+
+ /* Clear */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
+
+ /* Enable interrupts */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (req_hdr->num_msgs != num_msg)
+ otx2_err("Routed messages: %d received: %d", num_msg,
+ req_hdr->num_msgs);
+
+ /* Get messages from mbox */
+ offset = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ size = mbox->rx_start + msg->next_msgoff - offset;
+
+ /* Reserve PF/VF mbox message */
+ size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
+ rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
+ otx2_mbox_rsp_init(msg->id, rsp);
+
+ /* Copy message from AF<->PF mbox to PF<->VF mbox */
+ otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size - sizeof(struct mbox_msghdr));
+
+ /* Set status and sender pf_func data */
+ rsp->rc = msg->rc;
+ rsp->pcifunc = msg->pcifunc;
+
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return req_hdr->num_msgs;
+}
+
+static int
+vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
+{
+	int offset, routed = 0;
+	struct otx2_mbox *mbox = &dev->mbox_vfpf;
+ struct otx2_mbox_dev *mdev = &mbox->dev[vf];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ size_t size;
+ uint16_t i;
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (!req_hdr->num_msgs)
+ return 0;
+
+ offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ size = mbox->rx_start + msg->next_msgoff - offset;
+
+ /* RVU_PF_FUNC_S */
+ msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
+
+ if (msg->id == MBOX_MSG_READY) {
+ struct ready_msg_rsp *rsp;
+ uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
+
+ /* Handle READY message in PF */
+ dev->active_vfs[vf / max_bits] |=
+ BIT_ULL(vf % max_bits);
+ rsp = (struct ready_msg_rsp *)
+ otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));
+ otx2_mbox_rsp_init(msg->id, rsp);
+
+ /* PF/VF function ID */
+ rsp->hdr.pcifunc = msg->pcifunc;
+ rsp->hdr.rc = 0;
+ } else {
+ struct mbox_msghdr *af_req;
+ /* Reserve AF/PF mbox message */
+ size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
+ af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);
+ otx2_mbox_req_init(msg->id, af_req);
+
+ /* Copy message from VF<->PF mbox to PF<->AF mbox */
+ otx2_mbox_memcpy((uint8_t *)af_req +
+ sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size - sizeof(struct mbox_msghdr));
+ af_req->pcifunc = msg->pcifunc;
+ routed++;
+ }
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ if (routed > 0) {
+ otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
+ dev->pf, routed, vf);
+ af_pf_wait_msg(dev, vf, routed);
+ otx2_mbox_reset(dev->mbox, 0);
+ }
+
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs) {
+ otx2_base_dbg("pf:%d reply %d messages to vf:%d",
+ dev->pf, mdev->num_msgs, vf);
+ otx2_mbox_msg_send(mbox, vf);
+ }
+
+ return i;
+}
+
+static int
+vf_pf_process_up_msgs(struct otx2_dev *dev, uint16_t vf)
+{
+ struct otx2_mbox *mbox = &dev->mbox_vfpf_up;
+ struct otx2_mbox_dev *mdev = &mbox->dev[vf];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int msgs_acked = 0;
+ int offset;
+ uint16_t i;
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (req_hdr->num_msgs == 0)
+ return 0;
+
+ offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+
+ msgs_acked++;
+ /* RVU_PF_FUNC_S */
+ msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
+ msg->id, otx2_mbox_id2name(msg->id),
+ msg->pcifunc, otx2_get_pf(msg->pcifunc),
+ otx2_get_vf(msg->pcifunc));
+ break;
+ case MBOX_MSG_CGX_PTP_RX_INFO:
+ otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
+ msg->id, otx2_mbox_id2name(msg->id),
+ msg->pcifunc, otx2_get_pf(msg->pcifunc),
+ otx2_get_vf(msg->pcifunc));
+ break;
+ default:
+ otx2_err("Not handled UP msg 0x%x (%s) func:0x%x",
+ msg->id, otx2_mbox_id2name(msg->id),
+ msg->pcifunc);
+ }
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ otx2_mbox_reset(mbox, vf);
+ mdev->msgs_acked = msgs_acked;
+ rte_wmb();
+
+ return i;
+}
+
+static void
+otx2_vf_pf_mbox_handle_msg(void *param)
+{
+ uint16_t vf, max_vf, max_bits;
+ struct otx2_dev *dev = param;
+
+	max_bits = sizeof(dev->intr.bits[0]) * 8;
+ max_vf = max_bits * MAX_VFPF_DWORD_BITS;
+
+ for (vf = 0; vf < max_vf; vf++) {
+ if (dev->intr.bits[vf/max_bits] & BIT_ULL(vf%max_bits)) {
+ otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
+ vf, dev->pf, dev->vf);
+ vf_pf_process_msgs(dev, vf);
+ /* UP messages */
+ vf_pf_process_up_msgs(dev, vf);
+ dev->intr.bits[vf/max_bits] &= ~(BIT_ULL(vf%max_bits));
+ }
+ }
+ dev->timer_set = 0;
+}
+
+static void
+otx2_vf_pf_mbox_irq(void *param)
+{
+ struct otx2_dev *dev = param;
+ bool alarm_set = false;
+ uint64_t intr;
+ int vfpf;
+
+ for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
+ intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
+ if (!intr)
+ continue;
+
+ otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
+ vfpf, intr, dev->pf, dev->vf);
+
+ /* Save and clear intr bits */
+ dev->intr.bits[vfpf] |= intr;
+ otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
+ alarm_set = true;
+ }
+
+ if (!dev->timer_set && alarm_set) {
+ dev->timer_set = 1;
+ /* Start timer to handle messages */
+ rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
+ otx2_vf_pf_mbox_handle_msg, dev);
+ }
+}
+
+static void
+otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int msgs_acked = 0;
+ int offset;
+ uint16_t i;
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (req_hdr->num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+
+ msgs_acked++;
+ otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
+ msg->id, otx2_mbox_id2name(msg->id),
+ otx2_get_pf(msg->pcifunc),
+ otx2_get_vf(msg->pcifunc));
+
+ switch (msg->id) {
+ /* Add message id's that are handled here */
+ case MBOX_MSG_READY:
+ /* Get our identity */
+ dev->pf_func = msg->pcifunc;
+ break;
+
+ default:
+ if (msg->rc)
+ otx2_err("Message (%s) response has err=%d",
+ otx2_mbox_id2name(msg->id), msg->rc);
+ break;
+ }
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+ /* Update acked if someone is waiting a message */
+ mdev->msgs_acked = msgs_acked;
+ rte_wmb();
+}
+
+/* Copies the message received from AF and sends it to VF */
+static void
+pf_vf_mbox_send_up_msg(struct otx2_dev *dev, void *rec_msg)
+{
+	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
+ struct otx2_mbox *vf_mbox = &dev->mbox_vfpf_up;
+ struct msg_req *msg = rec_msg;
+ struct mbox_msghdr *vf_msg;
+ uint16_t vf;
+ size_t size;
+
+ size = RTE_ALIGN(otx2_mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
+	/* Send the UP message to all VFs */
+ for (vf = 0; vf < vf_mbox->ndevs; vf++) {
+ /* VF active */
+		if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
+ continue;
+
+ otx2_base_dbg("(%s) size: %zx to VF: %d",
+ otx2_mbox_id2name(msg->hdr.id), size, vf);
+
+ /* Reserve PF/VF mbox message */
+ vf_msg = otx2_mbox_alloc_msg(vf_mbox, vf, size);
+ if (!vf_msg) {
+ otx2_err("Failed to alloc VF%d UP message", vf);
+ continue;
+ }
+ otx2_mbox_req_init(msg->hdr.id, vf_msg);
+
+ /*
+ * Copy message from AF<->PF UP mbox
+ * to PF<->VF UP mbox
+ */
+ otx2_mbox_memcpy((uint8_t *)vf_msg +
+ sizeof(struct mbox_msghdr), (uint8_t *)msg
+ + sizeof(struct mbox_msghdr), size -
+ sizeof(struct mbox_msghdr));
+
+ vf_msg->rc = msg->hdr.rc;
+ /* Set PF to be a sender */
+ vf_msg->pcifunc = dev->pf_func;
+
+ /* Send to VF */
+ otx2_mbox_msg_send(vf_mbox, vf);
+ }
+}
+
+static int
+otx2_mbox_up_handler_cgx_link_event(struct otx2_dev *dev,
+ struct cgx_link_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ struct cgx_link_user_info *linfo = &msg->link_info;
+
+ otx2_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
+ otx2_get_pf(dev->pf_func), otx2_get_vf(dev->pf_func),
+ linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
+ otx2_mbox_id2name(msg->hdr.id),
+ otx2_get_pf(msg->hdr.pcifunc),
+ otx2_get_vf(msg->hdr.pcifunc));
+
+ /* PF gets link notification from AF */
+ if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
+ if (dev->ops && dev->ops->link_status_update)
+ dev->ops->link_status_update(dev, linfo);
+
+ /* Forward the same message as received from AF to VF */
+ pf_vf_mbox_send_up_msg(dev, msg);
+ } else {
+ /* VF gets link up notification */
+ if (dev->ops && dev->ops->link_status_update)
+ dev->ops->link_status_update(dev, linfo);
+ }
+
+ rsp->hdr.rc = 0;
+ return 0;
+}
+
+static int
+otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_dev *dev,
+ struct cgx_ptp_rx_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ otx2_nix_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
+ otx2_get_pf(dev->pf_func),
+ otx2_get_vf(dev->pf_func),
+ msg->ptp_en ? "ENABLED" : "DISABLED",
+ msg->hdr.id, otx2_mbox_id2name(msg->hdr.id),
+ otx2_get_pf(msg->hdr.pcifunc),
+ otx2_get_vf(msg->hdr.pcifunc));
+
+ /* PF gets PTP notification from AF */
+ if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
+ if (dev->ops && dev->ops->ptp_info_update)
+ dev->ops->ptp_info_update(dev, msg->ptp_en);
+
+ /* Forward the same message as received from AF to VF */
+ pf_vf_mbox_send_up_msg(dev, msg);
+ } else {
+ /* VF gets PTP notification */
+ if (dev->ops && dev->ops->ptp_info_update)
+ dev->ops->ptp_info_update(dev, msg->ptp_en);
+ }
+
+ rsp->hdr.rc = 0;
+ return 0;
+}
+
+static int
+mbox_process_msgs_up(struct otx2_dev *dev, struct mbox_msghdr *req)
+{
+	/* Check the signature; if invalid, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ return -EIO;
+
+ switch (req->id) {
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &dev->mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = dev->pf_func; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ dev, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+	default:
+ otx2_reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
+ }
+
+ return -ENODEV;
+}
+
+static void
+otx2_process_msgs_up(struct otx2_dev *dev, struct otx2_mbox *mbox)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int i, err, offset;
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (req_hdr->num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+
+ otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
+ msg->id, otx2_mbox_id2name(msg->id),
+ otx2_get_pf(msg->pcifunc),
+ otx2_get_vf(msg->pcifunc));
+ err = mbox_process_msgs_up(dev, msg);
+ if (err)
+ otx2_err("Error %d handling 0x%x (%s)",
+ err, msg->id, otx2_mbox_id2name(msg->id));
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ /* Send mbox responses */
+ if (mdev->num_msgs) {
+ otx2_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
+ otx2_mbox_msg_send(mbox, 0);
+ }
+}
+
+static void
+otx2_pf_vf_mbox_irq(void *param)
+{
+ struct otx2_dev *dev = param;
+ uint64_t intr;
+
+ intr = otx2_read64(dev->bar2 + RVU_VF_INT);
+ if (intr == 0)
+ otx2_base_dbg("Proceeding to check mbox UP messages if any");
+
+ otx2_write64(intr, dev->bar2 + RVU_VF_INT);
+ otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
+
+ /* First process all configuration messages */
+ otx2_process_msgs(dev, dev->mbox);
+
+ /* Process Uplink messages */
+ otx2_process_msgs_up(dev, &dev->mbox_up);
+}
+
+static void
+otx2_af_pf_mbox_irq(void *param)
+{
+ struct otx2_dev *dev = param;
+ uint64_t intr;
+
+ intr = otx2_read64(dev->bar2 + RVU_PF_INT);
+ if (intr == 0)
+ otx2_base_dbg("Proceeding to check mbox UP messages if any");
+
+ otx2_write64(intr, dev->bar2 + RVU_PF_INT);
+ otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
+
+ /* First process all configuration messages */
+ otx2_process_msgs(dev, dev->mbox);
+
+ /* Process Uplink messages */
+ otx2_process_msgs_up(dev, &dev->mbox_up);
+}
+
+static int
+mbox_register_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int i, rc;
+
+ /* HW clear irq */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+ otx2_write64(~0ull, dev->bar2 +
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
+
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+ dev->timer_set = 0;
+
+ /* MBOX interrupt for VF(0...63) <-> PF */
+ rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_VFPF_MBOX0);
+
+ if (rc) {
+ otx2_err("Fail to register PF(VF0-63) mbox irq");
+ return rc;
+ }
+	/* MBOX interrupt for VF(64...127) <-> PF */
+ rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_VFPF_MBOX1);
+
+ if (rc) {
+ otx2_err("Fail to register PF(VF64-128) mbox irq");
+ return rc;
+ }
+ /* MBOX interrupt AF <-> PF */
+ rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
+ dev, RVU_PF_INT_VEC_AFPF_MBOX);
+ if (rc) {
+ otx2_err("Fail to register AF<->PF mbox irq");
+ return rc;
+ }
+
+ /* HW enable intr */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+ otx2_write64(~0ull, dev->bar2 +
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));
+
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
+
+ return rc;
+}
+
+static int
+mbox_register_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int rc;
+
+ /* Clear irq */
+ otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
+
+ /* MBOX interrupt PF <-> VF */
+ rc = otx2_register_irq(intr_handle, otx2_pf_vf_mbox_irq,
+ dev, RVU_VF_INT_VEC_MBOX);
+ if (rc) {
+ otx2_err("Fail to register PF<->VF mbox irq");
+ return rc;
+ }
+
+ /* HW enable intr */
+ otx2_write64(~0ull, dev->bar2 + RVU_VF_INT);
+ otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);
+
+ return rc;
+}
+
+static int
+mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ if (otx2_dev_is_vf(dev))
+ return mbox_register_vf_irq(pci_dev, dev);
+ else
+ return mbox_register_pf_irq(pci_dev, dev);
+}
+
+static void
+mbox_unregister_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int i;
+
+ /* HW clear irq */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+ otx2_write64(~0ull, dev->bar2 +
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
+
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+ dev->timer_set = 0;
+
+ rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);
+
+	/* Unregister the interrupt handler for each vector */
+ /* MBOX interrupt for VF(0...63) <-> PF */
+ otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_VFPF_MBOX0);
+
+	/* MBOX interrupt for VF(64...127) <-> PF */
+ otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_VFPF_MBOX1);
+
+ /* MBOX interrupt AF <-> PF */
+ otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_AFPF_MBOX);
+}
+
+static void
+mbox_unregister_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ /* Clear irq */
+ otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
+
+ /* Unregister the interrupt handler */
+ otx2_unregister_irq(intr_handle, otx2_pf_vf_mbox_irq, dev,
+ RVU_VF_INT_VEC_MBOX);
+}
+
+static void
+mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ if (otx2_dev_is_vf(dev))
+ mbox_unregister_vf_irq(pci_dev, dev);
+ else
+ mbox_unregister_pf_irq(pci_dev, dev);
+}
+
+static int
+vf_flr_send_msg(struct otx2_dev *dev, uint16_t vf)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct msg_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_vf_flr(mbox);
+ /* Overwrite pcifunc to indicate VF */
+ req->hdr.pcifunc = otx2_pfvf_func(dev->pf, vf);
+
+ /* Sync message in interrupt context */
+ rc = pf_af_sync_msg(dev, NULL);
+ if (rc)
+ otx2_err("Failed to send VF FLR mbox msg, rc=%d", rc);
+
+ return rc;
+}
+
+static void
+otx2_pf_vf_flr_irq(void *param)
+{
+ struct otx2_dev *dev = (struct otx2_dev *)param;
+ uint16_t max_vf = 64, vf;
+ uintptr_t bar2;
+ uint64_t intr;
+ int i;
+
+ max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
+ bar2 = dev->bar2;
+
+ otx2_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);
+
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
+ intr = otx2_read64(bar2 + RVU_PF_VFFLR_INTX(i));
+ if (!intr)
+ continue;
+
+ for (vf = 0; vf < max_vf; vf++) {
+ if (!(intr & (1ULL << vf)))
+ continue;
+
+ otx2_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d",
+ i, intr, (64 * i + vf));
+ /* Clear interrupt */
+ otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
+ /* Disable the interrupt */
+ otx2_write64(BIT_ULL(vf),
+ bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
+ /* Inform AF about VF reset */
+ vf_flr_send_msg(dev, vf);
+
+ /* Signal FLR finish */
+ otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
+ /* Enable interrupt */
+ otx2_write64(~0ull,
+ bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
+ }
+ }
+}
+
+static int
+vf_flr_unregister_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int i;
+
+ otx2_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);
+
+ /* HW clear irq */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
+
+ otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
+ RVU_PF_INT_VEC_VFFLR0);
+
+ otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
+ RVU_PF_INT_VEC_VFFLR1);
+
+ return 0;
+}
+
+static int
+vf_flr_register_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int i, rc;
+
+ otx2_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);
+
+ rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
+ RVU_PF_INT_VEC_VFFLR0);
+ if (rc)
+ otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);
+
+ rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
+ RVU_PF_INT_VEC_VFFLR1);
+ if (rc)
+ otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);
+
+ /* Enable HW interrupt */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
+ }
+ return 0;
+}
+
+/**
+ * @internal
+ * Get number of active VFs for the given PF device.
+ */
+int
+otx2_dev_active_vfs(void *otx2_dev)
+{
+ struct otx2_dev *dev = otx2_dev;
+ int i, count = 0;
+
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
+ count += __builtin_popcount(dev->active_vfs[i]);
+
+ return count;
+}
+
+static void
+otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVID_OCTEONTX2_RVU_PF:
+ break;
+ case PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_NPA_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
+ dev->hwcap |= OTX2_HWCAP_F_VF;
+ break;
+ }
+}
+
+/**
+ * @internal
+ * Initialize the otx2 device
+ */
+int
+otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev)
+{
+ int up_direction = MBOX_DIR_PFAF_UP;
+ int rc, direction = MBOX_DIR_PFAF;
+ uint64_t intr_offset = RVU_PF_INT;
+ struct otx2_dev *dev = otx2_dev;
+ uintptr_t bar2, bar4;
+ uint64_t bar4_addr;
+	void *hwbase = NULL; /* keeps the error-path unmap a no-op */
+
+ bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
+ bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
+
+ if (bar2 == 0 || bar4 == 0) {
+ otx2_err("Failed to get pci bars");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ dev->node = pci_dev->device.numa_node;
+ dev->maxvf = pci_dev->max_vfs;
+ dev->bar2 = bar2;
+ dev->bar4 = bar4;
+
+ otx2_update_vf_hwcap(pci_dev, dev);
+
+ if (otx2_dev_is_vf(dev)) {
+ direction = MBOX_DIR_VFPF;
+ up_direction = MBOX_DIR_VFPF_UP;
+ intr_offset = RVU_VF_INT;
+ }
+
+ /* Initialize the local mbox */
+ rc = otx2_mbox_init(&dev->mbox_local, bar4, bar2, direction, 1,
+ intr_offset);
+ if (rc)
+ goto error;
+ dev->mbox = &dev->mbox_local;
+
+ rc = otx2_mbox_init(&dev->mbox_up, bar4, bar2, up_direction, 1,
+ intr_offset);
+ if (rc)
+ goto error;
+
+ /* Register mbox interrupts */
+ rc = mbox_register_irq(pci_dev, dev);
+ if (rc)
+ goto mbox_fini;
+
+ /* Check the readiness of PF/VF */
+ rc = otx2_send_ready_msg(dev->mbox, &dev->pf_func);
+ if (rc)
+ goto mbox_unregister;
+
+ dev->pf = otx2_get_pf(dev->pf_func);
+ dev->vf = otx2_get_vf(dev->pf_func);
+ memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));
+
+	/* VF devices may be present under this PF device */
+	if (pci_dev->max_vfs > 0) {
+		/* Remap the mbox area for all VFs */
+ bar4_addr = otx2_read64(bar2 + RVU_PF_VF_BAR4_ADDR);
+ if (bar4_addr == 0) {
+ rc = -ENODEV;
+			goto mbox_unregister;
+ }
+
+ hwbase = mbox_mem_map(bar4_addr, MBOX_SIZE * pci_dev->max_vfs);
+ if (hwbase == MAP_FAILED) {
+ rc = -ENOMEM;
+			goto mbox_unregister;
+ }
+ /* Init mbox object */
+ rc = otx2_mbox_init(&dev->mbox_vfpf, (uintptr_t)hwbase,
+ bar2, MBOX_DIR_PFVF, pci_dev->max_vfs,
+ intr_offset);
+ if (rc)
+ goto iounmap;
+
+ /* PF -> VF UP messages */
+ rc = otx2_mbox_init(&dev->mbox_vfpf_up, (uintptr_t)hwbase,
+ bar2, MBOX_DIR_PFVF_UP, pci_dev->max_vfs,
+ intr_offset);
+ if (rc)
+			goto iounmap;
+ }
+
+ /* Register VF-FLR irq handlers */
+ if (otx2_dev_is_pf(dev)) {
+ rc = vf_flr_register_irqs(pci_dev, dev);
+ if (rc)
+ goto iounmap;
+ }
+ dev->mbox_active = 1;
+ return rc;
+
+iounmap:
+ mbox_mem_unmap(hwbase, MBOX_SIZE * pci_dev->max_vfs);
+mbox_unregister:
+ mbox_unregister_irq(pci_dev, dev);
+mbox_fini:
+ otx2_mbox_fini(dev->mbox);
+ otx2_mbox_fini(&dev->mbox_up);
+error:
+ return rc;
+}
+
+/**
+ * @internal
+ * Finalize the otx2 device
+ */
+void
+otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct otx2_dev *dev = otx2_dev;
+ struct otx2_idev_cfg *idev;
+ struct otx2_mbox *mbox;
+
+ /* Clear references to this pci dev */
+ idev = otx2_intra_dev_get_cfg();
+	if (idev != NULL && idev->npa_lf && idev->npa_lf->pci_dev == pci_dev)
+ idev->npa_lf = NULL;
+
+ mbox_unregister_irq(pci_dev, dev);
+
+ if (otx2_dev_is_pf(dev))
+ vf_flr_unregister_irqs(pci_dev, dev);
+ /* Release PF - VF */
+ mbox = &dev->mbox_vfpf;
+ if (mbox->hwbase && mbox->dev)
+ mbox_mem_unmap((void *)mbox->hwbase,
+ MBOX_SIZE * pci_dev->max_vfs);
+ otx2_mbox_fini(mbox);
+ mbox = &dev->mbox_vfpf_up;
+ otx2_mbox_fini(mbox);
+
+ /* Release PF - AF */
+ mbox = dev->mbox;
+ otx2_mbox_fini(mbox);
+ mbox = &dev->mbox_up;
+ otx2_mbox_fini(mbox);
+ dev->mbox_active = 0;
+
+ /* Disable MSIX vectors */
+ otx2_disable_irqs(intr_handle);
+}
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.h
new file mode 100644
index 000000000..cd4fe517d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_DEV_H
+#define _OTX2_DEV_H
+
+#include <rte_bus_pci.h>
+
+#include "otx2_common.h"
+#include "otx2_irq.h"
+#include "otx2_mbox.h"
+#include "otx2_mempool.h"
+
+/* Common HWCAP flags. Bits 0-7 hold the PCI revision, so flags start at bit 8 */
+#define OTX2_HWCAP_F_VF BIT_ULL(8) /* VF device */
+#define otx2_dev_is_vf(dev) (dev->hwcap & OTX2_HWCAP_F_VF)
+#define otx2_dev_is_pf(dev) (!(dev->hwcap & OTX2_HWCAP_F_VF))
+#define otx2_dev_is_lbk(dev) ((dev->hwcap & OTX2_HWCAP_F_VF) && \
+ (dev->tx_chan_base < 0x700))
+#define otx2_dev_revid(dev) (dev->hwcap & 0xFF)
+#define otx2_dev_is_sdp(dev) (dev->sdp_link)
+
+#define otx2_dev_is_vf_or_sdp(dev) \
+ (otx2_dev_is_vf(dev) || otx2_dev_is_sdp(dev))
+
+#define otx2_dev_is_A0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0))
+#define otx2_dev_is_Ax(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0))
+
+#define otx2_dev_is_95xx_A0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x1))
+#define otx2_dev_is_95xx_Ax(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x1))
+
+#define otx2_dev_is_96xx_A0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+#define otx2_dev_is_96xx_Ax(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+
+#define otx2_dev_is_96xx_Cx(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x2) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+
+#define otx2_dev_is_96xx_C0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x2) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+
+struct otx2_dev;
+
+/* Link status callback */
+typedef void (*otx2_link_status_t)(struct otx2_dev *dev,
+ struct cgx_link_user_info *link);
+/* PTP info callback */
+typedef int (*otx2_ptp_info_t)(struct otx2_dev *dev, bool ptp_en);
+
+struct otx2_dev_ops {
+ otx2_link_status_t link_status_update;
+ otx2_ptp_info_t ptp_info_update;
+};
+
+#define OTX2_DEV \
+ int node __rte_cache_aligned; \
+ uint16_t pf; \
+ int16_t vf; \
+ uint16_t pf_func; \
+ uint8_t mbox_active; \
+ bool drv_inited; \
+ uint64_t active_vfs[MAX_VFPF_DWORD_BITS]; \
+ uintptr_t bar2; \
+ uintptr_t bar4; \
+ struct otx2_mbox mbox_local; \
+ struct otx2_mbox mbox_up; \
+ struct otx2_mbox mbox_vfpf; \
+ struct otx2_mbox mbox_vfpf_up; \
+ otx2_intr_t intr; \
+ int timer_set; /* ~0 : no alarm handling */ \
+ uint64_t hwcap; \
+ struct otx2_npa_lf npalf; \
+ struct otx2_mbox *mbox; \
+ uint16_t maxvf; \
+ const struct otx2_dev_ops *ops
+
+struct otx2_dev {
+ OTX2_DEV;
+};
+
+__rte_internal
+int otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev);
+
+/* Common dev init and fini routines */
+
+static __rte_always_inline int
+otx2_dev_init(struct rte_pci_device *pci_dev, void *otx2_dev)
+{
+ struct otx2_dev *dev = otx2_dev;
+ uint8_t rev_id;
+ int rc;
+
+ rc = rte_pci_read_config(pci_dev, &rev_id,
+ 1, RVU_PCI_REVISION_ID);
+ if (rc != 1) {
+ otx2_err("Failed to read pci revision id, rc=%d", rc);
+ return rc;
+ }
+
+ dev->hwcap = rev_id;
+ return otx2_dev_priv_init(pci_dev, otx2_dev);
+}
+
+__rte_internal
+void otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev);
+__rte_internal
+int otx2_dev_active_vfs(void *otx2_dev);
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+static inline int
+otx2_get_vf(uint16_t pf_func)
+{
+ return (((pf_func >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK) - 1);
+}
+
+static inline int
+otx2_get_pf(uint16_t pf_func)
+{
+ return (pf_func >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+static inline int
+otx2_pfvf_func(int pf, int vf)
+{
+ return (pf << RVU_PFVF_PF_SHIFT) | ((vf << RVU_PFVF_FUNC_SHIFT) + 1);
+}
+
+static inline int
+otx2_is_afvf(uint16_t pf_func)
+{
+ return !(pf_func & ~RVU_PFVF_FUNC_MASK);
+}
+
+#endif /* _OTX2_DEV_H */
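The pf_func helpers above pack the PF index into bits 15:10 and a one-based function index into bits 9:0, with FUNC == 0 meaning the PF itself. A standalone self-check of the round trip, using only the macros and inlines defined in this header:

#include <assert.h>
#include <stdint.h>
/* assumes otx2_dev.h is on the include path */

int main(void)
{
	/* PF 2, VF 0 packs to (2 << 10) | (0 + 1) == 0x801 */
	uint16_t pf_func = otx2_pfvf_func(2, 0);

	assert(pf_func == 0x801);
	assert(otx2_get_pf(pf_func) == 2);
	assert(otx2_get_vf(pf_func) == 0);

	/* FUNC == 0 denotes the PF itself, so there is no VF index */
	assert(otx2_get_vf(2 << RVU_PFVF_PF_SHIFT) == -1);

	/* PF 0 with a non-zero FUNC is an AF-owned VF */
	assert(otx2_is_afvf(otx2_pfvf_func(0, 3)));
	return 0;
}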
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_arm64.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_arm64.h
new file mode 100644
index 000000000..7e45329b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_arm64.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_IO_ARM64_H_
+#define _OTX2_IO_ARM64_H_
+
+#define otx2_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define otx2_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1],#0]!" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+
+#define otx2_prefetch_store_keep(ptr) ({\
+ asm volatile("prfm pstl1keep, [%x0]\n" : : "r" (ptr)); })
+
+static __rte_always_inline uint64_t
+otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr)
+{
+ uint64_t result;
+
+ /* Atomic add with no ordering */
+ asm volatile (
+ ".cpu generic+lse\n"
+ "ldadd %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+static __rte_always_inline uint64_t
+otx2_atomic64_add_sync(int64_t incr, int64_t *ptr)
+{
+ uint64_t result;
+
+ /* Atomic add with ordering */
+ asm volatile (
+ ".cpu generic+lse\n"
+ "ldadda %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+static __rte_always_inline uint64_t
+otx2_lmt_submit(rte_iova_t io_address)
+{
+ uint64_t result;
+
+ asm volatile (
+ ".cpu generic+lse\n"
+ "ldeor xzr,%x[rf],[%[rs]]" :
+ [rf] "=r"(result): [rs] "r"(io_address));
+ return result;
+}
+
+static __rte_always_inline void
+otx2_lmt_mov(void *out, const void *in, const uint32_t lmtext)
+{
+ volatile const __uint128_t *src128 = (const __uint128_t *)in;
+ volatile __uint128_t *dst128 = (__uint128_t *)out;
+ dst128[0] = src128[0];
+ dst128[1] = src128[1];
+	/* lmtext receives one of the following values:
+	 * 1: NIX_SUBDC_EXT needed, i.e. the Tx VLAN case
+	 * 2: NIX_SUBDC_EXT + NIX_SUBDC_MEM, i.e. the timestamp case
+	 */
+ if (lmtext) {
+ dst128[2] = src128[2];
+ if (lmtext > 1)
+ dst128[3] = src128[3];
+ }
+}
+
+static __rte_always_inline void
+otx2_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
+{
+ volatile const __uint128_t *src128 = (const __uint128_t *)in;
+ volatile __uint128_t *dst128 = (__uint128_t *)out;
+ uint8_t i;
+
+ for (i = 0; i < segdw; i++)
+ dst128[i] = src128[i];
+}
+
+#endif /* _OTX2_IO_ARM64_H_ */
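The LSE ldadd/ldadda sequences above are fetch-and-add operations that return the value previously stored at the target. As a non-authoritative reference for readers not fluent in A64, roughly the same semantics expressed with GCC atomic builtins (relaxed vs. acquire ordering mirroring the nosync/sync split):

#include <stdint.h>

static inline uint64_t
atomic64_add_nosync_ref(int64_t incr, int64_t *ptr)
{
	/* ldadd: no ordering constraint */
	return (uint64_t)__atomic_fetch_add(ptr, incr, __ATOMIC_RELAXED);
}

static inline uint64_t
atomic64_add_sync_ref(int64_t incr, int64_t *ptr)
{
	/* ldadda: acquire semantics */
	return (uint64_t)__atomic_fetch_add(ptr, incr, __ATOMIC_ACQUIRE);
}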
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_generic.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_generic.h
new file mode 100644
index 000000000..b1d754008
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_generic.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_IO_GENERIC_H_
+#define _OTX2_IO_GENERIC_H_
+
+#define otx2_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64_relaxed((void *)(addr)); \
+ val1 = rte_read64_relaxed((uint8_t *)(addr) + 8); \
+} while (0)
+
+#define otx2_store_pair(val0, val1, addr) \
+do { \
+ rte_write64_relaxed(val0, (void *)(addr)); \
+ rte_write64_relaxed(val1, (((uint8_t *)(addr)) + 8)); \
+} while (0)
+
+#define otx2_prefetch_store_keep(ptr) do {} while (0)
+
+static inline uint64_t
+otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr)
+{
+ RTE_SET_USED(ptr);
+ RTE_SET_USED(incr);
+
+ return 0;
+}
+
+static inline uint64_t
+otx2_atomic64_add_sync(int64_t incr, int64_t *ptr)
+{
+ RTE_SET_USED(ptr);
+ RTE_SET_USED(incr);
+
+ return 0;
+}
+
+static inline int64_t
+otx2_lmt_submit(uint64_t io_address)
+{
+ RTE_SET_USED(io_address);
+
+ return 0;
+}
+
+static __rte_always_inline void
+otx2_lmt_mov(void *out, const void *in, const uint32_t lmtext)
+{
+ RTE_SET_USED(out);
+ RTE_SET_USED(in);
+ RTE_SET_USED(lmtext);
+}
+
+static __rte_always_inline void
+otx2_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
+{
+ RTE_SET_USED(out);
+ RTE_SET_USED(in);
+ RTE_SET_USED(segdw);
+}
+#endif /* _OTX2_IO_GENERIC_H_ */
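These stubs keep the common code compiling on non-arm64 targets, where LMTST and the LSE atomics do not exist. The backend is chosen at compile time along these lines (a sketch; the actual including header is outside this hunk):

#if defined(__aarch64__)
#include "otx2_io_arm64.h"	/* real LMTST/LSE implementations */
#else
#include "otx2_io_generic.h"	/* no-op fallbacks shown above */
#endif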
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.c
new file mode 100644
index 000000000..fa3206af5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_interrupts.h>
+
+#include "otx2_common.h"
+#include "otx2_irq.h"
+
+#ifdef RTE_EAL_VFIO
+
+#include <inttypes.h>
+#include <linux/vfio.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define MAX_INTR_VEC_ID RTE_MAX_RXTX_INTR_VEC_ID
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (MAX_INTR_VEC_ID))
+
+static int
+irq_get_info(struct rte_intr_handle *intr_handle)
+{
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+ int rc;
+
+ irq.index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (rc < 0) {
+ otx2_err("Failed to get IRQ info rc=%d errno=%d", rc, errno);
+ return rc;
+ }
+
+ otx2_base_dbg("Flags=0x%x index=0x%x count=0x%x max_intr_vec_id=0x%x",
+ irq.flags, irq.index, irq.count, MAX_INTR_VEC_ID);
+
+ if (irq.count > MAX_INTR_VEC_ID) {
+		otx2_err("HW max=%d > MAX_INTR_VEC_ID: %d",
+			 irq.count, MAX_INTR_VEC_ID);
+ intr_handle->max_intr = MAX_INTR_VEC_ID;
+ } else {
+ intr_handle->max_intr = irq.count;
+ }
+
+ return 0;
+}
+
+static int
+irq_config(struct rte_intr_handle *intr_handle, unsigned int vec)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int32_t *fd_ptr;
+ int len, rc;
+
+ if (vec > intr_handle->max_intr) {
+ otx2_err("vector=%d greater than max_intr=%d", vec,
+ intr_handle->max_intr);
+ return -EINVAL;
+ }
+
+ len = sizeof(struct vfio_irq_set) + sizeof(int32_t);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+
+ irq_set->start = vec;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ /* Use vec fd to set interrupt vectors */
+ fd_ptr = (int32_t *)&irq_set->data[0];
+ fd_ptr[0] = intr_handle->efds[vec];
+
+ rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (rc)
+ otx2_err("Failed to set_irqs vector=0x%x rc=%d", vec, rc);
+
+ return rc;
+}
+
+static int
+irq_init(struct rte_intr_handle *intr_handle)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int32_t *fd_ptr;
+ int len, rc;
+ uint32_t i;
+
+ if (intr_handle->max_intr > MAX_INTR_VEC_ID) {
+ otx2_err("Max_intr=%d greater than MAX_INTR_VEC_ID=%d",
+ intr_handle->max_intr, MAX_INTR_VEC_ID);
+ return -ERANGE;
+ }
+
+ len = sizeof(struct vfio_irq_set) +
+ sizeof(int32_t) * intr_handle->max_intr;
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->start = 0;
+ irq_set->count = intr_handle->max_intr;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ fd_ptr = (int32_t *)&irq_set->data[0];
+ for (i = 0; i < irq_set->count; i++)
+ fd_ptr[i] = -1;
+
+ rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (rc)
+ otx2_err("Failed to set irqs vector rc=%d", rc);
+
+ return rc;
+}
+
+/**
+ * @internal
+ * Disable IRQ
+ */
+int
+otx2_disable_irqs(struct rte_intr_handle *intr_handle)
+{
+ /* Clear max_intr to indicate re-init next time */
+ intr_handle->max_intr = 0;
+ return rte_intr_disable(intr_handle);
+}
+
+/**
+ * @internal
+ * Register IRQ
+ */
+int
+otx2_register_irq(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec)
+{
+ struct rte_intr_handle tmp_handle;
+ int rc;
+
+	/* If max_intr is not yet known, read it from VFIO and init vectors */
+ if (intr_handle->max_intr == 0) {
+ irq_get_info(intr_handle);
+ irq_init(intr_handle);
+ }
+
+ if (vec > intr_handle->max_intr) {
+ otx2_err("Vector=%d greater than max_intr=%d", vec,
+ intr_handle->max_intr);
+ return -EINVAL;
+ }
+
+ tmp_handle = *intr_handle;
+ /* Create new eventfd for interrupt vector */
+ tmp_handle.fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (tmp_handle.fd == -1)
+ return -ENODEV;
+
+ /* Register vector interrupt callback */
+ rc = rte_intr_callback_register(&tmp_handle, cb, data);
+ if (rc) {
+ otx2_err("Failed to register vector:0x%x irq callback.", vec);
+ return rc;
+ }
+
+ intr_handle->efds[vec] = tmp_handle.fd;
+ intr_handle->nb_efd = (vec > intr_handle->nb_efd) ?
+ vec : intr_handle->nb_efd;
+ if ((intr_handle->nb_efd + 1) > intr_handle->max_intr)
+ intr_handle->max_intr = intr_handle->nb_efd + 1;
+
+ otx2_base_dbg("Enable vector:0x%x for vfio (efds: %d, max:%d)",
+ vec, intr_handle->nb_efd, intr_handle->max_intr);
+
+ /* Enable MSIX vectors to VFIO */
+ return irq_config(intr_handle, vec);
+}
+
+/**
+ * @internal
+ * Unregister IRQ
+ */
+void
+otx2_unregister_irq(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec)
+{
+ struct rte_intr_handle tmp_handle;
+
+ if (vec > intr_handle->max_intr) {
+ otx2_err("Error unregistering MSI-X interrupts vec:%d > %d",
+ vec, intr_handle->max_intr);
+ return;
+ }
+
+ tmp_handle = *intr_handle;
+ tmp_handle.fd = intr_handle->efds[vec];
+ if (tmp_handle.fd == -1)
+ return;
+
+	/* Unregister the callback from the EAL interrupt layer */
+ rte_intr_callback_unregister(&tmp_handle, cb, data);
+
+ otx2_base_dbg("Disable vector:0x%x for vfio (efds: %d, max:%d)",
+ vec, intr_handle->nb_efd, intr_handle->max_intr);
+
+ if (intr_handle->efds[vec] != -1)
+ close(intr_handle->efds[vec]);
+ /* Disable MSIX vectors from VFIO */
+ intr_handle->efds[vec] = -1;
+ irq_config(intr_handle, vec);
+}
+
+#else
+
+/**
+ * @internal
+ * Register IRQ
+ */
+int otx2_register_irq(__rte_unused struct rte_intr_handle *intr_handle,
+ __rte_unused rte_intr_callback_fn cb,
+ __rte_unused void *data, __rte_unused unsigned int vec)
+{
+ return -ENOTSUP;
+}
+
+
+/**
+ * @internal
+ * Unregister IRQ
+ */
+void otx2_unregister_irq(__rte_unused struct rte_intr_handle *intr_handle,
+ __rte_unused rte_intr_callback_fn cb,
+ __rte_unused void *data, __rte_unused unsigned int vec)
+{
+}
+
+/**
+ * @internal
+ * Disable IRQ
+ */
+int otx2_disable_irqs(__rte_unused struct rte_intr_handle *intr_handle)
+{
+ return -ENOTSUP;
+}
+
+#endif /* RTE_EAL_VFIO */
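A sketch of how a driver would hook a vector through this layer; the handler, context struct and vector number are hypothetical, and only otx2_register_irq()/otx2_unregister_irq() come from this file:

#include <rte_bus_pci.h>
#include <rte_interrupts.h>

struct my_ctx { int qid; };		/* hypothetical callback context */

static void
my_irq_handler(void *param)		/* rte_intr_callback_fn shape */
{
	struct my_ctx *ctx = param;
	/* read and acknowledge the device interrupt cause here */
	(void)ctx;
}

static int
setup_vec(struct rte_pci_device *pci_dev, struct my_ctx *ctx,
	  unsigned int vec)
{
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	int rc;

	/* Creates an eventfd for 'vec', registers the callback with
	 * the EAL and enables the MSI-X vector through VFIO.
	 */
	rc = otx2_register_irq(handle, my_irq_handler, ctx, vec);
	if (rc)
		return rc;

	/* ... on teardown ... */
	otx2_unregister_irq(handle, my_irq_handler, ctx, vec);
	return 0;
}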
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.h
new file mode 100644
index 000000000..0683cf554
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_IRQ_H_
+#define _OTX2_IRQ_H_
+
+#include <rte_pci.h>
+#include <rte_interrupts.h>
+
+#include "otx2_common.h"
+
+typedef struct {
+/* 128 devices translate to two 64-bit dwords */
+#define MAX_VFPF_DWORD_BITS 2
+ uint64_t bits[MAX_VFPF_DWORD_BITS];
+} otx2_intr_t;
+
+__rte_internal
+int otx2_register_irq(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec);
+__rte_internal
+void otx2_unregister_irq(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec);
+__rte_internal
+int otx2_disable_irqs(struct rte_intr_handle *intr_handle);
+
+#endif /* _OTX2_IRQ_H_ */
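otx2_intr_t is a 128-bit bitmap: VF v maps to bit (v % 64) of word (v / 64). A tiny illustration of the bookkeeping, with hypothetical helper names (BIT_ULL comes from otx2_common.h):

static inline void
vf_bit_set(otx2_intr_t *intr, unsigned int vf)
{
	intr->bits[vf / 64] |= BIT_ULL(vf % 64);
}

static inline int
vf_bit_test(const otx2_intr_t *intr, unsigned int vf)
{
	return !!(intr->bits[vf / 64] & BIT_ULL(vf % 64));
}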
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.c
new file mode 100644
index 000000000..2b7810929
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+
+#include "otx2_mbox.h"
+#include "otx2_dev.h"
+
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+
+#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
+
+#define RVU_VF_VFPF_MBOX0 (0x0000)
+#define RVU_VF_VFPF_MBOX1 (0x0008)
+
+static inline uint16_t
+msgs_offset(void)
+{
+ return RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+}
+
+void
+otx2_mbox_fini(struct otx2_mbox *mbox)
+{
+ mbox->reg_base = 0;
+ mbox->hwbase = 0;
+ free(mbox->dev);
+ mbox->dev = NULL;
+}
+
+void
+otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr =
+ (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
+ struct mbox_hdr *rx_hdr =
+ (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ tx_hdr->msg_size = 0;
+ tx_hdr->num_msgs = 0;
+ rx_hdr->msg_size = 0;
+ rx_hdr->num_msgs = 0;
+ rte_spinlock_unlock(&mdev->mbox_lock);
+}
+
+int
+otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
+ int direction, int ndevs, uint64_t intr_offset)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
+ mbox->intr_offset = intr_offset;
+ mbox->reg_base = reg_base;
+ mbox->hwbase = hwbase;
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_AF_AFPF_MBOX0;
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_PF_PFAF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
+ mbox->tr_shift = 12;
+ break;
+ case MBOX_DIR_VFPF:
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_VF_VFPF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ mbox->dev = malloc(ndevs * sizeof(struct otx2_mbox_dev));
+ if (!mbox->dev) {
+ otx2_mbox_fini(mbox);
+ return -ENOMEM;
+ }
+ mbox->ndevs = ndevs;
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = (void *)(mbox->hwbase + (devid * MBOX_SIZE));
+ rte_spinlock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+
+/**
+ * @internal
+ * Allocate a message response
+ */
+struct mbox_msghdr *
+otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size,
+ int size_rsp)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr = NULL;
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
+ size_rsp = RTE_ALIGN(size_rsp, MBOX_MSG_ALIGN);
+ /* Check if there is space in mailbox */
+ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
+ goto exit;
+ if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
+ goto exit;
+ if (mdev->msg_size == 0)
+ mdev->num_msgs = 0;
+ mdev->num_msgs++;
+
+ msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
+ mbox->tx_start + msgs_offset() + mdev->msg_size));
+
+ /* Clear the whole msg region */
+ otx2_mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
+ /* Init message header with reset values */
+ msghdr->ver = OTX2_MBOX_VERSION;
+ mdev->msg_size += size;
+ mdev->rsp_size += size_rsp;
+ msghdr->next_msgoff = mdev->msg_size + msgs_offset();
+exit:
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return msghdr;
+}
+
+/**
+ * @internal
+ * Send a mailbox message
+ */
+void
+otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr =
+ (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
+ struct mbox_hdr *rx_hdr =
+ (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+
+ /* Reset header for next messages */
+ tx_hdr->msg_size = mdev->msg_size;
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ mdev->msgs_acked = 0;
+
+	/* num_msgs != 0 signals to the peer that the buffer carries
+	 * messages, so it must be written only after the payload copy.
+	 */
+ tx_hdr->num_msgs = mdev->num_msgs;
+ rx_hdr->num_msgs = 0;
+
+ /* Sync mbox data into memory */
+ rte_wmb();
+
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+ rte_write64(1, (volatile void *)(mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift))));
+}
+
+/**
+ * @internal
+ * Wait and get mailbox response
+ */
+int
+otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr;
+ uint64_t offset;
+ int rc;
+
+ rc = otx2_mbox_wait_for_rsp(mbox, devid);
+ if (rc != 1)
+ return -EIO;
+
+ rte_rmb();
+
+ offset = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ if (msg != NULL)
+ *msg = msghdr;
+
+ return msghdr->rc;
+}
+
+/**
+ * Poll for up to the given wait time for a mailbox response
+ */
+static int
+mbox_poll(struct otx2_mbox *mbox, uint32_t wait)
+{
+ uint32_t timeout = 0, sleep = 1;
+ uint32_t wait_us = wait * 1000;
+ uint64_t rsp_reg = 0;
+ uintptr_t reg_addr;
+
+ reg_addr = mbox->reg_base + mbox->intr_offset;
+ do {
+ rsp_reg = otx2_read64(reg_addr);
+
+ if (timeout >= wait_us)
+ return -ETIMEDOUT;
+
+ rte_delay_us(sleep);
+ timeout += sleep;
+ } while (!rsp_reg);
+
+ rte_smp_rmb();
+
+ /* Clear interrupt */
+ otx2_write64(rsp_reg, reg_addr);
+
+ /* Reset mbox */
+ otx2_mbox_reset(mbox, 0);
+
+ return 0;
+}
+
+/**
+ * @internal
+ * Wait and get mailbox response with timeout
+ */
+int
+otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
+ uint32_t tmo)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr;
+ uint64_t offset;
+ int rc;
+
+ rc = otx2_mbox_wait_for_rsp_tmo(mbox, devid, tmo);
+ if (rc != 1)
+ return -EIO;
+
+ rte_rmb();
+
+ offset = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ if (msg != NULL)
+ *msg = msghdr;
+
+ return msghdr->rc;
+}
+
+static int
+mbox_wait(struct otx2_mbox *mbox, int devid, uint32_t rst_timo)
+{
+ volatile struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ uint32_t timeout = 0, sleep = 1;
+
+	rst_timo = rst_timo * 1000; /* Milliseconds to microseconds */
+ while (mdev->num_msgs > mdev->msgs_acked) {
+ rte_delay_us(sleep);
+ timeout += sleep;
+ if (timeout >= rst_timo) {
+ struct mbox_hdr *tx_hdr =
+ (struct mbox_hdr *)((uintptr_t)mdev->mbase +
+ mbox->tx_start);
+ struct mbox_hdr *rx_hdr =
+ (struct mbox_hdr *)((uintptr_t)mdev->mbase +
+ mbox->rx_start);
+
+ otx2_err("MBOX[devid: %d] message wait timeout %d, "
+ "num_msgs: %d, msgs_acked: %d "
+ "(tx/rx num_msgs: %d/%d), msg_size: %d, "
+ "rsp_size: %d",
+ devid, timeout, mdev->num_msgs,
+ mdev->msgs_acked, tx_hdr->num_msgs,
+ rx_hdr->num_msgs, mdev->msg_size,
+ mdev->rsp_size);
+
+ return -EIO;
+ }
+ rte_rmb();
+ }
+ return 0;
+}
+
+int
+otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int rc = 0;
+
+ /* Sync with mbox region */
+ rte_rmb();
+
+ if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
+ mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
+		/* For a VF, wait a bit longer to account for the round-trip delay */
+ tmo = tmo * 2;
+ }
+
+ /* Wait message */
+ if (rte_thread_is_intr())
+ rc = mbox_poll(mbox, tmo);
+ else
+ rc = mbox_wait(mbox, devid, tmo);
+
+ if (!rc)
+ rc = mdev->num_msgs;
+
+ return rc;
+}
+
+/**
+ * @internal
+ * Wait for the mailbox response
+ */
+int
+otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
+}
+
+int
+otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int avail;
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ avail = mbox->tx_size - mdev->msg_size - msgs_offset();
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return avail;
+}
+
+int
+otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
+{
+ struct ready_msg_rsp *rsp;
+ int rc;
+
+ otx2_mbox_alloc_msg_ready(mbox);
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->hdr.ver != OTX2_MBOX_VERSION) {
+ otx2_err("Incompatible MBox versions(AF: 0x%04x DPDK: 0x%04x)",
+ rsp->hdr.ver, OTX2_MBOX_VERSION);
+ return -EPIPE;
+ }
+
+ if (pcifunc)
+ *pcifunc = rsp->hdr.pcifunc;
+
+ return 0;
+}
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pcifunc,
+ uint16_t id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+
+ return 0;
+}
+
+/**
+ * @internal
+ * Convert mail box ID to name
+ */
+const char *otx2_mbox_id2name(uint16_t id)
+{
+ switch (id) {
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_MESSAGES
+ MBOX_UP_CGX_MESSAGES
+#undef M
+	default:
+ return "INVALID ID";
+ }
+}
+
+int otx2_mbox_id2size(uint16_t id)
+{
+ switch (id) {
+#define M(_1, _id, _2, _req_type, _3) case _id: return sizeof(struct _req_type);
+ MBOX_MESSAGES
+ MBOX_UP_CGX_MESSAGES
+#undef M
+	default:
+ return 0;
+ }
+}
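The request/response discipline implemented above is the one otx2_send_ready_msg() follows. Generalized, a down-mbox exchange with the AF (devid 0) looks like this sketch; query_ready and its return convention are illustrative, the mbox calls are from this file:

static int
query_ready(struct otx2_mbox *mbox, uint16_t *pf_func)
{
	struct ready_msg_rsp *rsp;
	int rc;

	/* Reserve a request slot in the Tx region */
	otx2_mbox_alloc_msg_ready(mbox);

	/* Publish num_msgs and ring the peer's doorbell */
	otx2_mbox_msg_send(mbox, 0);

	/* Block up to MBOX_RSP_TIMEOUT ms; returns rsp->hdr.rc */
	rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
	if (rc)
		return rc;

	*pf_func = rsp->hdr.pcifunc;
	return 0;
}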
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.h
new file mode 100644
index 000000000..7fa4276e9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.h
@@ -0,0 +1,1773 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_MBOX_H__
+#define __OTX2_MBOX_H__
+
+#include <errno.h>
+#include <stdbool.h>
+
+#include <rte_ether.h>
+#include <rte_spinlock.h>
+
+#include <otx2_common.h>
+
+#define SZ_64K (64ULL * 1024ULL)
+#define SZ_1K (1ULL * 1024ULL)
+#define MBOX_SIZE SZ_64K
+
+/* AF/PF mbox: PF initiated; PF/VF mbox: VF initiated */
+#define MBOX_DOWN_RX_START 0
+#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
+/* AF/PF mbox: AF initiated; PF/VF mbox: PF initiated */
+#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE SZ_1K
+#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "Incorrect mailbox area sizes"
+#endif
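The guard above pins down the per-device 64K window arithmetic; spelled out:

	offset  0K: MBOX_DOWN_RX (46K)
	offset 46K: MBOX_DOWN_TX (16K)
	offset 62K: MBOX_UP_RX   ( 1K)
	offset 63K: MBOX_UP_TX   ( 1K)
	offset 64K: end

46K + 16K + 1K + 1K = 64K = MBOX_SIZE, so MBOX_UP_TX_START + MBOX_UP_TX_SIZE lands exactly at the window boundary.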
+
+#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
+
+#define MBOX_RSP_TIMEOUT 3000 /* Time to wait for mbox response in ms */
+
+#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16 bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF 0 /* AF replies to PF */
+#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
+#define MBOX_DIR_PFVF 2 /* PF replies to VF */
+#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+
+/* Device memory does not support unaligned access; instruct the compiler
+ * not to optimize accesses to mailbox memory.
+ */
+#define __otx2_io volatile
+
+struct otx2_mbox_dev {
+ void *mbase; /* This dev's mbox region */
+ rte_spinlock_t mbox_lock;
+ uint16_t msg_size; /* Total msg size to be sent */
+	uint16_t rsp_size; /* Total rsp size, to ensure replies fit the mbox */
+ uint16_t num_msgs; /* No of msgs sent or waiting for response */
+ uint16_t msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+ uintptr_t hwbase; /* Mbox region advertised by HW */
+ uintptr_t reg_base;/* CSR base for this dev */
+ uint64_t trigger; /* Trigger mbox notification */
+ uint16_t tr_shift; /* Mbox trigger shift */
+ uint64_t rx_start; /* Offset of Rx region in mbox memory */
+ uint64_t tx_start; /* Offset of Tx region in mbox memory */
+ uint16_t rx_size; /* Size of Rx region */
+ uint16_t tx_size; /* Size of Tx region */
+ uint16_t ndevs; /* The number of peers */
+ struct otx2_mbox_dev *dev;
+ uint64_t intr_offset; /* Offset to interrupt register */
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+ uint64_t __otx2_io msg_size; /* Total msgs size embedded */
+ uint16_t __otx2_io num_msgs; /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+ uint16_t __otx2_io pcifunc; /* Who's sending this msg */
+ uint16_t __otx2_io id; /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+ /* Signature, for validating corrupted msgs */
+ uint16_t __otx2_io sig;
+#define OTX2_MBOX_VERSION (0x0007)
+ /* Version of msg's structure for this ID */
+ uint16_t __otx2_io ver;
+ /* Offset of next msg within mailbox region */
+ uint16_t __otx2_io next_msgoff;
+ int __otx2_io rc; /* Msg processed response code */
+};
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK 0xFFFF
+#define MBOX_MSG_INVALID 0xFFFE
+#define MBOX_MSG_MAX 0xFFFF
+
+#define MBOX_MESSAGES \
+/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
+M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach_req, msg_rsp)\
+M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach_req, msg_rsp)\
+M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \
+M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
+M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
+M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
+/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
+M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get,\
+ cgx_mac_addr_set_or_get) \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get,\
+ cgx_mac_addr_set_or_get) \
+M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg)\
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
+M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
+ cgx_pause_frm_cfg) \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_SET_LINK_STATE, 0x214, cgx_set_link_state, \
+ cgx_set_link_state_msg, msg_rsp) \
+M(CGX_GET_PHY_MOD_TYPE, 0x215, cgx_get_phy_mod_type, msg_req, \
+ cgx_phy_mod_type) \
+M(CGX_SET_PHY_MOD_TYPE, 0x216, cgx_set_phy_mod_type, cgx_phy_mod_type, \
+ msg_rsp) \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
+/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req, \
+ npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp)\
+M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+M(SSO_LF_ALLOC, 0x600, sso_lf_alloc, sso_lf_alloc_req, \
+ sso_lf_alloc_rsp) \
+M(SSO_LF_FREE, 0x601, sso_lf_free, sso_lf_free_req, msg_rsp) \
+M(SSOW_LF_ALLOC, 0x602, ssow_lf_alloc, ssow_lf_alloc_req, msg_rsp)\
+M(SSOW_LF_FREE, 0x603, ssow_lf_free, ssow_lf_free_req, msg_rsp) \
+M(SSO_HW_SETCONFIG, 0x604, sso_hw_setconfig, sso_hw_setconfig, \
+ msg_rsp) \
+M(SSO_GRP_SET_PRIORITY, 0x605, sso_grp_set_priority, sso_grp_priority, \
+ msg_rsp) \
+M(SSO_GRP_GET_PRIORITY, 0x606, sso_grp_get_priority, sso_info_req, \
+ sso_grp_priority) \
+M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, msg_req, msg_rsp) \
+M(SSO_GRP_QOS_CONFIG, 0x608, sso_grp_qos_config, sso_grp_qos_cfg, \
+ msg_rsp) \
+M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req, \
+ sso_grp_stats) \
+M(SSO_HWS_GET_STATS, 0x610, sso_hws_get_stats, sso_info_req, \
+ sso_hws_stats) \
+/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, tim_lf_alloc_req, \
+ tim_lf_alloc_rsp) \
+M(TIM_LF_FREE, 0x801, tim_lf_free, tim_ring_req, msg_rsp) \
+M(TIM_CONFIG_RING, 0x802, tim_config_ring, tim_config_req, msg_rsp)\
+M(TIM_ENABLE_RING, 0x803, tim_enable_ring, tim_ring_req, \
+ tim_enable_rsp) \
+M(TIM_DISABLE_RING, 0x804, tim_disable_ring, tim_ring_req, msg_rsp) \
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
+ cpt_lf_alloc_rsp_msg) \
+M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
+M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
+ cpt_rd_wr_reg_msg) \
+M(CPT_SET_CRYPTO_GRP, 0xA03, cpt_set_crypto_grp, \
+ cpt_set_crypto_grp_req_msg, \
+ msg_rsp) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
+M(CPT_RX_INLINE_LF_CFG, 0xBFE, cpt_rx_inline_lf_cfg, \
+ cpt_rx_inline_lf_cfg_msg, msg_rsp) \
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, \
+ npc_mcam_alloc_entry_req, \
+ npc_mcam_alloc_entry_rsp) \
+M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \
+ npc_mcam_free_entry_req, msg_rsp) \
+M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \
+ npc_mcam_write_entry_req, msg_rsp) \
+M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, \
+ npc_mcam_shift_entry_req, \
+ npc_mcam_shift_entry_rsp) \
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \
+ npc_mcam_alloc_counter_req, \
+ npc_mcam_alloc_counter_rsp) \
+M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \
+ npc_mcam_oper_counter_req, \
+ msg_rsp) \
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \
+ npc_mcam_unmap_counter_req, \
+ msg_rsp) \
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \
+ npc_mcam_oper_counter_req, \
+ msg_rsp) \
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \
+ npc_mcam_oper_counter_req, \
+ npc_mcam_oper_counter_rsp) \
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry,\
+ npc_mcam_alloc_and_write_entry_req, \
+ npc_mcam_alloc_and_write_entry_rsp) \
+M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, msg_req, \
+ npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, \
+ npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, msg_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, \
+ msg_rsp) \
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, nix_lf_alloc_req, \
+ nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, \
+ nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, hwctx_disable_req, \
+ msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, nix_txsch_alloc_req, \
+ nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, \
+ msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \
+ nix_txschq_config) \
+M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, \
+ msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \
+M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \
+M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \
+M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \
+M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \
+ nix_mark_format_cfg, \
+ nix_mark_format_cfg_rsp) \
+M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
+M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, nix_lso_format_cfg, \
+ nix_lso_format_cfg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, \
+ msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, \
+ msg_rsp) \
+M(NIX_SET_VLAN_TPID, 0x8015, nix_set_vlan_tpid, nix_set_vlan_tpid, \
+ msg_rsp) \
+M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp)\
+M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, \
+ nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, \
+ 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+#define MBOX_UP_CGX_MESSAGES \
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, \
+ msg_rsp) \
+M(CGX_PTP_RX_INFO, 0xC01, cgx_ptp_rx_info, cgx_ptp_rx_info_msg, \
+ msg_rsp)
+
+enum {
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
+#undef M
+};
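The enum above shows the X-macro pattern at work: MBOX_MESSAGES is re-expanded under different M() definitions. For the READY row, the definition above yields

	MBOX_MSG_READY = 0x001,

while otx2_mbox_id2name() and otx2_mbox_id2size() in otx2_mbox.c redefine M() over the same rows to build their lookup switches, giving respectively

	case 0x001: return "READY";
	case 0x001: return sizeof(struct msg_req);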
+
+/* Mailbox message formats */
+
+#define RVU_DEFAULT_PF_FUNC 0xFFFF
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+ struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+ struct mbox_msghdr hdr;
+};
+
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+ RVU_INVALID_VF_ID = -256,
+};
+
+struct ready_msg_rsp {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io sclk_feq; /* SCLK frequency */
+ uint16_t __otx2_io rclk_freq; /* RCLK frequency */
+};
+
+/* Struct to set pkind */
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_EDSA BIT_ULL(1)
+#define OTX2_PRIV_FLAGS_HIGIG BIT_ULL(2)
+#define OTX2_PRIV_FLAGS_LEN_90B BIT_ULL(3)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ uint64_t __otx2_io mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ uint8_t __otx2_io dir;
+	uint8_t __otx2_io pkind; /* valid only when the custom flag is set */
+};
+
+/* Structure for requesting resource provisioning.
+ * The 'modify' flag is used either to request more of a certain
+ * resource type or to detach part of it.
+ * The remaining fields specify how many resources of each type
+ * are to be attached.
+ * To request LFs from two blocks of the same type, this mailbox
+ * can be sent twice, as below:
+ * struct rsrc_attach *attach;
+ * .. Allocate memory for message ..
+ * attach->cptlfs = 3; <3 LFs from CPT0>
+ * .. Send message ..
+ * .. Allocate memory for message ..
+ * attach->modify = 1;
+ * attach->cpt_blkaddr = BLKADDR_CPT1;
+ * attach->cptlfs = 2; <2 LFs from CPT1>
+ * .. Send message ..
+ */
+struct rsrc_attach_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io modify:1;
+ uint8_t __otx2_io npalf:1;
+ uint8_t __otx2_io nixlf:1;
+ uint16_t __otx2_io sso;
+ uint16_t __otx2_io ssow;
+ uint16_t __otx2_io timlfs;
+ uint16_t __otx2_io cptlfs;
+ uint16_t __otx2_io reelfs;
+ /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+ int __otx2_io cpt_blkaddr;
+ /* BLKADDR_REE0/BLKADDR_REE1 or 0 for BLKADDR_REE0 */
+ int __otx2_io ree_blkaddr;
+};
+
+/* Structure for relinquishing resources.
+ * The 'partial' flag is used to relinquish all resources of only the
+ * selected types. If not set, all resources of all types provisioned
+ * to the RVU function will be detached.
+ */
+struct rsrc_detach_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io partial:1;
+ uint8_t __otx2_io npalf:1;
+ uint8_t __otx2_io nixlf:1;
+ uint8_t __otx2_io sso:1;
+ uint8_t __otx2_io ssow:1;
+ uint8_t __otx2_io timlfs:1;
+ uint8_t __otx2_io cptlfs:1;
+ uint8_t __otx2_io reelfs:1;
+};
+
+/* NIX Transmit schedulers */
+#define NIX_TXSCH_LVL_SMQ 0x0
+#define NIX_TXSCH_LVL_MDQ 0x0
+#define NIX_TXSCH_LVL_TL4 0x1
+#define NIX_TXSCH_LVL_TL3 0x2
+#define NIX_TXSCH_LVL_TL2 0x3
+#define NIX_TXSCH_LVL_TL1 0x4
+#define NIX_TXSCH_LVL_CNT 0x5
+
+/*
+ * Number of resources available to the caller.
+ * In reply to MBOX_MSG_FREE_RSRC_CNT.
+ */
+struct free_rsrcs_rsp {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io schq[NIX_TXSCH_LVL_CNT];
+ uint16_t __otx2_io sso;
+ uint16_t __otx2_io tim;
+ uint16_t __otx2_io ssow;
+ uint16_t __otx2_io cpt;
+ uint8_t __otx2_io npa;
+ uint8_t __otx2_io nix;
+ uint16_t __otx2_io schq_nix1[NIX_TXSCH_LVL_CNT];
+ uint8_t __otx2_io nix1;
+ uint8_t __otx2_io cpt1;
+ uint8_t __otx2_io ree0;
+ uint8_t __otx2_io ree1;
+};
+
+#define MSIX_VECTOR_INVALID 0xFFFF
+#define MAX_RVU_BLKLF_CNT 256
+
+struct msix_offset_rsp {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io npa_msixoff;
+ uint16_t __otx2_io nix_msixoff;
+ uint8_t __otx2_io sso;
+ uint8_t __otx2_io ssow;
+ uint8_t __otx2_io timlfs;
+ uint8_t __otx2_io cptlfs;
+ uint16_t __otx2_io sso_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io ssow_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io timlf_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+ uint8_t __otx2_io cpt1_lfs;
+ uint8_t __otx2_io ree0_lfs;
+ uint8_t __otx2_io ree1_lfs;
+ uint16_t __otx2_io cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+/* CGX mbox message formats */
+
+struct cgx_stats_rsp {
+ struct mbox_msghdr hdr;
+#define CGX_RX_STATS_COUNT 13
+#define CGX_TX_STATS_COUNT 18
+ uint64_t __otx2_io rx_stats[CGX_RX_STATS_COUNT];
+ uint64_t __otx2_io tx_stats[CGX_TX_STATS_COUNT];
+};
+
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io fec_corr_blks;
+ uint64_t __otx2_io fec_uncorr_blks;
+};
+/* Structure for requesting the operation for
+ * setting/getting mac address in the CGX interface
+ */
+struct cgx_mac_addr_set_or_get {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io max_dmac_filters;
+};
+
+struct cgx_link_user_info {
+ uint64_t __otx2_io link_up:1;
+ uint64_t __otx2_io full_duplex:1;
+ uint64_t __otx2_io lmac_type_id:4;
+ uint64_t __otx2_io speed:20; /* speed in Mbps */
+ uint64_t __otx2_io an:1; /* AN supported or not */
+ uint64_t __otx2_io fec:2; /* FEC type if enabled else 0 */
+ uint64_t __otx2_io port:8;
+#define LMACTYPE_STR_LEN 16
+ char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+ struct mbox_msghdr hdr;
+ struct cgx_link_user_info link_info;
+};
+
+struct cgx_ptp_rx_info_msg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io ptp_en;
+};
+
+struct cgx_pause_frm_cfg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io set;
+ /* set = 1 if the request is to config pause frames */
+ /* set = 0 if the request is to fetch pause frames config */
+ uint8_t __otx2_io rx_pause;
+ uint8_t __otx2_io tx_pause;
+};
+
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ uint16_t __otx2_io sff_id;
+ uint8_t __otx2_io buf[SFP_EEPROM_SIZE];
+ uint64_t __otx2_io reserved;
+};
+
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+};
+
+struct phy_s {
+ uint64_t __otx2_io can_change_mod_type : 1;
+ uint64_t __otx2_io mod_type : 1;
+};
+
+struct cgx_lmac_fwdata_s {
+ uint16_t __otx2_io rw_valid;
+ uint64_t __otx2_io supported_fec;
+ uint64_t __otx2_io supported_an;
+ uint64_t __otx2_io supported_link_modes;
+ /* Only applicable if AN is supported */
+ uint64_t __otx2_io advertised_fec;
+ uint64_t __otx2_io advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1023
+ uint64_t __otx2_io reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int __otx2_io fec;
+};
+
+struct cgx_set_link_state_msg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io enable;
+};
+
+struct cgx_phy_mod_type {
+ struct mbox_msghdr hdr;
+ int __otx2_io mod;
+};
+
+struct cgx_set_link_mode_args {
+ uint32_t __otx2_io speed;
+ uint8_t __otx2_io duplex;
+ uint8_t __otx2_io an;
+ uint8_t __otx2_io ports;
+ uint64_t __otx2_io mode;
+};
+
+struct cgx_set_link_mode_req {
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int __otx2_io status;
+};
+/* NPA mbox message formats */
+
+/* NPA mailbox error codes
+ * Range 301 - 400.
+ */
+enum npa_af_status {
+ NPA_AF_ERR_PARAM = -301,
+ NPA_AF_ERR_AQ_FULL = -302,
+ NPA_AF_ERR_AQ_ENQUEUE = -303,
+ NPA_AF_ERR_AF_LF_INVALID = -304,
+ NPA_AF_ERR_AF_LF_ALLOC = -305,
+ NPA_AF_ERR_LF_RESET = -306,
+};
+
+#define NPA_AURA_SZ_0 0
+#define NPA_AURA_SZ_128 1
+#define NPA_AURA_SZ_256 2
+#define NPA_AURA_SZ_512 3
+#define NPA_AURA_SZ_1K 4
+#define NPA_AURA_SZ_2K 5
+#define NPA_AURA_SZ_4K 6
+#define NPA_AURA_SZ_8K 7
+#define NPA_AURA_SZ_16K 8
+#define NPA_AURA_SZ_32K 9
+#define NPA_AURA_SZ_64K 10
+#define NPA_AURA_SZ_128K 11
+#define NPA_AURA_SZ_256K 12
+#define NPA_AURA_SZ_512K 13
+#define NPA_AURA_SZ_1M 14
+#define NPA_AURA_SZ_MAX 15
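As the names suggest, the non-zero encodings follow a power-of-two rule: for 1 <= sz <= 14 the aura count is 128 << (sz - 1), i.e. 2^(6 + sz); e.g. NPA_AURA_SZ_1K = 4 gives 128 << 3 = 1024 auras.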
+
+/* For NPA LF context alloc and init */
+struct npa_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int __otx2_io node;
+ int __otx2_io aura_sz; /* No of auras. See NPA_AURA_SZ_* */
+ uint32_t __otx2_io nr_pools; /* No of pools */
+ uint64_t __otx2_io way_mask;
+};
+
+struct npa_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io stack_pg_ptrs; /* No of ptrs per stack page */
+ uint32_t __otx2_io stack_pg_bytes; /* Size of stack page */
+ uint16_t __otx2_io qints; /* NPA_AF_CONST::QINTS */
+};
+
+/* NPA AQ enqueue msg */
+struct npa_aq_enq_req {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io aura_id;
+ uint8_t __otx2_io ctype;
+ uint8_t __otx2_io op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ __otx2_io struct npa_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ __otx2_io struct npa_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ __otx2_io struct npa_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ __otx2_io struct npa_pool_s pool_mask;
+ };
+};
+
+struct npa_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ __otx2_io struct npa_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ __otx2_io struct npa_pool_s pool;
+ };
+};
+
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io ctype;
+};
+
+/* NIX mbox message formats */
+
+/* NIX mailbox error codes
+ * Range 401 - 500.
+ */
+enum nix_af_status {
+ NIX_AF_ERR_PARAM = -401,
+ NIX_AF_ERR_AQ_FULL = -402,
+ NIX_AF_ERR_AQ_ENQUEUE = -403,
+ NIX_AF_ERR_AF_LF_INVALID = -404,
+ NIX_AF_ERR_AF_LF_ALLOC = -405,
+ NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
+ NIX_AF_ERR_TLX_INVALID = -407,
+ NIX_AF_ERR_RSS_SIZE_INVALID = -408,
+ NIX_AF_ERR_RSS_GRPS_INVALID = -409,
+ NIX_AF_ERR_FRS_INVALID = -410,
+ NIX_AF_ERR_RX_LINK_INVALID = -411,
+ NIX_AF_INVAL_TXSCHQ_CFG = -412,
+ NIX_AF_SMQ_FLUSH_FAILED = -413,
+ NIX_AF_ERR_LF_RESET = -414,
+ NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
+ NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
+ NIX_AF_ERR_MARK_CFG_FAIL = -417,
+ NIX_AF_ERR_LSO_CFG_FAIL = -418,
+ NIX_AF_INVAL_NPA_PF_FUNC = -419,
+ NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+};
+
+/* For NIX LF context alloc and init */
+struct nix_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int __otx2_io node;
+ uint32_t __otx2_io rq_cnt; /* No of receive queues */
+ uint32_t __otx2_io sq_cnt; /* No of send queues */
+ uint32_t __otx2_io cq_cnt; /* No of completion queues */
+ uint8_t __otx2_io xqe_sz;
+ uint16_t __otx2_io rss_sz;
+ uint8_t __otx2_io rss_grps;
+ uint16_t __otx2_io npa_func;
+ /* RVU_DEFAULT_PF_FUNC == default pf_func associated with lf */
+ uint16_t __otx2_io sso_func;
+ uint64_t __otx2_io rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+ uint64_t __otx2_io way_mask;
+#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
+ uint64_t flags;
+};
+
+struct nix_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io sqb_size;
+ uint16_t __otx2_io rx_chan_base;
+ uint16_t __otx2_io tx_chan_base;
+ uint8_t __otx2_io rx_chan_cnt; /* Total number of RX channels */
+ uint8_t __otx2_io tx_chan_cnt; /* Total number of TX channels */
+ uint8_t __otx2_io lso_tsov4_idx;
+ uint8_t __otx2_io lso_tsov6_idx;
+ uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
+ uint8_t __otx2_io lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+ uint8_t __otx2_io lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+ uint16_t __otx2_io cints; /* NIX_AF_CONST2::CINTS */
+ uint16_t __otx2_io qints; /* NIX_AF_CONST2::QINTS */
+	uint8_t __otx2_io hw_rx_tstamp_en; /* set if rx timestamping enabled */
+ uint8_t __otx2_io cgx_links; /* No. of CGX links present in HW */
+ uint8_t __otx2_io lbk_links; /* No. of LBK links present in HW */
+ uint8_t __otx2_io sdp_links; /* No. of SDP links present in HW */
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ uint64_t __otx2_io flags;
+};
+
+/* NIX AQ enqueue msg */
+struct nix_aq_enq_req {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io qidx;
+ uint8_t __otx2_io ctype;
+ uint8_t __otx2_io op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RQ */
+ __otx2_io struct nix_rq_ctx_s rq;
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_SQ */
+ __otx2_io struct nix_sq_ctx_s sq;
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_CQ */
+ __otx2_io struct nix_cq_ctx_s cq;
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RSS */
+ __otx2_io struct nix_rsse_s rss;
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_MCE */
+ __otx2_io struct nix_rx_mce_s mce;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RQ */
+ __otx2_io struct nix_rq_ctx_s rq_mask;
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_SQ */
+ __otx2_io struct nix_sq_ctx_s sq_mask;
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_CQ */
+ __otx2_io struct nix_cq_ctx_s cq_mask;
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RSS */
+ __otx2_io struct nix_rsse_s rss_mask;
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_MCE */
+ __otx2_io struct nix_rx_mce_s mce_mask;
+ };
+};
+
+struct nix_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ __otx2_io struct nix_rq_ctx_s rq;
+ __otx2_io struct nix_sq_ctx_s sq;
+ __otx2_io struct nix_cq_ctx_s cq;
+ __otx2_io struct nix_rsse_s rss;
+ __otx2_io struct nix_rx_mce_s mce;
+ };
+};
+
+/* Tx scheduler/shaper mailbox messages */
+
+#define MAX_TXSCHQ_PER_FUNC 128
+
+struct nix_txsch_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count request at each level */
+ uint16_t __otx2_io schq_contig[NIX_TXSCH_LVL_CNT]; /* Contig. queues */
+ uint16_t __otx2_io schq[NIX_TXSCH_LVL_CNT]; /* Non-Contig. queues */
+};
+
+struct nix_txsch_alloc_rsp {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count allocated at each level */
+ uint16_t __otx2_io schq_contig[NIX_TXSCH_LVL_CNT]; /* Contig. queues */
+ uint16_t __otx2_io schq[NIX_TXSCH_LVL_CNT]; /* Non-Contig. queues */
+ /* Scheduler queue list allocated at each level */
+ uint16_t __otx2_io
+ schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ uint16_t __otx2_io schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ /* Traffic aggregation scheduler level */
+ uint8_t __otx2_io aggr_level;
+ /* Aggregation lvl's RR_PRIO config */
+ uint8_t __otx2_io aggr_lvl_rr_prio;
+ /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
+ uint8_t __otx2_io link_cfg_lvl;
+};
+
+struct nix_txsch_free_req {
+ struct mbox_msghdr hdr;
+#define TXSCHQ_FREE_ALL BIT_ULL(0)
+ uint16_t __otx2_io flags;
+ /* Scheduler queue level to be freed */
+ uint16_t __otx2_io schq_lvl;
+ /* List of scheduler queues to be freed */
+ uint16_t __otx2_io schq;
+};
+
+struct nix_txschq_config {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+ uint8_t __otx2_io read;
+#define TXSCHQ_IDX_SHIFT 16
+#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
+#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
+ uint8_t __otx2_io num_regs;
+#define MAX_REGS_PER_MBOX_MSG 20
+ uint64_t __otx2_io reg[MAX_REGS_PER_MBOX_MSG];
+ uint64_t __otx2_io regval[MAX_REGS_PER_MBOX_MSG];
+ /* All 0's => overwrite with new value */
+ uint64_t __otx2_io regval_mask[MAX_REGS_PER_MBOX_MSG];
+};
+
+struct nix_vtag_config {
+ struct mbox_msghdr hdr;
+ /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
+ uint8_t __otx2_io vtag_size;
+ /* cfg_type is '0' for tx vlan cfg
+ * cfg_type is '1' for rx vlan cfg
+ */
+ uint8_t __otx2_io cfg_type;
+ union {
+ /* Valid when cfg_type is '0' */
+ struct {
+ uint64_t __otx2_io vtag0;
+ uint64_t __otx2_io vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ uint8_t __otx2_io cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ uint8_t __otx2_io cfg_vtag1 :1;
+
+			/* vtag0_idx & vtag1_idx are only valid when
+			 * both cfg_vtag0 & cfg_vtag1 are '0's;
+			 * these fields are used along with free_vtag0
+			 * & free_vtag1 to free the nix lf's tx_vlan
+			 * configuration.
+			 *
+			 * They denote the indices of the tx_vtag def
+			 * registers that need to be cleared and freed.
+			 */
+ int __otx2_io vtag0_idx;
+ int __otx2_io vtag1_idx;
+
+ /* Free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* Free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ uint8_t __otx2_io free_vtag0 :1;
+ /* Free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ uint8_t __otx2_io free_vtag1 :1;
+ } tx;
+
+ /* Valid when cfg_type is '1' */
+ struct {
+ /* Rx vtag type index, valid values are in 0..7 range */
+ uint8_t __otx2_io vtag_type;
+ /* Rx vtag strip */
+ uint8_t __otx2_io strip_vtag :1;
+ /* Rx vtag capture */
+ uint8_t __otx2_io capture_vtag :1;
+ } rx;
+ };
+};
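+/* Illustrative sketch, not part of the original source: configuring a
+ * single 4-octet TX vtag0 with the fields above (the header value is
+ * hypothetical):
+ *
+ *	cfg->vtag_size = 0;        // 4 octet VTAG
+ *	cfg->cfg_type = 0;         // tx vlan cfg
+ *	cfg->tx.vtag0 = vlan_hdr;  // hypothetical vtag header value
+ *	cfg->tx.cfg_vtag0 = 1;     // configure vtag0
+ *	cfg->tx.free_vtag0 = 0;
+ *
+ * The nix_vtag_config_rsp below then returns the vtag0_idx allocated for
+ * this definition.
+ */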
+
+struct nix_vtag_config_rsp {
+ struct mbox_msghdr hdr;
+ /* Indices of the tx_vtag def registers used to configure
+ * tx vtag0 & vtag1 headers. These indices are valid only
+ * when the nix_vtag_config mbox request asked for vtag0
+ * and/or vtag1 configuration.
+ */
+ int __otx2_io vtag0_idx;
+ int __otx2_io vtag1_idx;
+};
+
+struct nix_rss_flowkey_cfg {
+ struct mbox_msghdr hdr;
+ int __otx2_io mcam_index; /* MCAM entry index to modify */
+ uint32_t __otx2_io flowkey_cfg; /* Flowkey types selected */
+#define FLOW_KEY_TYPE_PORT BIT(0)
+#define FLOW_KEY_TYPE_IPV4 BIT(1)
+#define FLOW_KEY_TYPE_IPV6 BIT(2)
+#define FLOW_KEY_TYPE_TCP BIT(3)
+#define FLOW_KEY_TYPE_UDP BIT(4)
+#define FLOW_KEY_TYPE_SCTP BIT(5)
+#define FLOW_KEY_TYPE_NVGRE BIT(6)
+#define FLOW_KEY_TYPE_VXLAN BIT(7)
+#define FLOW_KEY_TYPE_GENEVE BIT(8)
+#define FLOW_KEY_TYPE_ETH_DMAC BIT(9)
+#define FLOW_KEY_TYPE_IPV6_EXT BIT(10)
+#define FLOW_KEY_TYPE_GTPU BIT(11)
+#define FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
+#define FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
+#define FLOW_KEY_TYPE_INNR_TCP BIT(14)
+#define FLOW_KEY_TYPE_INNR_UDP BIT(15)
+#define FLOW_KEY_TYPE_INNR_SCTP BIT(16)
+#define FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define FLOW_KEY_TYPE_CH_LEN_90B BIT(18)
+#define FLOW_KEY_TYPE_L4_DST BIT(28)
+#define FLOW_KEY_TYPE_L4_SRC BIT(29)
+#define FLOW_KEY_TYPE_L3_DST BIT(30)
+#define FLOW_KEY_TYPE_L3_SRC BIT(31)
+ uint8_t __otx2_io group; /* RSS context or group */
+};
+
+struct nix_rss_flowkey_cfg_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io alg_idx; /* Selected algo index */
+};
+
+struct nix_set_mac_addr {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+struct nix_get_mac_addr_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+struct nix_mark_format_cfg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io offset;
+ uint8_t __otx2_io y_mask;
+ uint8_t __otx2_io y_val;
+ uint8_t __otx2_io r_mask;
+ uint8_t __otx2_io r_val;
+};
+
+struct nix_mark_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io mark_format_idx;
+};
+
+struct nix_lso_format_cfg {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io field_mask;
+ uint64_t __otx2_io fields[NIX_LSO_FIELD_MAX];
+};
+
+struct nix_lso_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io lso_format_idx;
+};
+
+struct nix_rx_mode {
+ struct mbox_msghdr hdr;
+#define NIX_RX_MODE_UCAST BIT(0)
+#define NIX_RX_MODE_PROMISC BIT(1)
+#define NIX_RX_MODE_ALLMULTI BIT(2)
+ uint16_t __otx2_io mode;
+};
+
+struct nix_rx_cfg {
+ struct mbox_msghdr hdr;
+#define NIX_RX_OL3_VERIFY BIT(0)
+#define NIX_RX_OL4_VERIFY BIT(1)
+ uint8_t __otx2_io len_verify; /* Outer L3/L4 len check */
+#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
+ uint8_t __otx2_io csum_verify; /* Outer L4 checksum verification */
+};
+
+struct nix_frs_cfg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io update_smq; /* Update SMQ's min/max lens */
+ uint8_t __otx2_io update_minlen; /* Set minlen also */
+ uint8_t __otx2_io sdp_link; /* Set SDP RX link */
+ uint16_t __otx2_io maxlen;
+ uint16_t __otx2_io minlen;
+};
+
+struct nix_set_vlan_tpid {
+ struct mbox_msghdr hdr;
+#define NIX_VLAN_TYPE_INNER 0
+#define NIX_VLAN_TYPE_OUTER 1
+ uint8_t __otx2_io vlan_type;
+ uint16_t __otx2_io tpid;
+};
+
+struct nix_bp_cfg_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io chan_base; /* Starting channel number */
+ uint8_t __otx2_io chan_cnt; /* Number of channels */
+ uint8_t __otx2_io bpid_per_chan;
+ /* bpid_per_chan = 0 assigns single bp id for range of channels */
+ /* bpid_per_chan = 1 assigns separate bp id for each channel */
+};
+
+/* PF can be mapped to either CGX or LBK interface,
+ * so maximum 64 channels are possible.
+ */
+#define NIX_MAX_CHAN 64
+struct nix_bp_cfg_rsp {
+ struct mbox_msghdr hdr;
+ /* Channel and bpid mapping */
+ uint16_t __otx2_io chan_bpid[NIX_MAX_CHAN];
+ /* Number of channels for which bpids are assigned */
+ uint8_t __otx2_io chan_cnt;
+};
+
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io cpt_credit;
+ struct {
+ uint8_t __otx2_io egrp;
+ uint8_t __otx2_io opcode;
+ } gen_cfg;
+ struct {
+ uint16_t __otx2_io cpt_pf_func;
+ uint8_t __otx2_io cpt_slot;
+ } inst_qsel;
+ uint8_t __otx2_io enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io sa_base_addr;
+ struct {
+ uint32_t __otx2_io tag_const;
+ uint16_t __otx2_io lenm1_max;
+ uint8_t __otx2_io sa_pow2_size;
+ uint8_t __otx2_io tt;
+ } ipsec_cfg0;
+ struct {
+ uint32_t __otx2_io sa_idx_max;
+ uint8_t __otx2_io sa_idx_w;
+ } ipsec_cfg1;
+ uint8_t __otx2_io enable;
+};
+
+/* SSO mailbox error codes
+ * Range 501 - 600.
+ */
+enum sso_af_status {
+ SSO_AF_ERR_PARAM = -501,
+ SSO_AF_ERR_LF_INVALID = -502,
+ SSO_AF_ERR_AF_LF_ALLOC = -503,
+ SSO_AF_ERR_GRP_EBUSY = -504,
+ SSO_AF_INVAL_NPA_PF_FUNC = -505,
+};
+
+struct sso_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int __otx2_io node;
+ uint16_t __otx2_io hwgrps;
+};
+
+struct sso_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io xaq_buf_size;
+ uint32_t __otx2_io xaq_wq_entries;
+ uint32_t __otx2_io in_unit_entries;
+ uint16_t __otx2_io hwgrps;
+};
+
+struct sso_lf_free_req {
+ struct mbox_msghdr hdr;
+ int __otx2_io node;
+ uint16_t __otx2_io hwgrps;
+};
+
+/* SSOW mailbox error codes
+ * Range 601 - 700.
+ */
+enum ssow_af_status {
+ SSOW_AF_ERR_PARAM = -601,
+ SSOW_AF_ERR_LF_INVALID = -602,
+ SSOW_AF_ERR_AF_LF_ALLOC = -603,
+};
+
+struct ssow_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int __otx2_io node;
+ uint16_t __otx2_io hws;
+};
+
+struct ssow_lf_free_req {
+ struct mbox_msghdr hdr;
+ int __otx2_io node;
+ uint16_t __otx2_io hws;
+};
+
+struct sso_hw_setconfig {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io npa_aura_id;
+ uint16_t __otx2_io npa_pf_func;
+ uint16_t __otx2_io hwgrps;
+};
+
+struct sso_info_req {
+ struct mbox_msghdr hdr;
+ union {
+ uint16_t __otx2_io grp;
+ uint16_t __otx2_io hws;
+ };
+};
+
+struct sso_grp_priority {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io grp;
+ uint8_t __otx2_io priority;
+ uint8_t __otx2_io affinity;
+ uint8_t __otx2_io weight;
+};
+
+struct sso_grp_qos_cfg {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io grp;
+ uint32_t __otx2_io xaq_limit;
+ uint16_t __otx2_io taq_thr;
+ uint16_t __otx2_io iaq_thr;
+};
+
+struct sso_grp_stats {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io grp;
+ uint64_t __otx2_io ws_pc;
+ uint64_t __otx2_io ext_pc;
+ uint64_t __otx2_io wa_pc;
+ uint64_t __otx2_io ts_pc;
+ uint64_t __otx2_io ds_pc;
+ uint64_t __otx2_io dq_pc;
+ uint64_t __otx2_io aw_status;
+ uint64_t __otx2_io page_cnt;
+};
+
+struct sso_hws_stats {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io hws;
+ uint64_t __otx2_io arbitration;
+};
+
+/* CPT mailbox error codes
+ * Range 901 - 1000.
+ */
+enum cpt_af_status {
+ CPT_AF_ERR_PARAM = -901,
+ CPT_AF_ERR_GRP_INVALID = -902,
+ CPT_AF_ERR_LF_INVALID = -903,
+ CPT_AF_ERR_ACCESS_DENIED = -904,
+ CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
+};
+
+/* CPT mbox message formats */
+
+struct cpt_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io reg_offset;
+ uint64_t __otx2_io *ret_val;
+ uint64_t __otx2_io val;
+ uint8_t __otx2_io is_write;
+};
+
+struct cpt_set_crypto_grp_req_msg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io crypto_eng_grp;
+};
+
+struct cpt_lf_alloc_req_msg {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io nix_pf_func;
+ uint16_t __otx2_io sso_pf_func;
+};
+
+struct cpt_lf_alloc_rsp_msg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io crypto_eng_grp;
+};
+
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io enable;
+ uint8_t __otx2_io slot;
+ uint8_t __otx2_io dir;
+ uint16_t __otx2_io sso_pf_func; /* Inbound path SSO_PF_FUNC */
+ uint16_t __otx2_io nix_pf_func; /* Outbound path NIX_PF_FUNC */
+};
+
+struct cpt_rx_inline_lf_cfg_msg {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io sso_pf_func;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID 0xFFFF
+#define NPC_MCAM_INVALID_MAP 0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+ NPC_MCAM_INVALID_REQ = -701,
+ NPC_MCAM_ALLOC_DENIED = -702,
+ NPC_MCAM_ALLOC_FAILED = -703,
+ NPC_MCAM_PERM_DENIED = -704,
+ NPC_AF_ERR_HIGIG_CONFIG_FAIL = -705,
+};
+
+struct npc_mcam_alloc_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES 256
+ uint8_t __otx2_io contig; /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO 0
+#define NPC_MCAM_LOWER_PRIO 1
+#define NPC_MCAM_HIGHER_PRIO 2
+ uint8_t __otx2_io priority; /* Lower or higher w.r.t ref_entry */
+ uint16_t __otx2_io ref_entry;
+ uint16_t __otx2_io count; /* Number of entries requested */
+};
+
+struct npc_mcam_alloc_entry_rsp {
+ struct mbox_msghdr hdr;
+ /* Entry alloc'ed or start index if contiguous.
+ * Invalid in case of non-contiguous.
+ */
+ uint16_t __otx2_io entry;
+ uint16_t __otx2_io count; /* Number of entries allocated */
+ uint16_t __otx2_io free_count; /* Number of entries available */
+ uint16_t __otx2_io entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
+
+struct npc_mcam_free_entry_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io entry; /* Entry index to be freed */
+ uint8_t __otx2_io all; /* Free all entries alloc'ed to this PFVF */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max key width */
+ uint64_t __otx2_io kw[NPC_MAX_KWS_IN_KEY];
+ uint64_t __otx2_io kw_mask[NPC_MAX_KWS_IN_KEY];
+ uint64_t __otx2_io action;
+ uint64_t __otx2_io vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ uint16_t __otx2_io entry; /* MCAM entry to write this match key */
+ uint16_t __otx2_io cntr; /* Counter for this MCAM entry */
+ uint8_t __otx2_io intf; /* Rx or Tx interface */
+ uint8_t __otx2_io enable_entry;/* Enable this MCAM entry ? */
+ uint8_t __otx2_io set_cntr; /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io entry;
+};
+
+struct npc_mcam_shift_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS 64
+ uint16_t __otx2_io curr_entry[NPC_MCAM_MAX_SHIFTS];
+ uint16_t __otx2_io new_entry[NPC_MCAM_MAX_SHIFTS];
+ uint16_t __otx2_io shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+ struct mbox_msghdr hdr;
+ /* Index in 'curr_entry', not entry itself */
+ uint16_t __otx2_io failed_entry_idx;
+};
+
+struct npc_mcam_alloc_counter_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io contig; /* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS 64
+ uint16_t __otx2_io count; /* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+ struct mbox_msghdr hdr;
+ /* Counter alloc'ed or start idx if contiguous.
+ * Invalid in case of non-contiguous.
+ */
+ uint16_t __otx2_io cntr;
+ uint16_t __otx2_io count; /* Number of counters allocated */
+ uint16_t __otx2_io cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io cntr; /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+ struct mbox_msghdr hdr;
+ /* valid only while fetching counter's stats */
+ uint64_t __otx2_io stat;
+};
+
+struct npc_mcam_unmap_counter_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io cntr;
+ uint16_t __otx2_io entry; /* Entry and counter to be unmapped */
+ uint8_t __otx2_io all; /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ uint16_t __otx2_io ref_entry;
+ uint8_t __otx2_io priority; /* Lower or higher w.r.t ref_entry */
+ uint8_t __otx2_io intf; /* Rx or Tx interface */
+ uint8_t __otx2_io enable_entry;/* Enable this MCAM entry ? */
+ uint8_t __otx2_io alloc_cntr; /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io entry;
+ uint16_t __otx2_io cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
+ uint64_t __otx2_io tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF 2
+#define NPC_MAX_LID 8
+#define NPC_MAX_LT 16
+#define NPC_MAX_LD 2
+#define NPC_MAX_LFL 16
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ uint64_t __otx2_io kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ uint64_t __otx2_io
+ intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ uint64_t __otx2_io
+ intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+#define MKEX_NAME_LEN 128
+ uint8_t __otx2_io mkex_pfl_name[MKEX_NAME_LEN];
+};
+
+enum header_fields {
+ NPC_DMAC,
+ NPC_SMAC,
+ NPC_ETYPE,
+ NPC_OUTER_VID,
+ NPC_TOS,
+ NPC_SIP_IPV4,
+ NPC_DIP_IPV4,
+ NPC_SIP_IPV6,
+ NPC_DIP_IPV6,
+ NPC_SPORT_TCP,
+ NPC_DPORT_TCP,
+ NPC_SPORT_UDP,
+ NPC_DPORT_UDP,
+ NPC_FDSA_VAL,
+ NPC_HEADER_FIELDS_MAX,
+};
+
+struct flow_msg {
+ unsigned char __otx2_io dmac[6];
+ unsigned char __otx2_io smac[6];
+ uint16_t __otx2_io etype;
+ uint16_t __otx2_io vlan_etype;
+ uint16_t __otx2_io vlan_tci;
+ union {
+ uint32_t __otx2_io ip4src;
+ uint32_t __otx2_io ip6src[4];
+ };
+ union {
+ uint32_t __otx2_io ip4dst;
+ uint32_t __otx2_io ip6dst[4];
+ };
+ uint8_t __otx2_io tos;
+ uint8_t __otx2_io ip_ver;
+ uint8_t __otx2_io ip_proto;
+ uint8_t __otx2_io tc;
+ uint16_t __otx2_io sport;
+ uint16_t __otx2_io dport;
+};
+
+struct npc_install_flow_req {
+ struct mbox_msghdr hdr;
+ struct flow_msg packet;
+ struct flow_msg mask;
+ uint64_t __otx2_io features;
+ uint16_t __otx2_io entry;
+ uint16_t __otx2_io channel;
+ uint8_t __otx2_io intf;
+ uint8_t __otx2_io set_cntr;
+ uint8_t __otx2_io default_rule;
+ /* Overwrite(0) or append(1) flow to default rule? */
+ uint8_t __otx2_io append;
+ uint16_t __otx2_io vf;
+ /* action */
+ uint32_t __otx2_io index;
+ uint16_t __otx2_io match_id;
+ uint8_t __otx2_io flow_key_alg;
+ uint8_t __otx2_io op;
+ /* vtag action */
+ uint8_t __otx2_io vtag0_type;
+ uint8_t __otx2_io vtag0_valid;
+ uint8_t __otx2_io vtag1_type;
+ uint8_t __otx2_io vtag1_valid;
+
+ /* vtag tx action */
+ uint16_t __otx2_io vtag0_def;
+ uint8_t __otx2_io vtag0_op;
+ uint16_t __otx2_io vtag1_def;
+ uint8_t __otx2_io vtag1_op;
+};
+
+struct npc_install_flow_rsp {
+ struct mbox_msghdr hdr;
+ /* Negative if no counter else counter number */
+ int __otx2_io counter;
+};
+
+struct npc_delete_flow_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io entry;
+ uint16_t __otx2_io start; /* Disable range of entries */
+ uint16_t __otx2_io end;
+ uint8_t __otx2_io all; /* PF + VFs */
+};
+
+struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ /* MCAM entry to read */
+ uint16_t __otx2_io entry;
+};
+
+struct npc_mcam_read_entry_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ uint8_t __otx2_io intf;
+ uint8_t __otx2_io enable;
+};
+
+/* TIM mailbox error codes
+ * Range 801 - 900.
+ */
+enum tim_af_status {
+ TIM_AF_NO_RINGS_LEFT = -801,
+ TIM_AF_INVALID_NPA_PF_FUNC = -802,
+ TIM_AF_INVALID_SSO_PF_FUNC = -803,
+ TIM_AF_RING_STILL_RUNNING = -804,
+ TIM_AF_LF_INVALID = -805,
+ TIM_AF_CSIZE_NOT_ALIGNED = -806,
+ TIM_AF_CSIZE_TOO_SMALL = -807,
+ TIM_AF_CSIZE_TOO_BIG = -808,
+ TIM_AF_INTERVAL_TOO_SMALL = -809,
+ TIM_AF_INVALID_BIG_ENDIAN_VALUE = -810,
+ TIM_AF_INVALID_CLOCK_SOURCE = -811,
+ TIM_AF_GPIO_CLK_SRC_NOT_ENABLED = -812,
+ TIM_AF_INVALID_BSIZE = -813,
+ TIM_AF_INVALID_ENABLE_PERIODIC = -814,
+ TIM_AF_INVALID_ENABLE_DONTFREE = -815,
+ TIM_AF_ENA_DONTFRE_NSET_PERIODIC = -816,
+ TIM_AF_RING_ALREADY_DISABLED = -817,
+};
+
+enum tim_clk_srcs {
+ TIM_CLK_SRCS_TENNS = 0,
+ TIM_CLK_SRCS_GPIO = 1,
+ TIM_CLK_SRCS_GTI = 2,
+ TIM_CLK_SRCS_PTP = 3,
+ TIM_CLK_SRSC_INVALID,
+};
+
+enum tim_gpio_edge {
+ TIM_GPIO_NO_EDGE = 0,
+ TIM_GPIO_LTOH_TRANS = 1,
+ TIM_GPIO_HTOL_TRANS = 2,
+ TIM_GPIO_BOTH_TRANS = 3,
+ TIM_GPIO_INVALID,
+};
+
+enum ptp_op {
+ PTP_OP_ADJFINE = 0, /* adjfine(req.scaled_ppm); */
+ PTP_OP_GET_CLOCK = 1, /* rsp.clk = get_clock() */
+};
+
+struct ptp_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io op;
+ int64_t __otx2_io scaled_ppm;
+ uint8_t __otx2_io is_pmu;
+};
+
+struct ptp_rsp {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io clk;
+ uint64_t __otx2_io tsc;
+};
+
+struct get_hw_cap_rsp {
+ struct mbox_msghdr hdr;
+ /* Schq mapping fixed or flexible */
+ uint8_t __otx2_io nix_fixed_txschq_mapping;
+ uint8_t __otx2_io nix_shaping; /* Is shaping and coloring supported */
+};
+
+struct ndc_sync_op {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io nix_lf_tx_sync;
+ uint8_t __otx2_io nix_lf_rx_sync;
+ uint8_t __otx2_io npa_lf_sync;
+};
+
+struct tim_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io ring;
+ uint16_t __otx2_io npa_pf_func;
+ uint16_t __otx2_io sso_pf_func;
+};
+
+struct tim_ring_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io ring;
+};
+
+struct tim_config_req {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io ring;
+ uint8_t __otx2_io bigendian;
+ uint8_t __otx2_io clocksource;
+ uint8_t __otx2_io enableperiodic;
+ uint8_t __otx2_io enabledontfreebuffer;
+ uint32_t __otx2_io bucketsize;
+ uint32_t __otx2_io chunksize;
+ uint32_t __otx2_io interval;
+};
+
+struct tim_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io tenns_clk;
+};
+
+struct tim_enable_rsp {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io timestarted;
+ uint32_t __otx2_io currentbucket;
+};
+
+__rte_internal
+const char *otx2_mbox_id2name(uint16_t id);
+int otx2_mbox_id2size(uint16_t id);
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
+ int direction, int ndevs, uint64_t intr_offset);
+void otx2_mbox_fini(struct otx2_mbox *mbox);
+__rte_internal
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+__rte_internal
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo);
+__rte_internal
+int otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg);
+__rte_internal
+int otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
+ uint32_t tmo);
+int otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid);
+__rte_internal
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp);
+
+static inline struct mbox_msghdr *
+otx2_mbox_alloc_msg(struct otx2_mbox *mbox, int devid, int size)
+{
+ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
+
+static inline void
+otx2_mbox_req_init(uint16_t mbox_id, void *msghdr)
+{
+ struct mbox_msghdr *hdr = msghdr;
+
+ hdr->sig = OTX2_MBOX_REQ_SIG;
+ hdr->ver = OTX2_MBOX_VERSION;
+ hdr->id = mbox_id;
+ hdr->pcifunc = 0;
+}
+
+static inline void
+otx2_mbox_rsp_init(uint16_t mbox_id, void *msghdr)
+{
+ struct mbox_msghdr *hdr = msghdr;
+
+ hdr->sig = OTX2_MBOX_RSP_SIG;
+ hdr->rc = -ETIMEDOUT;
+ hdr->id = mbox_id;
+}
+
+static inline bool
+otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+
+static inline int
+otx2_mbox_process(struct otx2_mbox *mbox)
+{
+ otx2_mbox_msg_send(mbox, 0);
+ return otx2_mbox_get_rsp(mbox, 0, NULL);
+}
+
+static inline int
+otx2_mbox_process_msg(struct otx2_mbox *mbox, void **msg)
+{
+ otx2_mbox_msg_send(mbox, 0);
+ return otx2_mbox_get_rsp(mbox, 0, msg);
+}
+
+static inline int
+otx2_mbox_process_tmo(struct otx2_mbox *mbox, uint32_t tmo)
+{
+ otx2_mbox_msg_send(mbox, 0);
+ return otx2_mbox_get_rsp_tmo(mbox, 0, NULL, tmo);
+}
+
+static inline int
+otx2_mbox_process_msg_tmo(struct otx2_mbox *mbox, void **msg, uint32_t tmo)
+{
+ otx2_mbox_msg_send(mbox, 0);
+ return otx2_mbox_get_rsp_tmo(mbox, 0, msg, tmo);
+}
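+/* Illustrative sketch, not part of the original source: a synchronous
+ * round trip using the helpers above. MBOX_MSG_ID and struct msg_rsp are
+ * hypothetical placeholders for a concrete message id/response type.
+ *
+ *	struct mbox_msghdr *req;
+ *	struct msg_rsp *rsp;    // hypothetical response type
+ *
+ *	req = otx2_mbox_alloc_msg(mbox, 0, sizeof(*req));
+ *	otx2_mbox_req_init(MBOX_MSG_ID, req);    // hypothetical id
+ *	rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
+ *	if (rc == 0)
+ *		rc = rsp->hdr.rc;   // AF's return code for this request
+ */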
+
+int otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pf_func /* out */);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pf_func,
+ uint16_t id);
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static inline struct _req_type \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct otx2_mbox *mbox) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ mbox, 0, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ otx2_mbox_dbg("id=0x%x (%s)", \
+ req->hdr.id, otx2_mbox_id2name(req->hdr.id)); \
+ return req; \
+}
+
+MBOX_MESSAGES
+#undef M
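+/* For illustration, not part of the original source: for a MBOX_MESSAGES
+ * entry such as M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc,
+ * nix_txsch_alloc_req, nix_txsch_alloc_rsp) -- the id and names are
+ * assumptions here -- the macro above expands into an
+ * otx2_mbox_alloc_msg_nix_txsch_alloc() helper that reserves room for both
+ * request and response and stamps the request header.
+ */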
+
+/* This is required for copy operations from device memory, which do not work
+ * on addresses unaligned to 16B because of specific optimizations in libc
+ * memcpy; hence the plain byte-wise loop below.
+ */
+static inline volatile void *
+otx2_mbox_memcpy(volatile void *d, const volatile void *s, size_t l)
+{
+ const volatile uint8_t *sb;
+ volatile uint8_t *db;
+ size_t i;
+
+ if (!d || !s)
+ return NULL;
+ db = (volatile uint8_t *)d;
+ sb = (const volatile uint8_t *)s;
+ for (i = 0; i < l; i++)
+ db[i] = sb[i];
+ return d;
+}
+
+/* This is required for memory operations on device memory, which do not
+ * work on addresses unaligned to 16B because of specific optimizations in
+ * libc memset; hence the plain byte-wise loop below.
+ */
+static inline void
+otx2_mbox_memset(volatile void *d, uint8_t val, size_t l)
+{
+ volatile uint8_t *db;
+ size_t i = 0;
+
+ if (!d || !l)
+ return;
+ db = (volatile uint8_t *)d;
+ for (i = 0; i < l; i++)
+ db[i] = val;
+}
+
+#endif /* __OTX2_MBOX_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.c
new file mode 100644
index 000000000..6e9643c38
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.c
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_atomic.h>
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_spinlock.h>
+
+#include "otx2_common.h"
+#include "otx2_sec_idev.h"
+
+static struct otx2_sec_idev_cfg sec_cfg[OTX2_MAX_INLINE_PORTS];
+
+/**
+ * @internal
+ * Check whether the rte_eth_dev is a security offload capable otx2_eth_dev
+ */
+uint8_t
+otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_PF ||
+ pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_VF ||
+ pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_AF_VF)
+ return 1;
+
+ return 0;
+}
+
+int
+otx2_sec_idev_cfg_init(int port_id)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ int i;
+
+ cfg = &sec_cfg[port_id];
+ cfg->tx_cpt_idx = 0;
+ rte_spinlock_init(&cfg->tx_cpt_lock);
+
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ cfg->tx_cpt[i].qp = NULL;
+ rte_atomic16_set(&cfg->tx_cpt[i].ref_cnt, 0);
+ }
+
+ return 0;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ int i, ret;
+
+ if (qp == NULL || port_id >= OTX2_MAX_INLINE_PORTS)
+ return -EINVAL;
+
+ cfg = &sec_cfg[port_id];
+
+ /* Find a free slot to save CPT LF */
+
+ rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ if (cfg->tx_cpt[i].qp == NULL) {
+ cfg->tx_cpt[i].qp = qp;
+ ret = 0;
+ goto unlock;
+ }
+ }
+
+ ret = -EINVAL;
+
+unlock:
+ rte_spinlock_unlock(&cfg->tx_cpt_lock);
+ return ret;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ uint16_t port_id;
+ int i, ret;
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
+ cfg = &sec_cfg[port_id];
+
+ rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ if (cfg->tx_cpt[i].qp != qp)
+ continue;
+
+ /* Don't free if the QP is in use by any sec session */
+ if (rte_atomic16_read(&cfg->tx_cpt[i].ref_cnt)) {
+ ret = -EBUSY;
+ } else {
+ cfg->tx_cpt[i].qp = NULL;
+ ret = 0;
+ }
+
+ goto unlock;
+ }
+
+ rte_spinlock_unlock(&cfg->tx_cpt_lock);
+ }
+
+ return -ENOENT;
+
+unlock:
+ rte_spinlock_unlock(&cfg->tx_cpt_lock);
+ return ret;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ uint16_t index;
+ int i, ret;
+
+ if (port_id >= OTX2_MAX_INLINE_PORTS || qp == NULL)
+ return -EINVAL;
+
+ cfg = &sec_cfg[port_id];
+
+ rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+ index = cfg->tx_cpt_idx;
+
+ /* Get the next index with valid data */
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ if (cfg->tx_cpt[index].qp != NULL)
+ break;
+ index = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
+ }
+
+ if (i >= OTX2_MAX_CPT_QP_PER_PORT) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ *qp = cfg->tx_cpt[index].qp;
+ rte_atomic16_inc(&cfg->tx_cpt[index].ref_cnt);
+
+ cfg->tx_cpt_idx = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&cfg->tx_cpt_lock);
+ return ret;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_put(struct otx2_cpt_qp *qp)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ uint16_t port_id;
+ int i;
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
+ cfg = &sec_cfg[port_id];
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ if (cfg->tx_cpt[i].qp == qp) {
+ rte_atomic16_dec(&cfg->tx_cpt[i].ref_cnt);
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
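+/* Illustrative sketch, not part of the original source: the get/put pair
+ * above hands out TX CPT queue pairs round-robin with per-slot reference
+ * counting. A datapath caller would typically do:
+ *
+ *	struct otx2_cpt_qp *qp;
+ *
+ *	if (otx2_sec_idev_tx_cpt_qp_get(port_id, &qp) == 0) {
+ *		// ... submit inline IPsec work on qp ...
+ *		otx2_sec_idev_tx_cpt_qp_put(qp);  // drop the reference
+ *	}
+ *
+ * otx2_sec_idev_tx_cpt_qp_remove() refuses to detach a queue pair while
+ * any such reference is outstanding (-EBUSY).
+ */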
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.h
new file mode 100644
index 000000000..89cdaf66a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_SEC_IDEV_H_
+#define _OTX2_SEC_IDEV_H_
+
+#include <rte_ethdev.h>
+
+#define OTX2_MAX_CPT_QP_PER_PORT 64
+#define OTX2_MAX_INLINE_PORTS 64
+
+struct otx2_cpt_qp;
+
+struct otx2_sec_idev_cfg {
+ struct {
+ struct otx2_cpt_qp *qp;
+ rte_atomic16_t ref_cnt;
+ } tx_cpt[OTX2_MAX_CPT_QP_PER_PORT];
+
+ uint16_t tx_cpt_idx;
+ rte_spinlock_t tx_cpt_lock;
+};
+
+__rte_internal
+uint8_t otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev);
+
+__rte_internal
+int otx2_sec_idev_cfg_init(int port_id);
+
+__rte_internal
+int otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp);
+
+__rte_internal
+int otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp);
+
+__rte_internal
+int otx2_sec_idev_tx_cpt_qp_put(struct otx2_cpt_qp *qp);
+
+__rte_internal
+int otx2_sec_idev_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp);
+
+#endif /* _OTX2_SEC_IDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/rte_common_octeontx2_version.map b/src/spdk/dpdk/drivers/common/octeontx2/rte_common_octeontx2_version.map
new file mode 100644
index 000000000..d26bd7117
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/rte_common_octeontx2_version.map
@@ -0,0 +1,45 @@
+DPDK_20.0 {
+ local: *;
+};
+
+INTERNAL {
+ global:
+
+ otx2_dev_active_vfs;
+ otx2_dev_fini;
+ otx2_dev_priv_init;
+ otx2_disable_irqs;
+ otx2_eth_dev_is_sec_capable;
+ otx2_intra_dev_get_cfg;
+ otx2_logtype_base;
+ otx2_logtype_dpi;
+ otx2_logtype_ep;
+ otx2_logtype_mbox;
+ otx2_logtype_nix;
+ otx2_logtype_npa;
+ otx2_logtype_npc;
+ otx2_logtype_sso;
+ otx2_logtype_tim;
+ otx2_logtype_tm;
+ otx2_mbox_alloc_msg_rsp;
+ otx2_mbox_get_rsp;
+ otx2_mbox_get_rsp_tmo;
+ otx2_mbox_id2name;
+ otx2_mbox_msg_send;
+ otx2_mbox_wait_for_rsp;
+ otx2_npa_lf_active;
+ otx2_npa_lf_obj_get;
+ otx2_npa_lf_obj_ref;
+ otx2_npa_pf_func_get;
+ otx2_npa_set_defaults;
+ otx2_parse_common_devargs;
+ otx2_register_irq;
+ otx2_sec_idev_cfg_init;
+ otx2_sec_idev_tx_cpt_qp_add;
+ otx2_sec_idev_tx_cpt_qp_get;
+ otx2_sec_idev_tx_cpt_qp_put;
+ otx2_sec_idev_tx_cpt_qp_remove;
+ otx2_sso_pf_func_get;
+ otx2_sso_pf_func_set;
+ otx2_unregister_irq;
+};
diff --git a/src/spdk/dpdk/drivers/common/qat/Makefile b/src/spdk/dpdk/drivers/common/qat/Makefile
new file mode 100644
index 000000000..28bd5668f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/Makefile
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# build directories
+QAT_CRYPTO_DIR := $(RTE_SDK)/drivers/crypto/qat
+QAT_COMPRESS_DIR := $(RTE_SDK)/drivers/compress/qat
+VPATH=$(QAT_CRYPTO_DIR):$(QAT_COMPRESS_DIR)
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I$(QAT_CRYPTO_DIR)
+CFLAGS += -I$(QAT_COMPRESS_DIR)
+
+
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+ LDLIBS += -lrte_compressdev
+ SRCS-y += qat_comp.c
+ SRCS-y += qat_comp_pmd.c
+ build_qat = yes
+endif
+
+# library symmetric crypto source files
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_ASYM),y)
+ LDLIBS += -lrte_cryptodev
+ LDLIBS += -lcrypto
+ CFLAGS += -DBUILD_QAT_ASYM
+ SRCS-y += qat_asym.c
+ SRCS-y += qat_asym_pmd.c
+ build_qat = yes
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
+ LDLIBS += -lrte_cryptodev
+ LDLIBS += -lcrypto
+ CFLAGS += -DBUILD_QAT_SYM
+ SRCS-y += qat_sym.c
+ SRCS-y += qat_sym_session.c
+ SRCS-y += qat_sym_pmd.c
+ build_qat = yes
+endif
+endif
+
+ifdef build_qat
+
+ # library name
+ LIB = librte_pmd_qat.a
+
+ # build flags
+ CFLAGS += $(WERROR_FLAGS)
+ CFLAGS += -O3
+
+ # library common source files
+ SRCS-y += qat_device.c
+ SRCS-y += qat_common.c
+ SRCS-y += qat_logs.c
+ SRCS-y += qat_qp.c
+
+ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+ LDLIBS += -lrte_pci -lrte_bus_pci
+
+ # export include files
+ SYMLINK-y-include +=
+
+ # versioning export map
+ EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/qat/meson.build b/src/spdk/dpdk/drivers/common/qat/meson.build
new file mode 100644
index 000000000..8de249289
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# This does not build a driver, but instead holds common files for
+# the crypto and compression drivers.
+build = false
+reason = '' # sentinel value to suppress printout
+qat_deps = ['bus_pci']
+qat_sources = files('qat_common.c',
+ 'qat_qp.c',
+ 'qat_device.c',
+ 'qat_logs.c')
+qat_includes = [include_directories('.', 'qat_adf')]
+qat_ext_deps = []
+qat_cflags = []
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h
new file mode 100644
index 000000000..1eef5513f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include <rte_io.h>
+
+/* CSR write macro */
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+ rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+ rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+#define ADF_RING_EMPTY_SIG_BYTE 0x7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Maximum number of qps on a device for any service type */
+#define ADF_MAX_QPS_ON_ANY_SERVICE 2
+#define ADF_RING_DIR_TX 0
+#define ADF_RING_DIR_RX 1
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+ ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
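+/* Worked example, illustrative only: with ADF_RING_SIZE_16K (0x08, i.e.
+ * 16384 bytes) and ADF_MSG_SIZE_64 (0x02, i.e. 64 bytes),
+ * ADF_SIZE_TO_POW(0x02) = 2, so ADF_MAX_INFLIGHTS evaluates to
+ * (((1 << 7) << 3) >> 2) - 1 = 255, matching the 16384/64 = 256 message
+ * slots with one slot kept free.
+ */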
+#define BUILD_RING_CONFIG(size) \
+ ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+ ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+#endif /*ADF_TRANSPORT_ACCESS_MACROS_H */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h
new file mode 100644
index 000000000..be10fc9bd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <sys/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+ (((val) & (mask)) << (bitpos))); }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+ (((flags) >> (bitpos)) & (mask))
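+/* Worked example, illustrative only: with the valid-flag bitpos/mask pair
+ * defined further down (ICP_QAT_FW_COMN_VALID_FLAG_BITPOS = 7, mask 0x1),
+ * QAT_FIELD_SET(hdr_flags, 1, 7, 0x1) clears bit 7 of hdr_flags and ORs in
+ * (1 & 0x1) << 7; QAT_FIELD_GET(hdr_flags, 7, 0x1) then reads back 1.
+ */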
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+ ICP_QAT_FW_COMN_RESP_SERV_NULL,
+ ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+ ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+ ICP_QAT_FW_COMN_REQ_NULL = 0,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+ ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t serv_specif_fields[4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+ uint64_t opaque_data;
+ uint64_t src_data_addr;
+ uint64_t dest_data_addr;
+ uint32_t src_length;
+ uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+ uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+ uint8_t resrvd1;
+ uint8_t service_cmd_id;
+ uint8_t service_type;
+ uint8_t hdr_flags;
+ uint16_t serv_specif_flags;
+ uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+ uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+ uint8_t xlat_err_code;
+ uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+ uint8_t resrvd1;
+ uint8_t service_id;
+ uint8_t response_type;
+ uint8_t hdr_flags;
+ struct icp_qat_fw_comn_error comn_error;
+ uint8_t comn_status;
+ uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_hdr;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+ (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+#define QAT_COMN_EXT_FLAGS_BITPOS 8
+#define QAT_COMN_EXT_FLAGS_MASK 0x1
+#define QAT_COMN_EXT_FLAGS_USED 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+ ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+ | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+ QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val) \
+ do { \
+ (next_curr_id) = \
+ (((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ (((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK)); \
+ } while (0)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val) \
+ do { \
+ (next_curr_id) = \
+ (((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); \
+ } while (0)
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
+#define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+ QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+ QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+ QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \
+ QAT_COMN_RESP_XLT_WA_APPLIED_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
+ QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14
+#define ERR_CODE_HW_INCOMPLETE_FILE -15
+#define ERR_CODE_SSM_ERROR -16
+#define ERR_CODE_ENDPOINT_ERROR -17
+#define ERR_CODE_CNV_ERROR -18
+#define ERR_CODE_EMPTY_DYM_BLOCK -19
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22
+#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23
+
+enum icp_qat_fw_slice {
+ ICP_QAT_FW_SLICE_NULL = 0,
+ ICP_QAT_FW_SLICE_CIPHER = 1,
+ ICP_QAT_FW_SLICE_AUTH = 2,
+ ICP_QAT_FW_SLICE_DRAM_RD = 3,
+ ICP_QAT_FW_SLICE_DRAM_WR = 4,
+ ICP_QAT_FW_SLICE_COMP = 5,
+ ICP_QAT_FW_SLICE_XLAT = 6,
+ ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
new file mode 100644
index 000000000..c89a2c2fd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -0,0 +1,555 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+ ICP_QAT_FW_COMP_CMD_STATIC = 0,
+ /*!< Static Compress Request */
+
+ ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+ /*!< Dynamic Compress Request */
+
+ ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+ /*!< Decompress Request */
+
+ ICP_QAT_FW_COMP_CMD_DELIMITER
+ /**< Delimiter type */
+};
+
+/**< Flag usage */
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateless
+ */
+
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateful
+ */
+
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header data return is NOT disabled
+ * when enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header data return is disabled
+ * when enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that the use of secure RAM as an
+ * intermediate buffer is DISABLED.
+ */
+
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that the use of secure RAM as an
+ * intermediate buffer is ENABLED.
+ */
+
+/**< Flag mask & bit position */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the session type
+ */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine the session type
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for disabling type zero header write back
+ * when Enhanced autoselect best is enabled. If set firmware does
+ * not return type0 store block header, only copies src to dest.
+ * (if best output is Type0)
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disabling type zero header write back
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for flag used to disable secure ram from
+ * being used as an intermediate buffer.
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disable secure ram for use as an intermediate
+ * buffer.
+ */
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+ ret_uncomp, secure_ram) \
+ ((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \
+ << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+ (((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \
+ << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+ (((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \
+ << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+ (((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \
+ << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+ (((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \
+ << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
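+/* Worked example, illustrative only: a stateless session with no
+ * auto-select-best and secure RAM left enabled builds its flags as
+ *
+ *	ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+ *		ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ *		ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ *		ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ *		ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF)
+ *
+ * which evaluates to 0, since every selected flag value is 0.
+ */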
+
+union icp_qat_fw_comp_req_hdr_cd_pars {
+ /**< LWs 2-5 */
+ struct {
+ uint64_t content_desc_addr;
+ /**< Address of the content descriptor */
+
+ uint16_t content_desc_resrvd1;
+ /**< Content descriptor reserved field */
+
+ uint8_t content_desc_params_sz;
+ /**< Size of the content descriptor parameters in quad words.
+ * These parameters describe the session setup configuration
+ * info for the slices that this request relies upon i.e.
+ * the configuration word and cipher key needed by the cipher
+ * slice if there is a request for cipher processing.
+ */
+
+ uint8_t content_desc_hdr_resrvd2;
+ /**< Content descriptor reserved field */
+
+ uint32_t content_desc_resrvd3;
+ /**< Content descriptor reserved field */
+ } s;
+
+ struct {
+ uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /* Compression Slice Config Word */
+
+ uint32_t content_desc_resrvd4;
+ /**< Content descriptor reserved field */
+
+ } sl;
+
+};
+
+struct icp_qat_fw_comp_req_params {
+ /**< LW 14 */
+ uint32_t comp_len;
+ /**< Size of input to process in bytes. Note: only EOP requests can be
+ * odd-sized for decompression; IA must set the LSB to zero for odd-sized
+ * intermediate inputs.
+ */
+
+ /**< LW 15 */
+ uint32_t out_buffer_sz;
+ /**< Size of output buffer in bytes */
+
+ /**< LW 16 */
+ uint32_t initial_crc32;
+ /**< CRC of previously processed bytes */
+
+ /**< LW 17 */
+ uint32_t initial_adler;
+ /**< Adler of previously processed bytes */
+
+ /**< LW 18 */
+ uint32_t req_par_flags;
+
+ /**< LW 19 */
+ uint32_t rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr) \
+ ((((sop)&ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \
+ (((eop)&ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \
+ (((bfinal)&ICP_QAT_FW_COMP_BFINAL_MASK) \
+ << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+ ((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \
+ ((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \
+ << ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_SOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_EOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag to indicate to firmware that this is not the last block
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag to indicate to firmware that this is the last block
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv check is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv check IS to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO CNV recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that CNV recovery IS to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for SOP
+ */
+
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine SOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for EOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine EOP
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV Recovery bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV Recovery bit
+ */
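+
+/* Illustrative usage sketch, not part of the original interface: building
+ * the LW 18 request-parameter flags for a single stateless request that is
+ * both SOP and EOP, carries the final deflate block, and asks for a
+ * compress-and-verify (CNV) check with recovery. All identifiers are the
+ * flag, mask and bit-position definitions above; the function name is
+ * hypothetical.
+ */
+static inline uint32_t
+qat_comp_example_req_par_flags(void)
+{
+	return ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_SOP,
+			ICP_QAT_FW_COMP_EOP,
+			ICP_QAT_FW_COMP_BFINAL,
+			ICP_QAT_FW_COMP_CNV,
+			ICP_QAT_FW_COMP_CNV_RECOVERY);
+}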
+
+struct icp_qat_fw_xlt_req_params {
+ /**< LWs 20-21 */
+ uint64_t inter_buff_ptr;
+ /**< This field specifies the physical address of an intermediate
+ * buffer SGL array. The array contains a pair of 64-bit
+ * intermediate buffer pointers to SGL buffer descriptors, one pair
+ * per CPM. Please refer to the CPM1.6 Firmware Interface HLD
+ * specification for more details.
+ */
+};
+
+
+struct icp_qat_fw_comp_cd_hdr {
+ /**< LW 24 */
+ uint16_t ram_bank_flags;
+ /**< Flags to show which ram banks to access */
+
+ uint8_t comp_cfg_offset;
+ /**< Quad word offset from the content descriptor parameters address
+ * to the parameters for the compression processing
+ */
+
+ uint8_t next_curr_id;
+ /**< This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the compressed data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after compression.
+ * Current Id: Initialised with the compression slice type
+ */
+
+ /**< LW 25 */
+ uint32_t resrvd;
+
+ /**< LWs 26-27 */
+ uint64_t comp_state_addr;
+ /**< Pointer to compression state */
+
+ /**< LWs 28-29 */
+ uint64_t ram_banks_addr;
+ /**< Pointer to banks */
+
+};
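+
+/* Illustrative sketch, not part of the original interface: packing the
+ * next/current slice ids into next_curr_id, with the next id in the most
+ * significant nibble as described above. ICP_QAT_FW_SLICE_COMP and
+ * ICP_QAT_FW_SLICE_DRAM_WR are assumed to come from the slice enum in
+ * icp_qat_fw.h.
+ *
+ *	struct icp_qat_fw_comp_cd_hdr cd_hdr = { 0 };
+ *
+ *	cd_hdr.next_curr_id = (ICP_QAT_FW_SLICE_DRAM_WR << 4) |
+ *			      ICP_QAT_FW_SLICE_COMP;
+ */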
+
+
+struct icp_qat_fw_xlt_cd_hdr {
+ /**< LW 30 */
+ uint16_t resrvd1;
+ /**< Reserved field, assumed to be set to 0 */
+
+ uint8_t resrvd2;
+ /**< Reserved field, assumed to be set to 0 */
+
+ uint8_t next_curr_id;
+ /**< This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the translated data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after compression
+ * Current Id: Initialised with the translation slice type
+ */
+
+ /**< LW 31 */
+ uint32_t resrvd3;
+ /**< Reserved and should be set to zero, needed for quadword
+ * alignment
+ */
+};
+
+struct icp_qat_fw_comp_req {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ /**< Common request header - for Service Command Id,
+ * use service-specific Compression Command Id.
+ * Service Specific Flags - use Compression Command Flags
+ */
+
+ /**< LWs 2-5 */
+ union icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
+ /**< Compression service-specific content descriptor field which points
+ * either to a content descriptor parameter block or contains the
+ * compression slice config word.
+ */
+
+ /**< LWs 6-13 */
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ /**< Common request middle section */
+
+ /**< LWs 14-19 */
+ struct icp_qat_fw_comp_req_params comp_pars;
+ /**< Compression request parameters block */
+
+ /**< LWs 20-21 */
+ union {
+ struct icp_qat_fw_xlt_req_params xlt_pars;
+ /**< Translation request parameters block */
+ uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved if not used for translation */
+
+ } u1;
+
+ /**< LWs 22-23 */
+ union {
+ uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved - not used if Batch and Pack is disabled.*/
+
+ uint64_t bnp_res_table_addr;
+ /**< A generic pointer to the unbounded list of
+ * icp_qat_fw_resp_comp_pars members. This pointer is only
+ * used when Batch and Pack is enabled.
+ */
+ } u3;
+
+ /**< LWs 24-29 */
+ struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
+ /**< Compression request content descriptor control block header */
+
+ /**< LWs 30-31 */
+ union {
+ struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
+ /**< Translation request content descriptor
+ * control block header
+ */
+
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved if not used for translation */
+ } u2;
+};
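+
+/* Illustrative layout note, not part of the original interface: for a
+ * dynamic deflate request that is translated after compression, u1.xlt_pars
+ * and u2.xlt_cd_ctrl are the active union members; for static deflate the
+ * reserved members apply and should be zeroed. A minimal, hypothetical
+ * sketch (inter_sgl_phys is a caller-provided physical address):
+ *
+ *	struct icp_qat_fw_comp_req req = { 0 };
+ *
+ *	req.u1.xlt_pars.inter_buff_ptr = inter_sgl_phys;
+ */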
+
+struct icp_qat_fw_resp_comp_pars {
+ /**< LW 4 */
+ uint32_t input_byte_counter;
+ /**< Input byte counter */
+
+ /**< LW 5 */
+ uint32_t output_byte_counter;
+ /**< Output byte counter */
+
+ /**< LWs 6-7 */
+ union {
+ uint64_t curr_chksum;
+ struct {
+ /**< LW 6 */
+ uint32_t curr_crc32;
+ /**< LW 7 */
+ uint32_t curr_adler_32;
+ };
+ };
+};
+
+struct icp_qat_fw_comp_resp {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ /**< Common interface response format; see icp_qat_fw.h */
+
+ /**< LWs 2-3 */
+ uint64_t opaque_data;
+ /**< Opaque data passed from the request to the response message */
+
+ /**< LWs 4-7 */
+ struct icp_qat_fw_resp_comp_pars comp_resp_pars;
+ /**< Common response params (checksums and byte counts) */
+};
+
+/* RAM Bank definitions */
+#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
+
+#define QAT_FW_COMP_BANK_I_BITPOS 8
+#define QAT_FW_COMP_BANK_H_BITPOS 7
+#define QAT_FW_COMP_BANK_G_BITPOS 6
+#define QAT_FW_COMP_BANK_F_BITPOS 5
+#define QAT_FW_COMP_BANK_E_BITPOS 4
+#define QAT_FW_COMP_BANK_D_BITPOS 3
+#define QAT_FW_COMP_BANK_C_BITPOS 2
+#define QAT_FW_COMP_BANK_B_BITPOS 1
+#define QAT_FW_COMP_BANK_A_BITPOS 0
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comp
+ * Definition of the RAM bank enabled values
+ * @description
+ * Enumeration used to define whether a RAM bank is enabled or not
+ *
+ *****************************************************************************/
+enum icp_qat_fw_comp_bank_enabled {
+ ICP_QAT_FW_COMP_BANK_DISABLED = 0, /**< Bank disabled */
+ ICP_QAT_FW_COMP_BANK_ENABLED = 1, /**< Bank enabled */
+ ICP_QAT_FW_COMP_BANK_DELIMITER = 2 /**< Delimiter type */
+};
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_comp
+ *
+ * @description
+ * Build the RAM bank flags in the compression content descriptor
+ * which specify which banks are used to save history
+ *
+ * @param bank_i_enable
+ * @param bank_h_enable
+ * @param bank_g_enable
+ * @param bank_f_enable
+ * @param bank_e_enable
+ * @param bank_d_enable
+ * @param bank_c_enable
+ * @param bank_b_enable
+ * @param bank_a_enable
+ *****************************************************************************/
+#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable, \
+ bank_h_enable, \
+ bank_g_enable, \
+ bank_f_enable, \
+ bank_e_enable, \
+ bank_d_enable, \
+ bank_c_enable, \
+ bank_b_enable, \
+ bank_a_enable) \
+ ((((bank_i_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_I_BITPOS) | \
+ (((bank_h_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_H_BITPOS) | \
+ (((bank_g_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_G_BITPOS) | \
+ (((bank_f_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_F_BITPOS) | \
+ (((bank_e_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_E_BITPOS) | \
+ (((bank_d_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_D_BITPOS) | \
+ (((bank_c_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_C_BITPOS) | \
+ (((bank_b_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_B_BITPOS) | \
+ (((bank_a_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
+ << QAT_FW_COMP_BANK_A_BITPOS))
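+
+/* Illustrative usage sketch, not part of the original interface: enabling
+ * RAM banks A-D for history storage and leaving the rest disabled; the
+ * result feeds icp_qat_fw_comp_cd_hdr.ram_bank_flags. The function name is
+ * hypothetical.
+ */
+static inline uint16_t
+qat_comp_example_ram_bank_flags(void)
+{
+	return (uint16_t)ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_BANK_DISABLED,	/* bank I */
+			ICP_QAT_FW_COMP_BANK_DISABLED,	/* bank H */
+			ICP_QAT_FW_COMP_BANK_DISABLED,	/* bank G */
+			ICP_QAT_FW_COMP_BANK_DISABLED,	/* bank F */
+			ICP_QAT_FW_COMP_BANK_DISABLED,	/* bank E */
+			ICP_QAT_FW_COMP_BANK_ENABLED,	/* bank D */
+			ICP_QAT_FW_COMP_BANK_ENABLED,	/* bank C */
+			ICP_QAT_FW_COMP_BANK_ENABLED,	/* bank B */
+			ICP_QAT_FW_COMP_BANK_ENABLED);	/* bank A */
+}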
+
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h
new file mode 100644
index 000000000..20eb145de
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+ ICP_QAT_FW_LA_CMD_CIPHER = 0,
+ ICP_QAT_FW_LA_CMD_AUTH = 1,
+ ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+ ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+ ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+ ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+ ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+ ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+ ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+ ICP_QAT_FW_LA_CMD_MGF1 = 9,
+ ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+ ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+ ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_BITPOS 13
+#define ICP_QAT_FW_LA_SINGLE_PASS_PROTO 1
+#define QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO 2
+#define ICP_QAT_FW_LA_CCM_PROTO 1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK 0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+ cmp_auth, ret_auth, update_state, \
+ ciph_iv, ciphcfg, partial) \
+ (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+ ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+ QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+ ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+ QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+ ((proto & QAT_LA_PROTO_MASK) << \
+ QAT_LA_PROTO_BITPOS) | \
+ ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+ QAT_LA_CMP_AUTH_RES_BITPOS) | \
+ ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+ QAT_LA_RET_AUTH_RES_BITPOS) | \
+ ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+ QAT_LA_UPDATE_STATE_BITPOS) | \
+ ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+ QAT_LA_CIPH_IV_FLD_BITPOS) | \
+ ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+ ((partial & QAT_LA_PARTIAL_MASK) << \
+ QAT_LA_PARTIAL_BITPOS))
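+
+/* Illustrative usage sketch, not part of the original interface: one
+ * plausible flag combination for an AES-GCM style request with a 12-byte
+ * IV passed as 16-byte field data, the digest in the buffer, and the auth
+ * result returned. The particular field choices and the function name are
+ * for illustration only.
+ */
+static inline uint16_t
+qat_la_example_gcm_flags(void)
+{
+	return (uint16_t)ICP_QAT_FW_LA_FLAGS_BUILD(
+			0,				/* no ZUC 3G proto */
+			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER,
+			ICP_QAT_FW_LA_GCM_PROTO,
+			ICP_QAT_FW_LA_NO_CMP_AUTH_RES,
+			ICP_QAT_FW_LA_RET_AUTH_RES,
+			ICP_QAT_FW_LA_NO_UPDATE_STATE,
+			ICP_QAT_FW_CIPH_IV_16BYTE_DATA,
+			ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP,
+			ICP_QAT_FW_LA_PARTIAL_NONE);
+}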
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+ QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
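+
+/* Illustrative usage sketch, not part of the original interface: the
+ * setters update a flags word in place (QAT_FIELD_SET is assumed to come
+ * from icp_qat_fw.h), and the matching getter reads the same field back.
+ * The function name is hypothetical.
+ */
+static inline int
+qat_la_example_flag_roundtrip(void)
+{
+	uint16_t flags = 0;
+
+	ICP_QAT_FW_LA_PROTO_SET(flags, ICP_QAT_FW_LA_GCM_PROTO);
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags,
+					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+
+	return ICP_QAT_FW_LA_PROTO_GET(flags) == ICP_QAT_FW_LA_GCM_PROTO;
+}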
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t cipher_padding_sz;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+ uint32_t resrvd1;
+ uint8_t resrvd2;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t resrvd3;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd4;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id_cipher;
+ uint8_t cipher_padding_sz;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id_auth;
+ uint8_t resrvd1;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd2;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS 3
+#define ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS 4
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET 24
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ union {
+ uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ uint64_t cipher_IV_ptr;
+ uint64_t resrvd1;
+ } s;
+ } u;
+ uint64_t spc_aad_addr;
+ uint64_t spc_auth_res_addr;
+ uint16_t spc_aad_sz;
+ uint8_t reserved;
+ uint8_t spc_auth_res_sz;
+} __rte_packed;
+
+struct icp_qat_fw_la_auth_req_params {
+ uint32_t auth_off;
+ uint32_t auth_len;
+ union {
+ uint64_t auth_partial_st_prefix;
+ uint64_t aad_adr;
+ } u1;
+ uint64_t auth_res_addr;
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint8_t hash_state_sz;
+ uint8_t auth_res_sz;
+} __rte_packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
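+
+/* Illustrative usage sketch, not part of the original interface: chaining
+ * the cipher slice into the auth slice for a CIPHER_HASH command, with the
+ * auth output going to DRAM. The ICP_QAT_FW_SLICE_* values are assumed to
+ * come from the slice enum in icp_qat_fw.h; the function name is
+ * hypothetical.
+ */
+static inline void
+qat_la_example_chain(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl)
+{
+	ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}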
+
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h
new file mode 100644
index 000000000..00813cffb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h
@@ -0,0 +1,1538 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+/**
+ * @file icp_qat_fw_mmp_ids.h
+ * @ingroup icp_qat_fw_mmp
+ * @brief
+ * This file documents the external interfaces that the QAT FW running
+ * on the QAT Acceleration Engine provides to clients wanting to
+ * accelerate asymmetric crypto applications
+ */
+
+#ifndef _ICP_QAT_FW_MMP_IDS_
+#define _ICP_QAT_FW_MMP_IDS_
+
+#define PKE_INIT 0x09061a09
+/**< Functionality ID for Initialisation sequence
+ * @li 1 input parameters : @link icp_qat_fw_mmp_init_input::z z @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_init_output::zz zz @endlink
+ */
+#define PKE_DH_G2_768 0x1c0b1a10
+/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
+ * 768-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_768_input::e e
+ * @endlink @link icp_qat_fw_mmp_dh_g2_768_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_768_output::r r
+ * @endlink
+ */
+#define PKE_DH_768 0x210c1a1b
+/**< Functionality ID for Diffie-Hellman Modular exponentiation for 768-bit
+ * numbers
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dh_768_input::g g @endlink
+ * @link icp_qat_fw_mmp_dh_768_input::e e @endlink @link
+ * icp_qat_fw_mmp_dh_768_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_768_output::r r @endlink
+ */
+#define PKE_DH_G2_1024 0x220b1a27
+/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
+ * 1024-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_1024_input::e e
+ * @endlink @link icp_qat_fw_mmp_dh_g2_1024_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_1024_output::r r
+ * @endlink
+ */
+#define PKE_DH_1024 0x290c1a32
+/**< Functionality ID for Diffie-Hellman Modular exponentiation for 1024-bit
+ * numbers
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dh_1024_input::g g @endlink
+ * @link icp_qat_fw_mmp_dh_1024_input::e e @endlink @link
+ * icp_qat_fw_mmp_dh_1024_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_1024_output::r r @endlink
+ */
+#define PKE_DH_G2_1536 0x2e0b1a3e
+/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
+ * 1536-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_1536_input::e e
+ * @endlink @link icp_qat_fw_mmp_dh_g2_1536_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_1536_output::r r
+ * @endlink
+ */
+#define PKE_DH_1536 0x390c1a49
+/**< Functionality ID for Diffie-Hellman Modular exponentiation for 1536-bit
+ * numbers
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dh_1536_input::g g @endlink
+ * @link icp_qat_fw_mmp_dh_1536_input::e e @endlink @link
+ * icp_qat_fw_mmp_dh_1536_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_1536_output::r r @endlink
+ */
+#define PKE_DH_G2_2048 0x3e0b1a55
+/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
+ * 2048-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_2048_input::e e
+ * @endlink @link icp_qat_fw_mmp_dh_g2_2048_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_2048_output::r r
+ * @endlink
+ */
+#define PKE_DH_2048 0x4d0c1a60
+/**< Functionality ID for Diffie-Hellman Modular exponentiation for 2048-bit
+ * numbers
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dh_2048_input::g g @endlink
+ * @link icp_qat_fw_mmp_dh_2048_input::e e @endlink @link
+ * icp_qat_fw_mmp_dh_2048_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_2048_output::r r @endlink
+ */
+#define PKE_DH_G2_3072 0x3a0b1a6c
+/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
+ * 3072-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_3072_input::e e
+ * @endlink @link icp_qat_fw_mmp_dh_g2_3072_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_3072_output::r r
+ * @endlink
+ */
+#define PKE_DH_3072 0x510c1a77
+/**< Functionality ID for Diffie-Hellman Modular exponentiation for 3072-bit
+ * numbers
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dh_3072_input::g g @endlink
+ * @link icp_qat_fw_mmp_dh_3072_input::e e @endlink @link
+ * icp_qat_fw_mmp_dh_3072_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_3072_output::r r @endlink
+ */
+#define PKE_DH_G2_4096 0x4a0b1a83
+/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
+ * 4096-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_4096_input::e e
+ * @endlink @link icp_qat_fw_mmp_dh_g2_4096_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_4096_output::r r
+ * @endlink
+ */
+#define PKE_DH_4096 0x690c1a8e
+/**< Functionality ID for Diffie-Hellman Modular exponentiation for 4096-bit
+ * numbers
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dh_4096_input::g g @endlink
+ * @link icp_qat_fw_mmp_dh_4096_input::e e @endlink @link
+ * icp_qat_fw_mmp_dh_4096_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_4096_output::r r @endlink
+ */
+#define PKE_RSA_KP1_512 0x191d1a9a
+/**< Functionality ID for RSA 512 key generation first form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_512_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_512_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp1_512_input::e e @endlink
+ * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_512_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_512_output::d d @endlink
+ */
+#define PKE_RSA_KP2_512 0x19401acc
+/**< Functionality ID for RSA 512 key generation second form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_512_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_512_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_512_input::e e @endlink
+ * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_512_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_512_output::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_512_output::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_512_output::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_512_output::qinv qinv @endlink
+ */
+#define PKE_RSA_EP_512 0x1c161b21
+/**< Functionality ID for RSA 512 Encryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_512_input::m m
+ * @endlink @link icp_qat_fw_mmp_rsa_ep_512_input::e e @endlink @link
+ * icp_qat_fw_mmp_rsa_ep_512_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_512_output::c c
+ * @endlink
+ */
+#define PKE_RSA_DP1_512 0x1c161b3c
+/**< Functionality ID for RSA 512 Decryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_512_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp1_512_input::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_dp1_512_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_512_output::m m
+ * @endlink
+ */
+#define PKE_RSA_DP2_512 0x1c131b57
+/**< Functionality ID for RSA 512 Decryption with CRT
+ * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_512_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp2_512_input::p p @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_512_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_512_input::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_512_input::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_512_input::qinv qinv @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_512_output::m m
+ * @endlink
+ */
+#define PKE_RSA_KP1_1024 0x36181b71
+/**< Functionality ID for RSA 1024 key generation first form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_1024_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_1024_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp1_1024_input::e e @endlink
+ * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_1024_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_1024_output::d d @endlink
+ */
+#define PKE_RSA_KP2_1024 0x40451b9e
+/**< Functionality ID for RSA 1024 key generation second form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_1024_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_1024_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_1024_input::e e @endlink
+ * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_1024_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_1024_output::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_1024_output::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_1024_output::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_1024_output::qinv qinv @endlink
+ */
+#define PKE_RSA_EP_1024 0x35111bf7
+/**< Functionality ID for RSA 1024 Encryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_1024_input::m m
+ * @endlink @link icp_qat_fw_mmp_rsa_ep_1024_input::e e @endlink @link
+ * icp_qat_fw_mmp_rsa_ep_1024_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_1024_output::c c
+ * @endlink
+ */
+#define PKE_RSA_DP1_1024 0x35111c12
+/**< Functionality ID for RSA 1024 Decryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_1024_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp1_1024_input::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_dp1_1024_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_1024_output::m m
+ * @endlink
+ */
+#define PKE_RSA_DP2_1024 0x26131c2d
+/**< Functionality ID for RSA 1024 Decryption with CRT
+ * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_1024_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp2_1024_input::p p @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_1024_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_1024_input::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_1024_input::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_1024_input::qinv qinv @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_1024_output::m m
+ * @endlink
+ */
+#define PKE_RSA_KP1_1536 0x531d1c46
+/**< Functionality ID for RSA 1536 key generation first form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_1536_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_1536_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp1_1536_input::e e @endlink
+ * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_1536_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_1536_output::d d @endlink
+ */
+#define PKE_RSA_KP2_1536 0x32391c78
+/**< Functionality ID for RSA 1536 key generation second form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_1536_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_1536_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_1536_input::e e @endlink
+ * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_1536_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_1536_output::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_1536_output::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_1536_output::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_1536_output::qinv qinv @endlink
+ */
+#define PKE_RSA_EP_1536 0x4d111cdc
+/**< Functionality ID for RSA 1536 Encryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_1536_input::m m
+ * @endlink @link icp_qat_fw_mmp_rsa_ep_1536_input::e e @endlink @link
+ * icp_qat_fw_mmp_rsa_ep_1536_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_1536_output::c c
+ * @endlink
+ */
+#define PKE_RSA_DP1_1536 0x4d111cf7
+/**< Functionality ID for RSA 1536 Decryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_1536_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp1_1536_input::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_dp1_1536_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_1536_output::m m
+ * @endlink
+ */
+#define PKE_RSA_DP2_1536 0x45111d12
+/**< Functionality ID for RSA 1536 Decryption with CRT
+ * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_1536_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp2_1536_input::p p @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_1536_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_1536_input::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_1536_input::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_1536_input::qinv qinv @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_1536_output::m m
+ * @endlink
+ */
+#define PKE_RSA_KP1_2048 0x72181d2e
+/**< Functionality ID for RSA 2048 key generation first form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_2048_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_2048_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp1_2048_input::e e @endlink
+ * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_2048_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_2048_output::d d @endlink
+ */
+#define PKE_RSA_KP2_2048 0x42341d5b
+/**< Functionality ID for RSA 2048 key generation second form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_2048_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_2048_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_2048_input::e e @endlink
+ * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_2048_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_2048_output::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_2048_output::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_2048_output::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_2048_output::qinv qinv @endlink
+ */
+#define PKE_RSA_EP_2048 0x6e111dba
+/**< Functionality ID for RSA 2048 Encryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_2048_input::m m
+ * @endlink @link icp_qat_fw_mmp_rsa_ep_2048_input::e e @endlink @link
+ * icp_qat_fw_mmp_rsa_ep_2048_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_2048_output::c c
+ * @endlink
+ */
+#define PKE_RSA_DP1_2048 0x6e111dda
+/**< Functionality ID for RSA 2048 Decryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_2048_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp1_2048_input::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_dp1_2048_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_2048_output::m m
+ * @endlink
+ */
+#define PKE_RSA_DP2_2048 0x59121dfa
+/**< Functionality ID for RSA 2048 Decryption with CRT
+ * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_2048_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp2_2048_input::p p @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_2048_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_2048_input::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_2048_input::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_2048_input::qinv qinv @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_2048_output::m m
+ * @endlink
+ */
+#define PKE_RSA_KP1_3072 0x60191e16
+/**< Functionality ID for RSA 3072 key generation first form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_3072_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_3072_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp1_3072_input::e e @endlink
+ * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_3072_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_3072_output::d d @endlink
+ */
+#define PKE_RSA_KP2_3072 0x68331e45
+/**< Functionality ID for RSA 3072 key generation second form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_3072_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_3072_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_3072_input::e e @endlink
+ * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_3072_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_3072_output::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_3072_output::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_3072_output::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_3072_output::qinv qinv @endlink
+ */
+#define PKE_RSA_EP_3072 0x7d111ea3
+/**< Functionality ID for RSA 3072 Encryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_3072_input::m m
+ * @endlink @link icp_qat_fw_mmp_rsa_ep_3072_input::e e @endlink @link
+ * icp_qat_fw_mmp_rsa_ep_3072_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_3072_output::c c
+ * @endlink
+ */
+#define PKE_RSA_DP1_3072 0x7d111ebe
+/**< Functionality ID for RSA 3072 Decryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_3072_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp1_3072_input::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_dp1_3072_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_3072_output::m m
+ * @endlink
+ */
+#define PKE_RSA_DP2_3072 0x81121ed9
+/**< Functionality ID for RSA 3072 Decryption with CRT
+ * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_3072_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp2_3072_input::p p @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_3072_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_3072_input::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_3072_input::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_3072_input::qinv qinv @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_3072_output::m m
+ * @endlink
+ */
+#define PKE_RSA_KP1_4096 0x7d1f1ef6
+/**< Functionality ID for RSA 4096 key generation first form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_4096_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_4096_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp1_4096_input::e e @endlink
+ * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_4096_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_4096_output::d d @endlink
+ */
+#define PKE_RSA_KP2_4096 0x91251f27
+/**< Functionality ID for RSA 4096 key generation second form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_4096_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_4096_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_4096_input::e e @endlink
+ * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_4096_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_4096_output::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_4096_output::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_4096_output::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_4096_output::qinv qinv @endlink
+ */
+#define PKE_RSA_EP_4096 0xa5101f7e
+/**< Functionality ID for RSA 4096 Encryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_4096_input::m m
+ * @endlink @link icp_qat_fw_mmp_rsa_ep_4096_input::e e @endlink @link
+ * icp_qat_fw_mmp_rsa_ep_4096_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_4096_output::c c
+ * @endlink
+ */
+#define PKE_RSA_DP1_4096 0xa5101f98
+/**< Functionality ID for RSA 4096 Decryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_4096_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp1_4096_input::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_dp1_4096_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_4096_output::m m
+ * @endlink
+ */
+#define PKE_RSA_DP2_4096 0xb1111fb2
+/**< Functionality ID for RSA 4096 Decryption with CRT
+ * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_4096_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp2_4096_input::p p @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_4096_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_4096_input::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_4096_input::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_dp2_4096_input::qinv qinv @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_4096_output::m m
+ * @endlink
+ */
+#define PKE_GCD_PT_192 0x19201fcd
+/**< Functionality ID for GCD primality test for 192-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_192_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_256 0x19201ff7
+/**< Functionality ID for GCD primality test for 256-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_256_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_384 0x19202021
+/**< Functionality ID for GCD primality test for 384-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_384_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_512 0x1b1b204b
+/**< Functionality ID for GCD primality test for 512-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_512_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_768 0x170c2070
+/**< Functionality ID for GCD primality test for 768-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_768_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_1024 0x130f2085
+/**< Functionality ID for GCD primality test for 1024-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_1024_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_1536 0x1d0c2094
+/**< Functionality ID for GCD primality test for 1536-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_1536_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_2048 0x210c20a5
+/**< Functionality ID for GCD primality test for 2048-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_2048_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_3072 0x290c20b6
+/**< Functionality ID for GCD primality test for 3072-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_3072_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_4096 0x310c20c7
+/**< Functionality ID for GCD primality test for 4096-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_4096_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_160 0x0e1120d8
+/**< Functionality ID for Fermat primality test for 160-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_160_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_512 0x121120ee
+/**< Functionality ID for Fermat primality test for 512-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_512_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_L512 0x19162104
+/**< Functionality ID for Fermat primality test for <= 512-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_l512_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_768 0x19112124
+/**< Functionality ID for Fermat primality test for 768-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_768_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_1024 0x1f11213a
+/**< Functionality ID for Fermat primality test for 1024-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_1024_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_1536 0x2b112150
+/**< Functionality ID for Fermat primality test for 1536-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_1536_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_2048 0x3b112166
+/**< Functionality ID for Fermat primality test for 2048-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_2048_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_3072 0x3a11217c
+/**< Functionality ID for Fermat primality test for 3072-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_3072_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_4096 0x4a112192
+/**< Functionality ID for Fermat primality test for 4096-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_4096_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_160 0x0e1221a8
+/**< Functionality ID for Miller-Rabin primality test for 160-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_160_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_160_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_512 0x111221bf
+/**< Functionality ID for Miller-Rabin primality test for 512-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_512_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_512_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_768 0x1d0d21d6
+/**< Functionality ID for Miller-Rabin primality test for 768-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_768_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_768_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_1024 0x250d21ed
+/**< Functionality ID for Miller-Rabin primality test for 1024-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_1024_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_1024_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_1536 0x350d2204
+/**< Functionality ID for Miller-Rabin primality test for 1536-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_1536_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_1536_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_2048 0x490d221b
+/**< Functionality ID for Miller-Rabin primality test for 2048-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_2048_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_2048_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_3072 0x4d0d2232
+/**< Functionality ID for Miller-Rabin primality test for 3072-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_3072_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_3072_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_4096 0x650d2249
+/**< Functionality ID for Miller-Rabin primality test for 4096-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_4096_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_4096_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_L512 0x18182260
+/**< Functionality ID for Miller-Rabin primality test for <= 512-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_l512_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_l512_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_160 0x0e0c227e
+/**< Functionality ID for Lucas primality test for 160-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_160_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_512 0x110c228f
+/**< Functionality ID for Lucas primality test for 512-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_512_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_768 0x130c22a0
+/**< Functionality ID for Lucas primality test for 768-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_768_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_1024 0x150c22b1
+/**< Functionality ID for Lucas primality test for 1024-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_1024_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_1536 0x190c22c2
+/**< Functionality ID for Lucas primality test for 1536-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_1536_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_2048 0x1d0c22d3
+/**< Functionality ID for Lucas primality test for 2048-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_2048_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_3072 0x250c22e4
+/**< Functionality ID for Lucas primality test for 3072-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_3072_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_4096 0x661522f5
+/**< Functionality ID for Lucas primality test for 4096-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_4096_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_LUCAS_PT_L512 0x1617230a
+/**< Functionality ID for Lucas primality test for <= 512-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_l512_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define MATHS_MODEXP_L512 0x150c2327
+/**< Functionality ID for Modular exponentiation for numbers less than 512 bits
+ * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l512_input::g g
+ * @endlink @link icp_qat_fw_maths_modexp_l512_input::e e @endlink @link
+ * icp_qat_fw_maths_modexp_l512_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l512_output::r r
+ * @endlink
+ */
+#define MATHS_MODEXP_L1024 0x2d0c233e
+/**< Functionality ID for Modular exponentiation for numbers less than 1024 bits
+ * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l1024_input::g g
+ * @endlink @link icp_qat_fw_maths_modexp_l1024_input::e e @endlink @link
+ * icp_qat_fw_maths_modexp_l1024_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l1024_output::r r
+ * @endlink
+ */
+#define MATHS_MODEXP_L1536 0x410c2355
+/**< Functionality ID for Modular exponentiation for numbers less than 1536 bits
+ * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l1536_input::g g
+ * @endlink @link icp_qat_fw_maths_modexp_l1536_input::e e @endlink @link
+ * icp_qat_fw_maths_modexp_l1536_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l1536_output::r r
+ * @endlink
+ */
+#define MATHS_MODEXP_L2048 0x5e12236c
+/**< Functionality ID for Modular exponentiation for numbers less than 2048 bits
+ * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l2048_input::g g
+ * @endlink @link icp_qat_fw_maths_modexp_l2048_input::e e @endlink @link
+ * icp_qat_fw_maths_modexp_l2048_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l2048_output::r r
+ * @endlink
+ */
+#define MATHS_MODEXP_L2560 0x60162388
+/**< Functionality ID for Modular exponentiation for numbers less than 2560 bits
+ * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l2560_input::g g
+ * @endlink @link icp_qat_fw_maths_modexp_l2560_input::e e @endlink @link
+ * icp_qat_fw_maths_modexp_l2560_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l2560_output::r r
+ * @endlink
+ */
+#define MATHS_MODEXP_L3072 0x650c23a9
+/**< Functionality ID for Modular exponentiation for numbers less than 3072 bits
+ * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l3072_input::g g
+ * @endlink @link icp_qat_fw_maths_modexp_l3072_input::e e @endlink @link
+ * icp_qat_fw_maths_modexp_l3072_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l3072_output::r r
+ * @endlink
+ */
+#define MATHS_MODEXP_L3584 0x801623c0
+/**< Functionality ID for Modular exponentiation for numbers less than 3584 bits
+ * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l3584_input::g g
+ * @endlink @link icp_qat_fw_maths_modexp_l3584_input::e e @endlink @link
+ * icp_qat_fw_maths_modexp_l3584_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l3584_output::r r
+ * @endlink
+ */
+#define MATHS_MODEXP_L4096 0x850c23e1
+/**< Functionality ID for Modular exponentiation for numbers less than 4096 bits
+ * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l4096_input::g g
+ * @endlink @link icp_qat_fw_maths_modexp_l4096_input::e e @endlink @link
+ * icp_qat_fw_maths_modexp_l4096_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l4096_output::r r
+ * @endlink
+ */
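+
+/* Illustrative sketch, not part of the original interface: picking the
+ * smallest MODEXP functionality ID that can hold a modulus of a given bit
+ * length, mirroring how a caller would map operand sizes onto the IDs
+ * above. The function name is hypothetical; 0 is returned here as an
+ * arbitrary "unsupported size" sentinel.
+ */
+static inline unsigned int
+qat_mmp_example_modexp_id(unsigned int mod_bits)
+{
+	if (mod_bits <= 512)
+		return MATHS_MODEXP_L512;
+	if (mod_bits <= 1024)
+		return MATHS_MODEXP_L1024;
+	if (mod_bits <= 1536)
+		return MATHS_MODEXP_L1536;
+	if (mod_bits <= 2048)
+		return MATHS_MODEXP_L2048;
+	if (mod_bits <= 2560)
+		return MATHS_MODEXP_L2560;
+	if (mod_bits <= 3072)
+		return MATHS_MODEXP_L3072;
+	if (mod_bits <= 3584)
+		return MATHS_MODEXP_L3584;
+	if (mod_bits <= 4096)
+		return MATHS_MODEXP_L4096;
+	return 0;
+}
+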
+#define MATHS_MODINV_ODD_L128 0x090623f8
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 128 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l128_input::a a
+ * @endlink @link icp_qat_fw_maths_modinv_odd_l128_input::b b @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l128_output::c
+ * c @endlink
+ */
+#define MATHS_MODINV_ODD_L192 0x0a0623fe
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 192 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l192_input::a a
+ * @endlink @link icp_qat_fw_maths_modinv_odd_l192_input::b b @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l192_output::c
+ * c @endlink
+ */
+#define MATHS_MODINV_ODD_L256 0x0a062404
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 256 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l256_input::a a
+ * @endlink @link icp_qat_fw_maths_modinv_odd_l256_input::b b @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l256_output::c
+ * c @endlink
+ */
+#define MATHS_MODINV_ODD_L384 0x0b06240a
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 384 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l384_input::a a
+ * @endlink @link icp_qat_fw_maths_modinv_odd_l384_input::b b @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l384_output::c
+ * c @endlink
+ */
+#define MATHS_MODINV_ODD_L512 0x0c062410
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 512 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l512_input::a a
+ * @endlink @link icp_qat_fw_maths_modinv_odd_l512_input::b b @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l512_output::c
+ * c @endlink
+ */
+#define MATHS_MODINV_ODD_L768 0x0e062416
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 768 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l768_input::a a
+ * @endlink @link icp_qat_fw_maths_modinv_odd_l768_input::b b @endlink
+ * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l768_output::c
+ * c @endlink
+ */
+#define MATHS_MODINV_ODD_L1024 0x1006241c
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 1024 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l1024_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_odd_l1024_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_odd_l1024_output::c c @endlink
+ */
+#define MATHS_MODINV_ODD_L1536 0x18062422
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 1536 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l1536_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_odd_l1536_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_odd_l1536_output::c c @endlink
+ */
+#define MATHS_MODINV_ODD_L2048 0x20062428
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 2048 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l2048_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_odd_l2048_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_odd_l2048_output::c c @endlink
+ */
+#define MATHS_MODINV_ODD_L3072 0x3006242e
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 3072 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l3072_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_odd_l3072_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_odd_l3072_output::c c @endlink
+ */
+#define MATHS_MODINV_ODD_L4096 0x40062434
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 4096 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l4096_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_odd_l4096_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_odd_l4096_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L128 0x0906243a
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 128 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l128_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l128_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l128_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L192 0x0a062440
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 192 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l192_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l192_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l192_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L256 0x0a062446
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 256 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l256_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l256_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l256_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L384 0x0e0b244c
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 384 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l384_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l384_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l384_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L512 0x110b2457
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 512 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l512_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l512_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l512_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L768 0x170b2462
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 768 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l768_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l768_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l768_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L1024 0x1d0b246d
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 1024 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l1024_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l1024_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l1024_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L1536 0x290b2478
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 1536 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l1536_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l1536_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l1536_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L2048 0x350b2483
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 2048 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l2048_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l2048_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l2048_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L3072 0x4d0b248e
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 3072 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l3072_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l3072_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l3072_output::c c @endlink
+ */
+#define MATHS_MODINV_EVEN_L4096 0x650b2499
+/**< Functionality ID for Modular multiplicative inverse for numbers less than
+ * 4096 bits
+ * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l4096_input::a
+ * a @endlink @link icp_qat_fw_maths_modinv_even_l4096_input::b b @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_maths_modinv_even_l4096_output::c c @endlink
+ */
+#define PKE_DSA_GEN_P_1024_160 0x381824a4
+/**< Functionality ID for DSA parameter generation P
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_1024_160_input::x
+ * x @endlink @link icp_qat_fw_mmp_dsa_gen_p_1024_160_input::q q @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_dsa_gen_p_1024_160_output::p p @endlink
+ */
+#define PKE_DSA_GEN_G_1024 0x261424d4
+/**< Functionality ID for DSA key generation G
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_g_1024_input::p p
+ * @endlink @link icp_qat_fw_mmp_dsa_gen_g_1024_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_gen_g_1024_input::h h @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_g_1024_output::g g
+ * @endlink
+ */
+#define PKE_DSA_GEN_Y_1024 0x291224ed
+/**< Functionality ID for DSA key generation Y
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_y_1024_input::p p
+ * @endlink @link icp_qat_fw_mmp_dsa_gen_y_1024_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_gen_y_1024_input::x x @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_y_1024_output::y y
+ * @endlink
+ */
+#define PKE_DSA_SIGN_R_1024_160 0x2c1c2504
+/**< Functionality ID for DSA Sign R
+ * @li 4 input parameters : @link icp_qat_fw_mmp_dsa_sign_r_1024_160_input::k
+ * k @endlink @link icp_qat_fw_mmp_dsa_sign_r_1024_160_input::p p @endlink
+ * @link icp_qat_fw_mmp_dsa_sign_r_1024_160_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_1024_160_input::g g @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_1024_160_output::r r @endlink
+ */
+#define PKE_DSA_SIGN_S_160 0x12142526
+/**< Functionality ID for DSA Sign S
+ * @li 5 input parameters : @link icp_qat_fw_mmp_dsa_sign_s_160_input::m m
+ * @endlink @link icp_qat_fw_mmp_dsa_sign_s_160_input::k k @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_160_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_160_input::r r @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_160_input::x x @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_sign_s_160_output::s s
+ * @endlink
+ */
+#define PKE_DSA_SIGN_R_S_1024_160 0x301e2540
+/**< Functionality ID for DSA Sign R S
+ * @li 6 input parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::m m @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::k k @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::p p @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::x x @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_output::r r @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_output::s s @endlink
+ */
+#define PKE_DSA_VERIFY_1024_160 0x323a2570
+/**< Functionality ID for DSA Verify
+ * @li 7 input parameters : @link icp_qat_fw_mmp_dsa_verify_1024_160_input::r
+ * r @endlink @link icp_qat_fw_mmp_dsa_verify_1024_160_input::s s @endlink
+ * @link icp_qat_fw_mmp_dsa_verify_1024_160_input::m m @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_1024_160_input::p p @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_1024_160_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_1024_160_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_1024_160_input::y y @endlink
+ * @li no output parameters
+ */
+#define PKE_DSA_GEN_P_2048_224 0x341d25be
+/**< Functionality ID for DSA parameter generation P
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_2048_224_input::x
+ * x @endlink @link icp_qat_fw_mmp_dsa_gen_p_2048_224_input::q q @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_dsa_gen_p_2048_224_output::p p @endlink
+ */
+#define PKE_DSA_GEN_Y_2048 0x4d1225ea
+/**< Functionality ID for DSA key generation Y
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_y_2048_input::p p
+ * @endlink @link icp_qat_fw_mmp_dsa_gen_y_2048_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_gen_y_2048_input::x x @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_y_2048_output::y y
+ * @endlink
+ */
+#define PKE_DSA_SIGN_R_2048_224 0x511c2601
+/**< Functionality ID for DSA Sign R
+ * @li 4 input parameters : @link icp_qat_fw_mmp_dsa_sign_r_2048_224_input::k
+ * k @endlink @link icp_qat_fw_mmp_dsa_sign_r_2048_224_input::p p @endlink
+ * @link icp_qat_fw_mmp_dsa_sign_r_2048_224_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_2048_224_input::g g @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_2048_224_output::r r @endlink
+ */
+#define PKE_DSA_SIGN_S_224 0x15142623
+/**< Functionality ID for DSA Sign S
+ * @li 5 input parameters : @link icp_qat_fw_mmp_dsa_sign_s_224_input::m m
+ * @endlink @link icp_qat_fw_mmp_dsa_sign_s_224_input::k k @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_224_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_224_input::r r @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_224_input::x x @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_sign_s_224_output::s s
+ * @endlink
+ */
+#define PKE_DSA_SIGN_R_S_2048_224 0x571e263d
+/**< Functionality ID for DSA Sign R S
+ * @li 6 input parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::m m @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::k k @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::p p @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::x x @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_output::r r @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_output::s s @endlink
+ */
+#define PKE_DSA_VERIFY_2048_224 0x6930266d
+/**< Functionality ID for DSA Verify
+ * @li 7 input parameters : @link icp_qat_fw_mmp_dsa_verify_2048_224_input::r
+ * r @endlink @link icp_qat_fw_mmp_dsa_verify_2048_224_input::s s @endlink
+ * @link icp_qat_fw_mmp_dsa_verify_2048_224_input::m m @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_2048_224_input::p p @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_2048_224_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_2048_224_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_2048_224_input::y y @endlink
+ * @li no output parameters
+ */
+#define PKE_DSA_GEN_P_2048_256 0x431126b7
+/**< Functionality ID for DSA parameter generation P
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_2048_256_input::x
+ * x @endlink @link icp_qat_fw_mmp_dsa_gen_p_2048_256_input::q q @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_dsa_gen_p_2048_256_output::p p @endlink
+ */
+#define PKE_DSA_GEN_G_2048 0x4b1426ed
+/**< Functionality ID for DSA key generation G
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_g_2048_input::p p
+ * @endlink @link icp_qat_fw_mmp_dsa_gen_g_2048_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_gen_g_2048_input::h h @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_g_2048_output::g g
+ * @endlink
+ */
+#define PKE_DSA_SIGN_R_2048_256 0x5b182706
+/**< Functionality ID for DSA Sign R
+ * @li 4 input parameters : @link icp_qat_fw_mmp_dsa_sign_r_2048_256_input::k
+ * k @endlink @link icp_qat_fw_mmp_dsa_sign_r_2048_256_input::p p @endlink
+ * @link icp_qat_fw_mmp_dsa_sign_r_2048_256_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_2048_256_input::g g @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_2048_256_output::r r @endlink
+ */
+#define PKE_DSA_SIGN_S_256 0x15142733
+/**< Functionality ID for DSA Sign S
+ * @li 5 input parameters : @link icp_qat_fw_mmp_dsa_sign_s_256_input::m m
+ * @endlink @link icp_qat_fw_mmp_dsa_sign_s_256_input::k k @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_256_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_256_input::r r @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_s_256_input::x x @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_sign_s_256_output::s s
+ * @endlink
+ */
+#define PKE_DSA_SIGN_R_S_2048_256 0x5a2a274d
+/**< Functionality ID for DSA Sign R S
+ * @li 6 input parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::m m @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::k k @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::p p @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::x x @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_output::r r @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_output::s s @endlink
+ */
+#define PKE_DSA_VERIFY_2048_256 0x723a2789
+/**< Functionality ID for DSA Verify
+ * @li 7 input parameters : @link icp_qat_fw_mmp_dsa_verify_2048_256_input::r
+ * r @endlink @link icp_qat_fw_mmp_dsa_verify_2048_256_input::s s @endlink
+ * @link icp_qat_fw_mmp_dsa_verify_2048_256_input::m m @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_2048_256_input::p p @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_2048_256_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_2048_256_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_2048_256_input::y y @endlink
+ * @li no output parameters
+ */
+#define PKE_DSA_GEN_P_3072_256 0x4b1127e0
+/**< Functionality ID for DSA parameter generation P
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_3072_256_input::x
+ * x @endlink @link icp_qat_fw_mmp_dsa_gen_p_3072_256_input::q q @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_dsa_gen_p_3072_256_output::p p @endlink
+ */
+#define PKE_DSA_GEN_G_3072 0x4f142816
+/**< Functionality ID for DSA key generation G
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_g_3072_input::p p
+ * @endlink @link icp_qat_fw_mmp_dsa_gen_g_3072_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_gen_g_3072_input::h h @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_g_3072_output::g g
+ * @endlink
+ */
+#define PKE_DSA_GEN_Y_3072 0x5112282f
+/**< Functionality ID for DSA key generation Y
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_y_3072_input::p p
+ * @endlink @link icp_qat_fw_mmp_dsa_gen_y_3072_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_gen_y_3072_input::x x @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_y_3072_output::y y
+ * @endlink
+ */
+#define PKE_DSA_SIGN_R_3072_256 0x59282846
+/**< Functionality ID for DSA Sign R
+ * @li 4 input parameters : @link icp_qat_fw_mmp_dsa_sign_r_3072_256_input::k
+ * k @endlink @link icp_qat_fw_mmp_dsa_sign_r_3072_256_input::p p @endlink
+ * @link icp_qat_fw_mmp_dsa_sign_r_3072_256_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_3072_256_input::g g @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_3072_256_output::r r @endlink
+ */
+#define PKE_DSA_SIGN_R_S_3072_256 0x61292874
+/**< Functionality ID for DSA Sign R S
+ * @li 6 input parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::m m @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::k k @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::p p @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::x x @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_output::r r @endlink @link
+ * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_output::s s @endlink
+ */
+#define PKE_DSA_VERIFY_3072_256 0x7f4328ae
+/**< Functionality ID for DSA Verify
+ * @li 7 input parameters : @link icp_qat_fw_mmp_dsa_verify_3072_256_input::r
+ * r @endlink @link icp_qat_fw_mmp_dsa_verify_3072_256_input::s s @endlink
+ * @link icp_qat_fw_mmp_dsa_verify_3072_256_input::m m @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_3072_256_input::p p @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_3072_256_input::q q @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_3072_256_input::g g @endlink @link
+ * icp_qat_fw_mmp_dsa_verify_3072_256_input::y y @endlink
+ * @li no output parameters
+ */
+#define PKE_ECDSA_SIGN_RS_GF2_L256 0x46512907
+/**< Functionality ID for ECDSA Sign RS for curves B/K-163 and B/K-233
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l256_input::in in @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l256_output::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l256_output::s s @endlink
+ */
+#define PKE_ECDSA_SIGN_R_GF2_L256 0x323a298f
+/**< Functionality ID for ECDSA Sign R for curves B/K-163 and B/K-233
+ * @li 7 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::xg xg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::yg yg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::n n @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::q q @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::a a @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::b b @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::k k @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_output::r r @endlink
+ */
+#define PKE_ECDSA_SIGN_S_GF2_L256 0x2b2229e6
+/**< Functionality ID for ECDSA Sign S for curves with n &lt; 2^256
+ * @li 5 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::e e @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::d d @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::k k @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::n n @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_output::s s @endlink
+ */
+#define PKE_ECDSA_VERIFY_GF2_L256 0x337e2a27
+/**< Functionality ID for ECDSA Verify for curves B/K-163 and B/K-233
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_verify_gf2_l256_input::in in @endlink
+ * @li no output parameters
+ */
+#define PKE_ECDSA_SIGN_RS_GF2_L512 0x5e5f2ad7
+/**< Functionality ID for ECDSA Sign RS
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l512_input::in in @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l512_output::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l512_output::s s @endlink
+ */
+#define PKE_ECDSA_SIGN_R_GF2_L512 0x84312b6a
+/**< Functionality ID for ECDSA GF2 Sign R
+ * @li 7 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::xg xg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::yg yg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::n n @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::q q @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::a a @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::b b @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::k k @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_output::r r @endlink
+ */
+#define PKE_ECDSA_SIGN_S_GF2_L512 0x26182bbe
+/**< Functionality ID for ECDSA GF2 Sign S
+ * @li 5 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::e e @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::d d @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::k k @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::n n @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_output::s s @endlink
+ */
+#define PKE_ECDSA_VERIFY_GF2_L512 0x58892bea
+/**< Functionality ID for ECDSA GF2 Verify
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_verify_gf2_l512_input::in in @endlink
+ * @li no output parameters
+ */
+#define PKE_ECDSA_SIGN_RS_GF2_571 0x554a2c93
+/**< Functionality ID for ECDSA GF2 Sign RS for curves B-571/K-571
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_571_input::in in @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_571_output::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_571_output::s s @endlink
+ */
+#define PKE_ECDSA_SIGN_S_GF2_571 0x52332d09
+/**< Functionality ID for ECDSA GF2 Sign S for curves with deg(q) &lt; 576
+ * @li 5 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::e e @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::d d @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::k k @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::n n @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_output::s s @endlink
+ */
+#define PKE_ECDSA_SIGN_R_GF2_571 0x731a2d51
+/**< Functionality ID for ECDSA GF2 Sign R for degree 571
+ * @li 7 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::xg xg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::yg yg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::n n @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::q q @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::a a @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::b b @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::k k @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_output::r r @endlink
+ */
+#define PKE_ECDSA_VERIFY_GF2_571 0x4f6c2d91
+/**< Functionality ID for ECDSA GF2 Verify for degree 571
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_verify_gf2_571_input::in in @endlink
+ * @li no output parameters
+ */
+#define MATHS_POINT_MULTIPLICATION_GF2_L256 0x3b242e38
+/**< Functionality ID for MATHS GF2 Point Multiplication
+ * @li 7 input parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_input::k k @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_input::xg xg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_input::yg yg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_input::a a @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_input::b b @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_input::q q @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_input::h h @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_output::xk xk @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l256_output::yk yk @endlink
+ */
+#define MATHS_POINT_VERIFY_GF2_L256 0x231a2e7c
+/**< Functionality ID for MATHS GF2 Point Verification
+ * @li 5 input parameters : @link
+ * icp_qat_fw_maths_point_verify_gf2_l256_input::xq xq @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_l256_input::yq yq @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_l256_input::q q @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_l256_input::a a @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_l256_input::b b @endlink
+ * @li no output parameters
+ */
+#define MATHS_POINT_MULTIPLICATION_GF2_L512 0x722c2e96
+/**< Functionality ID for MATHS GF2 Point Multiplication
+ * @li 7 input parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_input::k k @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_input::xg xg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_input::yg yg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_input::a a @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_input::b b @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_input::q q @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_input::h h @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_output::xk xk @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_l512_output::yk yk @endlink
+ */
+#define MATHS_POINT_VERIFY_GF2_L512 0x25132ee2
+/**< Functionality ID for MATHS GF2 Point Verification
+ * @li 5 input parameters : @link
+ * icp_qat_fw_maths_point_verify_gf2_l512_input::xq xq @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_l512_input::yq yq @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_l512_input::q q @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_l512_input::a a @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_l512_input::b b @endlink
+ * @li no output parameters
+ */
+#define MATHS_POINT_MULTIPLICATION_GF2_571 0x44152ef5
+/**< Functionality ID for ECC GF2 Point Multiplication for curves B-571/K-571
+ * @li 7 input parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_input::k k @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_input::xg xg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_input::yg yg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_input::a a @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_input::b b @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_input::q q @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_input::h h @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_output::xk xk @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gf2_571_output::yk yk @endlink
+ */
+#define MATHS_POINT_VERIFY_GF2_571 0x12072f1b
+/**< Functionality ID for ECC GF2 Point Verification for degree 571
+ * @li 5 input parameters : @link
+ * icp_qat_fw_maths_point_verify_gf2_571_input::xq xq @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_571_input::yq yq @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_571_input::q q @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_571_input::a a @endlink @link
+ * icp_qat_fw_maths_point_verify_gf2_571_input::b b @endlink
+ * @li no output parameters
+ */
+#define PKE_KPT_ECDSA_SIGN_RS_GF2_L256 0x515217d9
+/**< Functionality ID for KPT ECDSA Sign RS for curves B/K-163 and B/K-233
+ * @li 3 input parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_input::in in @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_input::d d @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_input::c c @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_output::r r @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_output::s s @endlink
+ */
+#define PKE_KPT_ECDSA_SIGN_RS_GF2_L512 0x4d811987
+/**< Functionality ID for KPT ECDSA Sign RS
+ * @li 3 input parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_input::in in @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_input::d d @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_input::c c @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_output::r r @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_output::s s @endlink
+ */
+#define PKE_KPT_ECDSA_SIGN_RS_GF2_571 0x45731898
+/**< Functionality ID for KPT ECDSA GF2 Sign RS for curves B-571/K-571
+ * @li 3 input parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_input::in in @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_input::d d @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_input::c c @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_output::r r @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_output::s s @endlink
+ */
+#define PKE_ECDSA_SIGN_R_GFP_L256 0x431b2f22
+/**< Functionality ID for ECDSA GFP Sign R
+ * @li 7 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::xg xg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::yg yg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::n n @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::q q @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::a a @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::b b @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::k k @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_output::r r @endlink
+ */
+#define PKE_ECDSA_SIGN_S_GFP_L256 0x2b252f6d
+/**< Functionality ID for ECDSA GFP Sign S
+ * @li 5 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::e e @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::d d @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::k k @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::n n @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_output::s s @endlink
+ */
+#define PKE_ECDSA_SIGN_RS_GFP_L256 0x6a3c2fa6
+/**< Functionality ID for ECDSA GFP Sign RS
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l256_input::in in @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l256_output::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l256_output::s s @endlink
+ */
+#define PKE_ECDSA_VERIFY_GFP_L256 0x325b3023
+/**< Functionality ID for ECDSA GFP Verify
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_verify_gfp_l256_input::in in @endlink
+ * @li no output parameters
+ */
+#define PKE_ECDSA_SIGN_R_GFP_L512 0x4e2530b3
+/**< Functionality ID for ECDSA GFP Sign R
+ * @li 7 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::xg xg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::yg yg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::n n @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::q q @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::a a @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::b b @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::k k @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_output::r r @endlink
+ */
+#define PKE_ECDSA_SIGN_S_GFP_L512 0x251830fa
+/**< Functionality ID for ECDSA GFP Sign S
+ * @li 5 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::e e @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::d d @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::k k @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::n n @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_output::s s @endlink
+ */
+#define PKE_ECDSA_SIGN_RS_GFP_L512 0x5a2b3127
+/**< Functionality ID for ECDSA GFP Sign RS
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l512_input::in in @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l512_output::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l512_output::s s @endlink
+ */
+#define PKE_ECDSA_VERIFY_GFP_L512 0x3553318a
+/**< Functionality ID for ECDSA GFP Verify
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_verify_gfp_l512_input::in in @endlink
+ * @li no output parameters
+ */
+#define PKE_ECDSA_SIGN_R_GFP_521 0x772c31fe
+/**< Functionality ID for ECDSA GFP Sign R
+ * @li 7 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::xg xg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::yg yg @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::n n @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::q q @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::a a @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::b b @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::k k @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_output::r r @endlink
+ */
+#define PKE_ECDSA_SIGN_S_GFP_521 0x52343251
+/**< Functionality ID for ECDSA GFP Sign S
+ * @li 5 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::e e @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::d d @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::k k @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::n n @endlink
+ * @li 1 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_output::s s @endlink
+ */
+#define PKE_ECDSA_SIGN_RS_GFP_521 0x494a329b
+/**< Functionality ID for ECDSA GFP Sign RS
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_521_input::in in @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_521_output::r r @endlink @link
+ * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_521_output::s s @endlink
+ */
+#define PKE_ECDSA_VERIFY_GFP_521 0x554c331f
+/**< Functionality ID for ECDSA GFP Verify
+ * @li 1 input parameters : @link
+ * icp_qat_fw_mmp_ecdsa_verify_gfp_521_input::in in @endlink
+ * @li no output parameters
+ */
+#define MATHS_POINT_MULTIPLICATION_GFP_L256 0x432033a6
+/**< Functionality ID for ECC GFP Point Multiplication
+ * @li 7 input parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_input::k k @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_input::xg xg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_input::yg yg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_input::a a @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_input::b b @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_input::q q @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_input::h h @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_output::xk xk @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l256_output::yk yk @endlink
+ */
+#define MATHS_POINT_VERIFY_GFP_L256 0x1f0c33fc
+/**< Functionality ID for ECC GFP Partial Point Verification
+ * @li 5 input parameters : @link
+ * icp_qat_fw_maths_point_verify_gfp_l256_input::xq xq @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_l256_input::yq yq @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_l256_input::q q @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_l256_input::a a @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_l256_input::b b @endlink
+ * @li no output parameters
+ */
+#define MATHS_POINT_MULTIPLICATION_GFP_L512 0x41253419
+/**< Functionality ID for ECC GFP Point Multiplication
+ * @li 7 input parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_input::k k @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_input::xg xg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_input::yg yg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_input::a a @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_input::b b @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_input::q q @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_input::h h @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_output::xk xk @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_l512_output::yk yk @endlink
+ */
+#define MATHS_POINT_VERIFY_GFP_L512 0x2612345c
+/**< Functionality ID for ECC GFP Partial Point Verification
+ * @li 5 input parameters : @link
+ * icp_qat_fw_maths_point_verify_gfp_l512_input::xq xq @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_l512_input::yq yq @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_l512_input::q q @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_l512_input::a a @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_l512_input::b b @endlink
+ * @li no output parameters
+ */
+#define MATHS_POINT_MULTIPLICATION_GFP_521 0x5511346e
+/**< Functionality ID for ECC GFP Point Multiplication
+ * @li 7 input parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_input::k k @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_input::xg xg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_input::yg yg @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_input::a a @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_input::b b @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_input::q q @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_input::h h @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_output::xk xk @endlink @link
+ * icp_qat_fw_maths_point_multiplication_gfp_521_output::yk yk @endlink
+ */
+#define MATHS_POINT_VERIFY_GFP_521 0x0e0734be
+/**< Functionality ID for ECC GFP Partial Point Verification
+ * @li 5 input parameters : @link
+ * icp_qat_fw_maths_point_verify_gfp_521_input::xq xq @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_521_input::yq yq @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_521_input::q q @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_521_input::a a @endlink @link
+ * icp_qat_fw_maths_point_verify_gfp_521_input::b b @endlink
+ * @li no output parameters
+ */
+#define PKE_KPT_ECDSA_SIGN_RS_GFP_L256 0x1b6b182c
+/**< Functionality ID for KPT ECDSA GFP Sign RS
+ * @li 3 input parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_input::in in @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_input::d d @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_input::c c @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_output::r r @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_output::s s @endlink
+ */
+#define PKE_KPT_ECDSA_SIGN_RS_GFP_L512 0x7439179f
+/**< Functionality ID for KPT ECDSA GFP Sign RS
+ * @li 3 input parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_input::in in @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_input::d d @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_input::c c @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_output::r r @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_output::s s @endlink
+ */
+#define PKE_KPT_ECDSA_SIGN_RS_GFP_521 0x3b7a190c
+/**< Functionality ID for KPT ECDSA GFP Sign RS
+ * @li 3 input parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_input::in in @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_input::d d @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_input::c c @endlink
+ * @li 2 output parameters : @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_output::r r @endlink @link
+ * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_output::s s @endlink
+ */
+
+#define PKE_LIVENESS 0x00000001
+/**< Functionality ID for PKE_LIVENESS
+ * @li 0 input parameter(s)
+ * @li 1 output parameter(s) (8 qwords)
+ */
+#define PKE_INTERFACE_SIGNATURE 0x972ded54
+/**< Encoded signature of the interface specifications
+ */
+
+#define PKE_INVALID_FUNC_ID 0xffffffff
+
+#endif /* __ICP_QAT_FW_MMP_IDS__ */
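As a rough illustration of how these IDs are consumed: the modular-inverse
service is keyed by modulus parity and bit length, so a caller maps
(bits, parity) onto one of the MATHS_MODINV_* IDs above. A minimal sketch,
covering only a few representative sizes, with a hypothetical helper name:

static inline uint32_t
qat_modinv_func_id(unsigned int bits, int modulus_even)
{
	/* Sketch only: a real lookup would cover every supported bound. */
	if (bits <= 256)
		return modulus_even ? MATHS_MODINV_EVEN_L256
				    : MATHS_MODINV_ODD_L256;
	if (bits <= 512)
		return modulus_even ? MATHS_MODINV_EVEN_L512
				    : MATHS_MODINV_ODD_L512;
	if (bits <= 4096)
		return modulus_even ? MATHS_MODINV_EVEN_L4096
				    : MATHS_MODINV_ODD_L4096;
	return PKE_INVALID_FUNC_ID;	/* size not supported */
}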
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_pke.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_pke.h
new file mode 100644
index 000000000..b2cdf0a0a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_pke.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+/**
+ * @file icp_qat_fw_pke.h
+ * @defgroup icp_qat_fw_pke ICP QAT FW PKE Processing Definitions
+ * @ingroup icp_qat_fw
+ * Revision: 0.1
+ * @brief
+ * This file documents the external interfaces that the QAT FW running
+ * on the QAT Acceleration Engine provides to clients wanting to
 *      accelerate asymmetric crypto applications.
+ */
+
+#ifndef _ICP_QAT_FW_PKE_H_
+#define _ICP_QAT_FW_PKE_H_
+
+/*
+ * Keep all dpdk-specific changes in this section
+ */
+
+#include <stdint.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+/* End of DPDK-specific section
+ * Don't modify below this.
+ */
+
+/*
+ ****************************************************************************
+ * Include local header files
+ ****************************************************************************
+ */
+#include "icp_qat_fw.h"
+
+/**
+ *****************************************************************************
+ *
+ * @ingroup icp_qat_fw_pke
+ *
+ * @brief
+ * PKE response status field structure contained
+ * within LW1, comprising the common error codes and
+ * the response flags.
+ *
+ *****************************************************************************/
+struct icp_qat_fw_pke_resp_status {
+ u8 comn_err_code;
+	/**< 8-bit common error code */
+
+ u8 pke_resp_flags;
+ /**< 8-bit PKE response flags */
+};
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_pke
+ * Definition of the QAT FW PKE request header pars field.
+ *
+ * @description
+ * PKE request message header pars structure
+ *
+ *****************************************************************************/
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+ /**< LWs 2-3 */
+ u64 content_desc_addr;
+ /**< Content descriptor pointer */
+
+ /**< LW 4 */
+ u32 content_desc_resrvd;
+ /**< Content descriptor reserved field */
+
+ /**< LW 5 */
+ u32 func_id;
+ /**< MMP functionality Id */
+};
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_pke
+ * Definition of the QAT FW PKE request header mid section.
+ *
+ * @description
+ * PKE request message header middle structure
+ *
+ *****************************************************************************/
+struct icp_qat_fw_req_pke_mid {
+ /**< LWs 6-11 */
+ u64 opaque;
+ /**< Opaque data passed unmodified from the request to response messages
+ * by firmware (fw)
+ */
+
+ u64 src_data_addr;
+ /**< Generic definition of the source data supplied to the QAT AE. The
+ * common flags are used to further describe the attributes of this
+ * field
+ */
+
+ u64 dest_data_addr;
+ /**< Generic definition of the destination data supplied to the QAT AE.
+ * The common flags are used to further describe the attributes of this
+ * field
+ */
+};
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_pke
+ * Definition of the QAT FW PKE request header.
+ *
+ * @description
+ * PKE request message header structure
+ *
+ *****************************************************************************/
+struct icp_qat_fw_req_pke_hdr {
+ /**< LW0 */
+ u8 resrvd1;
+ /**< reserved field */
+
+ u8 resrvd2;
+ /**< reserved field */
+
+ u8 service_type;
+ /**< Service type */
+
+ u8 hdr_flags;
+ /**< This represents a flags field for the Service Request.
+ * The most significant bit is the 'valid' flag and the only
+ * one used. All remaining bit positions are unused and
+ * are therefore reserved and need to be set to 0.
+ */
+
+ /**< LW1 */
+ u16 comn_req_flags;
+	/**< Common Request flags - must indicate a flat buffer.
+	 * The PKE slice flags are no longer used, as the slice is
+	 * allocated to a threadstrand.
+ */
+
+ u8 kpt_mask;
+	/**< KPT input parameters array mask - indicates which node in the
+	 * array is encrypted
+ */
+
+ u8 kpt_rn_mask;
+	/**< KPT random node (RN) mask - indicates which node is the RN
+	 * that QAT should generate itself.
+ */
+
+ /**< LWs 2-5 */
+ struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+ /**< PKE request message header pars structure */
+};
+
+/**
+ ***************************************************************************
+ *
+ * @ingroup icp_qat_fw_pke
+ *
+ * @brief
+ * PKE request message structure (64 bytes)
+ *
+ *****************************************************************************/
+struct icp_qat_fw_pke_request {
+ /**< LWs 0-5 */
+ struct icp_qat_fw_req_pke_hdr pke_hdr;
+ /**< Request header for PKE - CD Header/Param size must be zero */
+
+ /**< LWs 6-11 */
+ struct icp_qat_fw_req_pke_mid pke_mid;
+ /**< Request middle section for PKE */
+
+ /**< LW 12 */
+ u8 output_param_count;
+ /**< Number of output large integers for request */
+
+ u8 input_param_count;
+ /**< Number of input large integers for request */
+
+ u16 resrvd1;
+	/**< Reserved */
+
+ /**< LW 13 */
+ u32 resrvd2;
+ /**< Reserved */
+
+ /**< LWs 14-15 */
+ u64 next_req_adr;
+	/**< PKE - next request address */
+};
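A minimal sketch of populating this 64-byte request, assuming src/dst are
device-visible addresses of pre-built parameter lists. The helper name is
hypothetical, and service_type/comn_req_flags are left at zero here even
though a real driver must set them:

static inline void
qat_fw_pke_req_fill(struct icp_qat_fw_pke_request *req, uint32_t func_id,
		    uint64_t src_addr, uint64_t dst_addr,
		    uint8_t n_inputs, uint8_t n_outputs, uint64_t cookie)
{
	memset(req, 0, sizeof(*req));		/* assumes <string.h> */
	req->pke_hdr.cd_pars.func_id = func_id;	/* an MMP functionality ID */
	req->pke_mid.opaque = cookie;		/* echoed in the response */
	req->pke_mid.src_data_addr = src_addr;
	req->pke_mid.dest_data_addr = dst_addr;
	req->input_param_count = n_inputs;
	req->output_param_count = n_outputs;
	/* The valid-flag macro is defined further down in this file. */
	ICP_QAT_FW_PKE_RQ_VALID_FLAG_SET(req->pke_hdr, 1);
}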
+
+/**
+ *****************************************************************************
+ *
+ * @ingroup icp_qat_fw_pke
+ *
+ * @brief
+ * PKE response message header structure
+ *
+ *****************************************************************************/
+struct icp_qat_fw_resp_pke_hdr {
+ /**< LW0 */
+ u8 resrvd1;
+ /**< Reserved */
+
+ u8 resrvd2;
+ /**< Reserved */
+
+ u8 response_type;
+ /**< Response type - copied from the request to the response message */
+
+ u8 hdr_flags;
+ /**< This represents a flags field for the Response.
+ * The most significant bit is the 'valid' flag and the only
+ * one used. All remaining bit positions are unused and
+ * are therefore reserved
+ */
+
+ /**< LW1 */
+ struct icp_qat_fw_pke_resp_status resp_status;
+
+ u16 resrvd4;
+ /**< Set to zero. */
+};
+
+/**
+ *****************************************************************************
+ *
+ * @ingroup icp_qat_fw_pke
+ *
+ * @brief
+ * PKE response message structure (32 bytes)
+ *
+ *****************************************************************************/
+struct icp_qat_fw_pke_resp {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+ /**< Response header for PKE */
+
+ /**< LWs 2-3 */
+ u64 opaque;
+ /**< Opaque data passed from the request to the response message */
+
+ /**< LWs 4-5 */
+ u64 src_data_addr;
+ /**< Generic definition of the source data supplied to the QAT AE. The
+ * common flags are used to further describe the attributes of this
+ * field
+ */
+
+ /**< LWs 6-7 */
+ u64 dest_data_addr;
+ /**< Generic definition of the destination data supplied to the QAT AE.
+ * The common flags are used to further describe the attributes of this
+ * field
+ */
+};
+
+/* ========================================================================= */
+/* MACRO DEFINITIONS */
+/* ========================================================================= */
+
+/**< @ingroup icp_qat_fw_pke
+ * Macro defining the bit position and mask of the 'valid' flag, within the
 * hdr_flags field of LW0 of the PKE request and response messages
+ */
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1
+
+/**< @ingroup icp_qat_fw_pke
+ * Macro defining the bit position and mask of the PKE status flag, within the
+ * status field LW1 of a PKE response message
+ */
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+/**< @ingroup icp_qat_fw_pke
+ * Starting bit position indicating the PKE status flag within the PKE response
+ * pke_resp_flags byte.
+ */
+
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
+/**< @ingroup icp_qat_fw_pke
+ * One bit mask used to determine PKE status mask
+ */
+
+/*
 * @ingroup icp_qat_fw_pke
+ * *** PKE Response Status Field Definition ***
+ * The PKE response follows the CPM 1.5 message format. The status field is
+ * 16 bits wide, where the status flags are contained within the most
+ * significant byte of the icp_qat_fw_pke_resp_status structure.
+ * The lower 8 bits of this word now contain the common error codes,
 * which are defined in the common header file (*).
+ */
+/* +=====+-----+----+-----+-----+-----+-----+-----+-----+---------------------+
+ * | Bit | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | [7....0] |
+ * +=====+-----+----+-----+-----+-----+-----+-----+-----+---------------------+
+ * |Flags|Rsrvd|Pke |Rsrvd|Rsrvd|Rsrvd|Rsrvd|Rsrvd|Rsrvd|Common error codes(*)|
+ * +=====+-----+----+-----+-----+-----+-----+-----+-----+---------------------+
+ */
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Macro for extraction of the PKE bit from the 16-bit status field
+ * particular to a PKE response. The status flags are contained within
+ * the most significant byte of the word. The lower 8 bits of this status
+ * word now contain the common error codes, which are defined in the common
+ * header file. The appropriate macro definition to extract the PKE status
 * flag from the PKE response assumes that a single byte, i.e. pke_resp_flags,
+ * is passed to the macro.
+ *
+ * @param status
+ * Status to extract the PKE status bit
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(flags) \
+ QAT_FIELD_GET((flags), QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
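A usage sketch: combining the macro above with the common error code byte to
decide whether a response succeeded. The polarity of the PKE status bit is
firmware-defined, so treat the zero-means-success test as an assumption
rather than a specification:

static inline int
qat_fw_pke_resp_ok(const struct icp_qat_fw_pke_resp *resp)
{
	const struct icp_qat_fw_pke_resp_status *st =
			&resp->pke_resp_hdr.resp_status;

	return ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(st->pke_resp_flags) == 0 &&
	       st->comn_err_code == 0;
}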
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Extract the valid flag from the PKE Request's header flags. Note that
+ * this invokes the common macro which may be used by either the request
+ * or the response.
+ *
+ * @param icp_qat_fw_req_pke_hdr Structure passed to extract the valid bit
+ * from the 'hdr_flags' field.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_RQ_VALID_FLAG_GET(icp_qat_fw_req_pke_hdr) \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_GET(icp_qat_fw_req_pke_hdr)
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Set the valid bit in the PKE Request's header flags. Note that
+ * this invokes the common macro which may be used by either the request
+ * or the response.
+ *
+ * @param icp_qat_fw_req_pke_hdr Structure passed to set the valid bit.
+ * @param val Value of the valid bit flag.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_RQ_VALID_FLAG_SET(icp_qat_fw_req_pke_hdr, val) \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(icp_qat_fw_req_pke_hdr, val)
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Extract the valid flag from the PKE Response's header flags. Note that
 *      this invokes the common macro which may be used by either the request
+ * or the response.
+ *
+ * @param icp_qat_fw_resp_pke_hdr Structure to extract the valid bit
+ * from the 'hdr_flags' field.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_RESP_VALID_FLAG_GET(icp_qat_fw_resp_pke_hdr) \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_GET(icp_qat_fw_resp_pke_hdr)
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Set the valid bit in the PKE Response's header flags. Note that
+ * this invokes the common macro which may be used by either the
+ * request or the response.
+ *
+ * @param icp_qat_fw_resp_pke_hdr Structure to set the valid bit
+ * @param val Value of the valid bit flag.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_RESP_VALID_FLAG_SET(icp_qat_fw_resp_pke_hdr, val) \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(icp_qat_fw_resp_pke_hdr, val)
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Common macro to extract the valid flag from the header flags field
+ * within the header structure (request or response).
+ *
+ * @param hdr Structure (request or response) to extract the
+ * valid bit from the 'hdr_flags' field.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_GET(hdr) \
+ QAT_FIELD_GET(hdr.hdr_flags, ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Common macro to set the valid bit in the header flags field within
+ * the header structure (request or response).
+ *
+ * @param hdr Structure (request or response) containing the header
+ * flags field, to allow the valid bit to be set.
+ * @param val Value of the valid bit flag.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr, val) \
+ QAT_FIELD_SET((hdr.hdr_flags), (val), \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+
+#endif /* _ICP_QAT_FW_PKE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h
new file mode 100644
index 000000000..cef64861f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+ ICP_QAT_HW_AE_0 = 0,
+ ICP_QAT_HW_AE_1 = 1,
+ ICP_QAT_HW_AE_2 = 2,
+ ICP_QAT_HW_AE_3 = 3,
+ ICP_QAT_HW_AE_4 = 4,
+ ICP_QAT_HW_AE_5 = 5,
+ ICP_QAT_HW_AE_6 = 6,
+ ICP_QAT_HW_AE_7 = 7,
+ ICP_QAT_HW_AE_8 = 8,
+ ICP_QAT_HW_AE_9 = 9,
+ ICP_QAT_HW_AE_10 = 10,
+ ICP_QAT_HW_AE_11 = 11,
+ ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+ ICP_QAT_HW_QAT_0 = 0,
+ ICP_QAT_HW_QAT_1 = 1,
+ ICP_QAT_HW_QAT_2 = 2,
+ ICP_QAT_HW_QAT_3 = 3,
+ ICP_QAT_HW_QAT_4 = 4,
+ ICP_QAT_HW_QAT_5 = 5,
+ ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+ ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+ ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+ ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+ ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+ ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+ ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+ ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+ ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+ ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+ ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+ ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+ ICP_QAT_HW_AUTH_MODE0 = 0,
+ ICP_QAT_HW_AUTH_MODE1 = 1,
+ ICP_QAT_HW_AUTH_MODE2 = 2,
+ ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+ uint32_t config;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF
+#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0
+#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1
+#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0
+
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \
+ << QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \
+ QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \
+ << QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \
+ (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \
+ QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \
+ << QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \
+ (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \
+ ((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \
+ QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \
+ << QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \
+ (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \
+ QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \
+ << QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS))
+
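+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * lower config word for SHA-256 in mode 1 with a full 32-byte digest
+ * compare length.
+ */
+static inline uint32_t
+example_sha256_auth_config(void)
+{
+	return ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+			ICP_QAT_HW_AUTH_ALGO_SHA256, 32);
+}
+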
+struct icp_qat_hw_auth_counter {
+ uint32_t counter;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+ (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+ struct icp_qat_hw_auth_config auth_config;
+ struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
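+/* Round (val) up to a multiple of (n); (n) must be a power of two,
+ * e.g. QAT_HW_ROUND_UP(20, 8) == 24.
+ */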
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+ ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+ uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_sha3_512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+};
+
+struct icp_qat_hw_auth_algo_blk {
+ struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+ ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+ ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+ ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+ ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+ ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+ ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+ ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+ ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+ ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+ ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+ ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+ ICP_QAT_HW_CIPHER_F8_MODE = 3,
+ ICP_QAT_HW_CIPHER_AEAD_MODE = 4,
+ ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+ ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+ ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+ ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_auth_op {
+ ICP_QAT_HW_AUTH_VERIFY = 0,
+ ICP_QAT_HW_AUTH_GENERATE = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+ ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS 10
+#define QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK 0x1F
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+	((((mode) & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+	(((algo) & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+	(((convert) & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+	(((dir) & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+
+#define QAT_CIPHER_AEAD_AAD_LOWER_SHIFT 24
+#define QAT_CIPHER_AEAD_AAD_UPPER_SHIFT 8
+#define QAT_CIPHER_AEAD_AAD_SIZE_LOWER_MASK 0xFF
+#define QAT_CIPHER_AEAD_AAD_SIZE_UPPER_MASK 0x3F
+#define QAT_CIPHER_AEAD_AAD_SIZE_BITPOS 16
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(aad_size) \
+ ({ \
+ typeof(aad_size) aad_size1 = aad_size; \
+ (((((aad_size1) >> QAT_CIPHER_AEAD_AAD_UPPER_SHIFT) & \
+ QAT_CIPHER_AEAD_AAD_SIZE_UPPER_MASK) << \
+ QAT_CIPHER_AEAD_AAD_SIZE_BITPOS) | \
+ (((aad_size1) & QAT_CIPHER_AEAD_AAD_SIZE_LOWER_MASK) << \
+ QAT_CIPHER_AEAD_AAD_LOWER_SHIFT)); \
+ })
+
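+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * lower config word for AES-128 in CBC mode, encrypt direction, no key
+ * conversion.
+ */
+static inline uint32_t
+example_aes128_cbc_cipher_config(void)
+{
+	return ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
+			ICP_QAT_HW_CIPHER_ALGO_AES128,
+			ICP_QAT_HW_CIPHER_NO_CONVERT,
+			ICP_QAT_HW_CIPHER_ENCRYPT);
+}
+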
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+
+#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
+
+/* These defines describe position of the bit-fields
+ * in the flags byte in B0
+ */
+#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6
+#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3
+
+#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \
+ ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
+ | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
+ | ((q) - 1))
+
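+/*
+ * Worked example (illustrative): with AAD present, a 16-byte tag and a
+ * 12-byte nonce (q = 15 - 12 = 3), ICP_QAT_HW_CCM_BUILD_B0_FLAGS(1, 16, 3)
+ * = (1 << 6) | (((16 - 2) >> 1) << 3) | (3 - 1) = 0x7A.
+ */
+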
+#define ICP_QAT_HW_CCM_NQ_CONST 15
+#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
+#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
+#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
+ ICP_QAT_HW_CCM_AAD_LEN_INFO)
+#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
+#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
+#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
+
+struct icp_qat_hw_cipher_algo_blk {
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
+} __rte_cache_aligned;
+
+/* ========================================================================= */
+/* COMPRESSION SLICE */
+/* ========================================================================= */
+
+enum icp_qat_hw_compression_direction {
+ ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
+ ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
+ ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_delayed_match {
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_algo {
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
+ ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
+ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_depth {
+ ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
+ ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
+ ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
+ ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
+ ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4
+};
+
+enum icp_qat_hw_compression_file_type {
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
+};
+
+struct icp_qat_hw_compression_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+#define QAT_COMPRESSION_DIR_BITPOS 4
+#define QAT_COMPRESSION_DIR_MASK 0x7
+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
+#define QAT_COMPRESSION_ALGO_BITPOS 31
+#define QAT_COMPRESSION_ALGO_MASK 0x1
+#define QAT_COMPRESSION_DEPTH_BITPOS 28
+#define QAT_COMPRESSION_DEPTH_MASK 0x7
+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
+
+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \
+ dir, delayed, algo, depth, filetype) \
+ ((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \
+ (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \
+ << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
+ (((algo) & QAT_COMPRESSION_ALGO_MASK) \
+ << QAT_COMPRESSION_ALGO_BITPOS) | \
+ (((depth) & QAT_COMPRESSION_DEPTH_MASK) \
+ << QAT_COMPRESSION_DEPTH_BITPOS) | \
+ (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \
+ << QAT_COMPRESSION_FILE_TYPE_BITPOS))
+
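+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * config word for deflate compression with delayed match disabled,
+ * search depth 8 and the default file type.
+ */
+static inline uint32_t
+example_deflate_comp_config(void)
+{
+	return ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
+			ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+			ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+			ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+			ICP_QAT_HW_COMPRESSION_DEPTH_8,
+			ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+}
+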
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h b/src/spdk/dpdk/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h
new file mode 100644
index 000000000..42ffbbadd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _QAT_PKE_FUNCTIONALITY_ARRAYS_H_
+#define _QAT_PKE_FUNCTIONALITY_ARRAYS_H_
+
+#include <stdint.h>
+
+#include "icp_qat_fw_mmp_ids.h"
+
+/*
+ * Modular exponentiation functionality IDs
+ */
+static const uint32_t MOD_EXP_SIZE[][2] = {
+ { 512, MATHS_MODEXP_L512 },
+ { 1024, MATHS_MODEXP_L1024 },
+ { 1536, MATHS_MODEXP_L1536 },
+ { 2048, MATHS_MODEXP_L2048 },
+ { 2560, MATHS_MODEXP_L2560 },
+ { 3072, MATHS_MODEXP_L3072 },
+ { 3584, MATHS_MODEXP_L3584 },
+ { 4096, MATHS_MODEXP_L4096 }
+};
+
+static const uint32_t MOD_INV_IDS_ODD[][2] = {
+ { 128, MATHS_MODINV_ODD_L128 },
+ { 192, MATHS_MODINV_ODD_L192 },
+ { 256, MATHS_MODINV_ODD_L256 },
+ { 384, MATHS_MODINV_ODD_L384 },
+ { 512, MATHS_MODINV_ODD_L512 },
+ { 768, MATHS_MODINV_ODD_L768 },
+ { 1024, MATHS_MODINV_ODD_L1024 },
+ { 1536, MATHS_MODINV_ODD_L1536 },
+ { 2048, MATHS_MODINV_ODD_L2048 },
+ { 3072, MATHS_MODINV_ODD_L3072 },
+ { 4096, MATHS_MODINV_ODD_L4096 },
+};
+
+static const uint32_t MOD_INV_IDS_EVEN[][2] = {
+ { 128, MATHS_MODINV_EVEN_L128 },
+ { 192, MATHS_MODINV_EVEN_L192 },
+ { 256, MATHS_MODINV_EVEN_L256 },
+ { 384, MATHS_MODINV_EVEN_L384 },
+ { 512, MATHS_MODINV_EVEN_L512 },
+ { 768, MATHS_MODINV_EVEN_L768 },
+ { 1024, MATHS_MODINV_EVEN_L1024 },
+ { 1536, MATHS_MODINV_EVEN_L1536 },
+ { 2048, MATHS_MODINV_EVEN_L2048 },
+ { 3072, MATHS_MODINV_EVEN_L3072 },
+ { 4096, MATHS_MODINV_EVEN_L4096 },
+};
+
+static const uint32_t RSA_ENC_IDS[][2] = {
+ { 512, PKE_RSA_EP_512 },
+ { 1024, PKE_RSA_EP_1024 },
+ { 1536, PKE_RSA_EP_1536 },
+ { 2048, PKE_RSA_EP_2048 },
+ { 3072, PKE_RSA_EP_3072 },
+ { 4096, PKE_RSA_EP_4096 },
+};
+
+static const uint32_t RSA_DEC_IDS[][2] = {
+ { 512, PKE_RSA_DP1_512 },
+ { 1024, PKE_RSA_DP1_1024 },
+ { 1536, PKE_RSA_DP1_1536 },
+ { 2048, PKE_RSA_DP1_2048 },
+ { 3072, PKE_RSA_DP1_3072 },
+ { 4096, PKE_RSA_DP1_4096 },
+};
+
+static const uint32_t RSA_DEC_CRT_IDS[][2] = {
+ { 512, PKE_RSA_DP2_512 },
+ { 1024, PKE_RSA_DP2_1024 },
+ { 1536, PKE_RSA_DP2_1536 },
+ { 2048, PKE_RSA_DP2_2048 },
+ { 3072, PKE_RSA_DP2_3072 },
+ { 4096, PKE_RSA_DP2_4096 },
+};
+
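+/*
+ * Lookup sketch (illustrative only, not part of the original header):
+ * scan one of the {bit-size, function-ID} tables above for the first
+ * entry that fits an operand of 'bits' bits; the tables are sorted by
+ * ascending size. E.g. example_pke_func_id(MOD_EXP_SIZE, 8, 2048)
+ * returns MATHS_MODEXP_L2048.
+ */
+static inline uint32_t
+example_pke_func_id(const uint32_t table[][2], unsigned int entries,
+		uint32_t bits)
+{
+	unsigned int i;
+
+	for (i = 0; i < entries; i++)
+		if (bits <= table[i][0])
+			return table[i][1];
+	return 0; /* no matching size */
+}
+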
+#endif
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_common.c b/src/spdk/dpdk/drivers/common/qat/qat_common.c
new file mode 100644
index 000000000..5343a1451
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_common.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "qat_common.h"
+#include "qat_device.h"
+#include "qat_logs.h"
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset,
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs)
+{
+ int res = -EINVAL;
+ uint32_t buf_len, nr;
+ struct qat_sgl *list = (struct qat_sgl *)list_in;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ uint8_t *virt_addr[max_segs];
+#endif
+
+ for (nr = buf_len = 0; buf && nr < max_segs; buf = buf->next) {
+ if (offset >= rte_pktmbuf_data_len(buf)) {
+ offset -= rte_pktmbuf_data_len(buf);
+ continue;
+ }
+
+ list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+ list->buffers[nr].resrvd = 0;
+ list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ virt_addr[nr] = rte_pktmbuf_mtod_offset(buf, uint8_t*, offset);
+#endif
+ offset = 0;
+ buf_len += list->buffers[nr].len;
+
+ if (buf_len >= data_len) {
+ list->buffers[nr].len -= buf_len - data_len;
+ res = 0;
+ break;
+ }
+ ++nr;
+ }
+
+ if (unlikely(res != 0)) {
+ if (nr == max_segs) {
+ QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+ max_segs);
+ } else {
+ QAT_DP_LOG(ERR, "Mbuf chain is too short");
+ }
+	} else {
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+ for (nr = 0; nr < list->num_bufs; nr++) {
+ QAT_DP_LOG(INFO,
+ "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+ nr, list->buffers[nr].len,
+ list->buffers[nr].addr);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+ virt_addr[nr],
+ list->buffers[nr].len);
+ }
+#endif
+ }
+
+ return res;
+}
+
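+/*
+ * Usage sketch for qat_sgl_fill_array() (illustrative only, hypothetical
+ * names): fill a service-defined SGL from an mbuf chain, starting at byte
+ * 'offset' and covering 'data_len' bytes:
+ *
+ *	struct example_sgl { qat_sgl_hdr; struct qat_flat_buf bufs[16]; };
+ *	struct example_sgl sgl;
+ *
+ *	if (qat_sgl_fill_array(mbuf, offset, &sgl, data_len, 16) != 0)
+ *		return -EINVAL;
+ */
+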
+void qat_stats_get(struct qat_pci_device *dev,
+ struct qat_common_stats *stats,
+ enum qat_service_type service)
+{
+ int i;
+ struct qat_qp **qp;
+
+ if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d",
+ stats, dev, service);
+ return;
+ }
+
+ qp = dev->qps_in_use[service];
+ for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+ if (qp[i] == NULL) {
+ QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+ service, i);
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
+ stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
+ stats->threshold_hit_count += qp[i]->stats.threshold_hit_count;
+ QAT_LOG(DEBUG, "Threshold was used for qp %d %"PRIu64" times",
+ i, stats->threshold_hit_count);
+ }
+}
+
+void qat_stats_reset(struct qat_pci_device *dev,
+ enum qat_service_type service)
+{
+ int i;
+ struct qat_qp **qp;
+
+ if (dev == NULL || service >= QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR, "invalid param: dev %p, service %d",
+ dev, service);
+ return;
+ }
+
+ qp = dev->qps_in_use[service];
+ for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+ if (qp[i] == NULL) {
+ QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+ service, i);
+ continue;
+ }
+ memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
+ }
+
+ QAT_LOG(DEBUG, "QAT: %d stats cleared", service);
+}
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_common.h b/src/spdk/dpdk/drivers/common/qat/qat_common.h
new file mode 100644
index 000000000..cf840fea9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_common.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_COMMON_H_
+#define _QAT_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+/** Intel(R) QAT device name for PCI registration */
+#define QAT_PCI_NAME qat
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+
+/* Intel(R) QuickAssist Technology device generations are enumerated
+ * from 1 and track the hardware generation of the device.
+ */
+enum qat_device_gen {
+ QAT_GEN1 = 1,
+ QAT_GEN2,
+ QAT_GEN3
+};
+
+enum qat_service_type {
+ QAT_SERVICE_ASYMMETRIC = 0,
+ QAT_SERVICE_SYMMETRIC,
+ QAT_SERVICE_COMPRESSION,
+ QAT_SERVICE_INVALID
+};
+
+#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID)
+
+/** Common struct for scatter-gather list operations */
+struct qat_flat_buf {
+ uint32_t len;
+ uint32_t resrvd;
+ uint64_t addr;
+} __rte_packed;
+
+#define qat_sgl_hdr struct { \
+ uint64_t resrvd; \
+ uint32_t num_bufs; \
+ uint32_t num_mapped_bufs; \
+}
+
+__extension__
+struct qat_sgl {
+ qat_sgl_hdr;
+	/* flexible array of flat buffers */
+ struct qat_flat_buf buffers[0];
+} __rte_packed __rte_cache_aligned;
+
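+/*
+ * Sizing sketch (illustrative only): since 'buffers' is a zero-length
+ * array, a qat_sgl holding n entries occupies
+ * sizeof(struct qat_sgl) + n * sizeof(struct qat_flat_buf) bytes;
+ * services typically embed a fixed-size variant instead, e.g.
+ *	struct { qat_sgl_hdr; struct qat_flat_buf bufs[16]; };
+ */
+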
+/** Common, i.e. not service-specific, statistics */
+struct qat_common_stats {
+ uint64_t enqueued_count;
+ /**< Count of all operations enqueued */
+ uint64_t dequeued_count;
+ /**< Count of all operations dequeued */
+
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations enqueued */
+ uint64_t dequeue_err_count;
+ /**< Total error count on operations dequeued */
+ uint64_t threshold_hit_count;
+ /**< Total number of times min qp threshold condition was fulfilled */
+};
+
+struct qat_pci_device;
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset,
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs);
+void
+qat_stats_get(struct qat_pci_device *dev,
+ struct qat_common_stats *stats,
+ enum qat_service_type service);
+void
+qat_stats_reset(struct qat_pci_device *dev,
+ enum qat_service_type service);
+
+#endif /* _QAT_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_device.c b/src/spdk/dpdk/drivers/common/qat/qat_device.c
new file mode 100644
index 000000000..2b41d9a13
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_device.c
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_string_fns.h>
+#include <rte_devargs.h>
+#include <ctype.h>
+
+#include "qat_device.h"
+#include "adf_transport_access_macros.h"
+#include "qat_sym_pmd.h"
+#include "qat_comp_pmd.h"
+
+/* Hardware device information per generation */
+__extension__
+struct qat_gen_hw_data qat_gen_config[] = {
+ [QAT_GEN1] = {
+ .dev_gen = QAT_GEN1,
+ .qp_hw_data = qat_gen1_qps,
+ .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN1
+ },
+ [QAT_GEN2] = {
+ .dev_gen = QAT_GEN2,
+ .qp_hw_data = qat_gen1_qps,
+ /* gen2 has same ring layout as gen1 */
+ .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN2
+ },
+ [QAT_GEN3] = {
+ .dev_gen = QAT_GEN3,
+ .qp_hw_data = qat_gen3_qps,
+ .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN3
+ },
+};
+
+
+static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES];
+static int qat_nb_pci_devices;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+
+static const struct rte_pci_id pci_id_qat_map[] = {
+ {
+ RTE_PCI_DEVICE(0x8086, 0x0443),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x37c9),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x19e3),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x6f55),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x18a1),
+ },
+ {.device_id = 0},
+};
+
+static struct qat_pci_device *
+qat_pci_get_dev(uint8_t dev_id)
+{
+ return &qat_pci_devices[dev_id];
+}
+
+static struct qat_pci_device *
+qat_pci_get_named_dev(const char *name)
+{
+ struct qat_pci_device *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
+ dev = &qat_pci_devices[i];
+
+ if ((dev->attached == QAT_ATTACHED) &&
+ (strcmp(dev->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static uint8_t
+qat_pci_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) {
+ if (qat_pci_devices[dev_id].attached == QAT_DETACHED)
+ break;
+ }
+ return dev_id;
+}
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev)
+{
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return qat_pci_get_named_dev(name);
+}
+
+static void
+qat_dev_parse_cmd(const char *str,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+ int i = 0;
+ const char *param;
+
+ while (1) {
+ char value_str[4] = { };
+
+ param = qat_dev_cmd_param[i].name;
+ if (param == NULL)
+ return;
+ long value = 0;
+ const char *arg = strstr(str, param);
+ const char *arg2 = NULL;
+
+ if (arg) {
+ arg2 = arg + strlen(param);
+ if (*arg2 != '=') {
+ QAT_LOG(DEBUG, "parsing error '=' sign"
+ " should immediately follow %s",
+ param);
+ arg2 = NULL;
+ } else
+ arg2++;
+ } else {
+ QAT_LOG(DEBUG, "%s not provided", param);
+ }
+ if (arg2) {
+ int iter = 0;
+ while (iter < 2) {
+ if (!isdigit(*(arg2 + iter)))
+ break;
+ iter++;
+ }
+ if (!iter) {
+ QAT_LOG(DEBUG, "parsing error %s"
+ " no number provided",
+ param);
+ } else {
+ memcpy(value_str, arg2, iter);
+ value = strtol(value_str, NULL, 10);
+ if (value > MAX_QP_THRESHOLD_SIZE) {
+ QAT_LOG(DEBUG, "Exceeded max size of"
+ " threshold, setting to %d",
+ MAX_QP_THRESHOLD_SIZE);
+ value = MAX_QP_THRESHOLD_SIZE;
+ }
+ QAT_LOG(DEBUG, "parsing %s = %ld",
+ param, value);
+ }
+ }
+ qat_dev_cmd_param[i].val = value;
+ i++;
+ }
+}
+
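+/*
+ * Example (illustrative): given a devargs driver string such as
+ * "qat_sym_enq_threshold=32", qat_dev_parse_cmd() stores 32 in the entry
+ * named SYM_ENQ_THRESHOLD_NAME; values above MAX_QP_THRESHOLD_SIZE are
+ * clamped to MAX_QP_THRESHOLD_SIZE.
+ */
+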
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev,
+ struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+ struct qat_pci_device *qat_dev;
+ uint8_t qat_dev_id;
+ char name[QAT_DEV_NAME_MAX_LEN];
+ struct rte_devargs *devargs = pci_dev->device.devargs;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+ if (qat_pci_get_named_dev(name) != NULL) {
+ QAT_LOG(ERR, "QAT device with name %s already allocated!",
+ name);
+ return NULL;
+ }
+
+ qat_dev_id = qat_pci_find_free_device_index();
+ if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) {
+ QAT_LOG(ERR, "Reached maximum number of QAT devices");
+ return NULL;
+ }
+
+ qat_dev = qat_pci_get_dev(qat_dev_id);
+ memset(qat_dev, 0, sizeof(*qat_dev));
+ strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
+ qat_dev->qat_dev_id = qat_dev_id;
+ qat_dev->pci_dev = pci_dev;
+ switch (qat_dev->pci_dev->id.device_id) {
+ case 0x0443:
+ qat_dev->qat_dev_gen = QAT_GEN1;
+ break;
+ case 0x37c9:
+ case 0x19e3:
+ case 0x6f55:
+ qat_dev->qat_dev_gen = QAT_GEN2;
+ break;
+ case 0x18a1:
+ qat_dev->qat_dev_gen = QAT_GEN3;
+ break;
+ default:
+ QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
+ return NULL;
+ }
+
+ if (devargs && devargs->drv_str)
+ qat_dev_parse_cmd(devargs->drv_str, qat_dev_cmd_param);
+
+ rte_spinlock_init(&qat_dev->arb_csr_lock);
+
+ qat_dev->attached = QAT_ATTACHED;
+
+ qat_nb_pci_devices++;
+
+ QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d",
+ qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
+
+ return qat_dev;
+}
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_dev;
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+ qat_dev = qat_pci_get_named_dev(name);
+	if (qat_dev != NULL) {
+ /* Check that there are no service devs still on pci device */
+ if (qat_dev->sym_dev != NULL)
+ return -EBUSY;
+
+ qat_dev->attached = QAT_DETACHED;
+ qat_nb_pci_devices--;
+ }
+ QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
+ name, qat_nb_pci_devices);
+ return 0;
+}
+
+static int
+qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev,
+ struct rte_pci_device *pci_dev)
+{
+ qat_sym_dev_destroy(qat_pci_dev);
+ qat_comp_dev_destroy(qat_pci_dev);
+ qat_asym_dev_destroy(qat_pci_dev);
+ return qat_pci_device_release(pci_dev);
+}
+
+static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int sym_ret = 0, asym_ret = 0, comp_ret = 0;
+ int num_pmds_created = 0;
+ struct qat_pci_device *qat_pci_dev;
+ struct qat_dev_cmd_param qat_dev_cmd_param[] = {
+ { SYM_ENQ_THRESHOLD_NAME, 0 },
+ { ASYM_ENQ_THRESHOLD_NAME, 0 },
+ { COMP_ENQ_THRESHOLD_NAME, 0 },
+ { NULL, 0 },
+ };
+
+ QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ qat_pci_dev = qat_pci_device_allocate(pci_dev, qat_dev_cmd_param);
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ sym_ret = qat_sym_dev_create(qat_pci_dev, qat_dev_cmd_param);
+	if (sym_ret == 0)
+		num_pmds_created++;
+	else
+ QAT_LOG(WARNING,
+ "Failed to create QAT SYM PMD on device %s",
+ qat_pci_dev->name);
+
+ comp_ret = qat_comp_dev_create(qat_pci_dev, qat_dev_cmd_param);
+ if (comp_ret == 0)
+ num_pmds_created++;
+ else
+ QAT_LOG(WARNING,
+ "Failed to create QAT COMP PMD on device %s",
+ qat_pci_dev->name);
+
+ asym_ret = qat_asym_dev_create(qat_pci_dev, qat_dev_cmd_param);
+ if (asym_ret == 0)
+ num_pmds_created++;
+ else
+ QAT_LOG(WARNING,
+ "Failed to create QAT ASYM PMD on device %s",
+ qat_pci_dev->name);
+
+ if (num_pmds_created == 0)
+ qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+
+ return 0;
+}
+
+static int qat_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_pci_dev;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev);
+ if (qat_pci_dev == NULL)
+ return 0;
+
+ return qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+}
+
+static struct rte_pci_driver rte_qat_pmd = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = qat_pci_probe,
+ .remove = qat_pci_remove
+};
+
+__rte_weak int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
+ struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+ return 0;
+}
+
+__rte_weak int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
+ struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+ return 0;
+}
+
+__rte_weak int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__rte_weak int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__rte_weak int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
+ struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+ return 0;
+}
+
+__rte_weak int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_device.h b/src/spdk/dpdk/drivers/common/qat/qat_device.h
new file mode 100644
index 000000000..09a4c55ed
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_device.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_DEVICE_H_
+#define _QAT_DEVICE_H_
+
+#include <rte_bus_pci.h>
+
+#include "qat_common.h"
+#include "qat_logs.h"
+#include "adf_transport_access_macros.h"
+#include "qat_qp.h"
+
+#define QAT_DETACHED (0)
+#define QAT_ATTACHED (1)
+
+#define QAT_DEV_NAME_MAX_LEN 64
+
+#define SYM_ENQ_THRESHOLD_NAME "qat_sym_enq_threshold"
+#define ASYM_ENQ_THRESHOLD_NAME "qat_asym_enq_threshold"
+#define COMP_ENQ_THRESHOLD_NAME "qat_comp_enq_threshold"
+#define MAX_QP_THRESHOLD_SIZE 32
+
+struct qat_dev_cmd_param {
+ const char *name;
+ uint16_t val;
+};
+
+enum qat_comp_num_im_buffers {
+ QAT_NUM_INTERM_BUFS_GEN1 = 12,
+ QAT_NUM_INTERM_BUFS_GEN2 = 20,
+ QAT_NUM_INTERM_BUFS_GEN3 = 20
+};
+
+/*
+ * This struct holds all the data about a QAT pci device
+ * including data about all services it supports.
+ * It contains
+ * - hw_data
+ * - config data
+ * - runtime data
+ */
+struct qat_sym_dev_private;
+struct qat_asym_dev_private;
+struct qat_comp_dev_private;
+
+struct qat_pci_device {
+
+ /* Data used by all services */
+ char name[QAT_DEV_NAME_MAX_LEN];
+ /**< Name of qat pci device */
+ uint8_t qat_dev_id;
+ /**< Device instance for this qat pci device */
+ struct rte_pci_device *pci_dev;
+ /**< PCI information. */
+ enum qat_device_gen qat_dev_gen;
+ /**< QAT device generation */
+ rte_spinlock_t arb_csr_lock;
+ /**< lock to protect accesses to the arbiter CSR */
+ __extension__
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+
+ struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE];
+ /**< links to qps set up for each service, index same as on API */
+
+ /* Data relating to symmetric crypto service */
+ struct qat_sym_dev_private *sym_dev;
+ /**< link back to cryptodev private data */
+ struct rte_device sym_rte_dev;
+ /**< This represents the crypto sym subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ /* Data relating to asymmetric crypto service */
+ struct qat_asym_dev_private *asym_dev;
+ /**< link back to cryptodev private data */
+ struct rte_device asym_rte_dev;
+ /**< This represents the crypto asym subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ /* Data relating to compression service */
+ struct qat_comp_dev_private *comp_dev;
+ /**< link back to compressdev private data */
+ struct rte_device comp_rte_dev;
+ /**< This represents the compression subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a compression-specific name
+ */
+};
+
+struct qat_gen_hw_data {
+ enum qat_device_gen dev_gen;
+ const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE];
+ enum qat_comp_num_im_buffers comp_num_im_bufs_required;
+};
+
+extern struct qat_gen_hw_data qat_gen_config[];
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev,
+ struct qat_dev_cmd_param *qat_dev_cmd_param);
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev);
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev);
+
+/* declaration needed for weak functions */
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
+ struct qat_dev_cmd_param *qat_dev_cmd_param);
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
+ struct qat_dev_cmd_param *qat_dev_cmd_param);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
+ struct qat_dev_cmd_param *qat_dev_cmd_param);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+#endif /* _QAT_DEVICE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_logs.c b/src/spdk/dpdk/drivers/common/qat/qat_logs.c
new file mode 100644
index 000000000..dfd0cbe5d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_logs.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_log.h>
+#include <rte_hexdump.h>
+
+#include "qat_logs.h"
+
+int qat_gen_logtype;
+int qat_dp_logtype;
+
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len)
+{
+ if (rte_log_can_log(logtype, level))
+ rte_hexdump(rte_log_get_stream(), title, buf, len);
+
+ return 0;
+}
+
+RTE_INIT(qat_pci_init_log)
+{
+ /* Non-data-path logging for pci device and all services */
+ qat_gen_logtype = rte_log_register("pmd.qat_general");
+ if (qat_gen_logtype >= 0)
+ rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE);
+
+ /* data-path logging for all services */
+ qat_dp_logtype = rte_log_register("pmd.qat_dp");
+ if (qat_dp_logtype >= 0)
+ rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_logs.h b/src/spdk/dpdk/drivers/common/qat/qat_logs.h
new file mode 100644
index 000000000..2e4d3945c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_logs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_LOGS_H_
+#define _QAT_LOGS_H_
+
+#include <rte_log.h>
+
+extern int qat_gen_logtype;
+extern int qat_dp_logtype;
+
+#define QAT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qat_gen_logtype, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qat_dp_logtype, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_HEXDUMP_LOG(level, title, buf, len) \
+ qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len)
+
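+/*
+ * Usage sketch (illustrative): control-path vs data-path logging.
+ *
+ *	QAT_LOG(ERR, "setup failed on device %s", name);
+ *	QAT_DP_LOG(DEBUG, "enqueued %u ops", nb_ops);
+ *	QAT_DP_HEXDUMP_LOG(DEBUG, "qat req", req_ptr, req_len);
+ */
+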
+/**
+ * qat_hexdump_log - Dump out memory in a special hex dump format.
+ *
+ * Dump out the message buffer in a special hex dump output format with
+ * characters printed for each line of 16 hex values. The message will be sent
+ * to the stream used by the rte_log infrastructure.
+ */
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len);
+
+#endif /* _QAT_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_qp.c b/src/spdk/dpdk/drivers/common/qat/qat_qp.c
new file mode 100644
index 000000000..8e6dd04eb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_qp.c
@@ -0,0 +1,992 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+
+#include "qat_logs.h"
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_comp.h"
+#include "adf_transport_access_macros.h"
+
+#define QAT_CQ_MAX_DEQ_RETRIES 10
+
+#define ADF_MAX_DESC 4096
+#define ADF_MIN_DESC 128
+
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+	(ADF_ARB_REG_SLOT * (index)), value)
+
+__extension__
+const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+ [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+ /* queue pairs which provide an asymmetric crypto service */
+ [QAT_SERVICE_ASYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 0,
+ .rx_ring_num = 8,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+
+ }, {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 1,
+ .rx_ring_num = 9,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a symmetric crypto service */
+ [QAT_SERVICE_SYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 2,
+ .rx_ring_num = 10,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ },
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 3,
+ .rx_ring_num = 11,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a compression service */
+ [QAT_SERVICE_COMPRESSION] = {
+ {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 6,
+ .rx_ring_num = 14,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }, {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 7,
+ .rx_ring_num = 15,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ }
+};
+
+__extension__
+const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
+ [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+ /* queue pairs which provide an asymmetric crypto service */
+ [QAT_SERVICE_ASYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 0,
+ .rx_ring_num = 4,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a symmetric crypto service */
+ [QAT_SERVICE_SYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 1,
+ .rx_ring_num = 5,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a compression service */
+ [QAT_SERVICE_COMPRESSION] = {
+ {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 3,
+ .rx_ring_num = 7,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ }
+};
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes);
+static void qat_queue_delete(struct qat_queue *queue);
+static int qat_queue_create(struct qat_pci_device *qat_dev,
+ struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *queue_size_for_csr);
+static void adf_configure_queues(struct qat_qp *queue);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+
+int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service)
+{
+ int i, count;
+
+ for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
+ if (qp_hw_data[i].service_type == service)
+ count++;
+ return count;
+}
+
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+	if (mz != NULL) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ QAT_LOG(DEBUG, "re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+
+ QAT_LOG(ERR, "Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+int qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr,
+ uint16_t queue_pair_id,
+		struct qat_qp_config *qat_qp_conf)
+{
+ struct qat_qp *qp;
+ struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
+
+ QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
+ queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);
+
+ if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
+ (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
+ QAT_LOG(ERR, "Can't create qp for %u descriptors",
+ qat_qp_conf->nb_descriptors);
+ return -EINVAL;
+ }
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ QAT_LOG(ERR, "Could not find VF config space "
+ "(UIO driver attached?).");
+ return -EINVAL;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("qat PMD qp metadata",
+ sizeof(*qp), RTE_CACHE_LINE_SIZE,
+ qat_qp_conf->socket_id);
+ if (qp == NULL) {
+ QAT_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+ qp->nb_descriptors = qat_qp_conf->nb_descriptors;
+ qp->op_cookies = rte_zmalloc_socket("qat PMD op cookie pointer",
+ qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+ RTE_CACHE_LINE_SIZE, qat_qp_conf->socket_id);
+ if (qp->op_cookies == NULL) {
+ QAT_LOG(ERR, "Failed to alloc mem for cookie");
+ rte_free(qp);
+ return -ENOMEM;
+ }
+
+ qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
+ qp->enqueued = qp->dequeued = 0;
+
+ if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
+ ADF_RING_DIR_TX) != 0) {
+ QAT_LOG(ERR, "Tx queue create failed "
+ "queue_pair_id=%u", queue_pair_id);
+ goto create_err;
+ }
+
+ qp->max_inflights = ADF_MAX_INFLIGHTS(qp->tx_q.queue_size,
+ ADF_BYTES_TO_MSG_SIZE(qp->tx_q.msg_size));
+
+ if (qp->max_inflights < 2) {
+ QAT_LOG(ERR, "Invalid num inflights");
+ qat_queue_delete(&(qp->tx_q));
+ goto create_err;
+ }
+
+ if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
+ ADF_RING_DIR_RX) != 0) {
+ QAT_LOG(ERR, "Rx queue create failed "
+ "queue_pair_id=%hu", queue_pair_id);
+ qat_queue_delete(&(qp->tx_q));
+ goto create_err;
+ }
+
+ adf_configure_queues(qp);
+ adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
+ &qat_dev->arb_csr_lock);
+
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
+ "%s%d_cookies_%s_qp%hu",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qat_qp_conf->service_str, queue_pair_id);
+
+ QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
+ qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->op_cookie_pool == NULL)
+ qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+ qp->nb_descriptors,
+ qat_qp_conf->cookie_size, 64, 0,
+ NULL, NULL, NULL, NULL,
+ qat_dev->pci_dev->device.numa_node,
+ 0);
+ if (!qp->op_cookie_pool) {
+ QAT_LOG(ERR, "QAT PMD Cannot create"
+ " op mempool");
+ goto create_err;
+ }
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+ if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+ QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
+ goto create_err;
+ }
+ memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size);
+ }
+
+ qp->qat_dev_gen = qat_dev->qat_dev_gen;
+ qp->build_request = qat_qp_conf->build_request;
+ qp->service_type = qat_qp_conf->hw->service_type;
+ qp->qat_dev = qat_dev;
+
+ QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
+ queue_pair_id, op_cookie_pool_name);
+
+ *qp_addr = qp;
+ return 0;
+
+create_err:
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ return -EFAULT;
+}
+
+int qat_qp_release(struct qat_qp **qp_addr)
+{
+ struct qat_qp *qp = *qp_addr;
+ uint32_t i;
+
+ if (qp == NULL) {
+ QAT_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
+ qp->qat_dev->qat_dev_id);
+
+ /* Don't free memory if there are still responses to be processed */
+ if ((qp->enqueued - qp->dequeued) == 0) {
+ qat_queue_delete(&(qp->tx_q));
+ qat_queue_delete(&(qp->rx_q));
+ } else {
+ return -EAGAIN;
+ }
+
+ adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
+ &qp->qat_dev->arb_csr_lock);
+
+ for (i = 0; i < qp->nb_descriptors; i++)
+ rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ *qp_addr = NULL;
+ return 0;
+}
+
+
+static void qat_queue_delete(struct qat_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status = 0;
+
+ if (queue == NULL) {
+ QAT_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+ QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
+ queue->hw_queue_number, queue->memz_name);
+
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ /* Write an unused pattern to the queue memory. */
+ memset(queue->base_addr, 0x7F, queue->queue_size);
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ QAT_LOG(ERR, "Error %d on freeing queue %s",
+ status, queue->memz_name);
+ } else {
+ QAT_LOG(DEBUG, "queue %s doesn't exist",
+ queue->memz_name);
+ }
+}
+
+static int
+qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
+ struct qat_qp_config *qp_conf, uint8_t dir)
+{
+ uint64_t queue_base;
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ int ret = 0;
+ uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
+ qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
+ uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);
+
+ queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
+ queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
+ qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);
+
+ if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a memzone for the queue - create a unique name.
+ */
+ snprintf(queue->memz_name, sizeof(queue->memz_name),
+ "%s_%d_%s_%s_%d_%d",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qp_conf->service_str, "qp_mem",
+ queue->hw_bundle_number, queue->hw_queue_number);
+ qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ qat_dev->pci_dev->device.numa_node);
+ if (qp_mz == NULL) {
+ QAT_LOG(ERR, "Failed to allocate ring memzone");
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (char *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->iova;
+ if (qat_qp_check_queue_alignment(queue->base_phys_addr,
+ queue_size_bytes)) {
+ QAT_LOG(ERR, "Invalid alignment on queue create "
+ " 0x%"PRIx64"\n",
+ queue->base_phys_addr);
+ ret = -EFAULT;
+ goto queue_create_err;
+ }
+
+ if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
+ &(queue->queue_size)) != 0) {
+ QAT_LOG(ERR, "Invalid num inflights");
+ ret = -EINVAL;
+ goto queue_create_err;
+ }
+
+ queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
+ queue->head = 0;
+ queue->tail = 0;
+ queue->msg_size = desc_size;
+
+ /* For fast calculation of cookie index, relies on msg_size being 2^n */
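+	/* e.g. msg_size = 128 gives trailz = 7, cookie index = tail >> 7 */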
+ queue->trailz = __builtin_ctz(desc_size);
+
+ /*
+ * Write an unused pattern to the queue memory.
+ */
+ memset(queue->base_addr, 0x7F, queue_size_bytes);
+
+ queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+ queue->queue_size);
+
+ io_addr = pci_dev->mem_resource[0].addr;
+
+ WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_base);
+
+ QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
+ " nb msgs %u, msg_size %u, modulo mask %u",
+ queue->memz_name,
+ queue->queue_size, queue_size_bytes,
+ qp_conf->nb_descriptors, desc_size,
+ queue->modulo_mask);
+
+ return 0;
+
+queue_create_err:
+ rte_memzone_free(qp_mz);
+ return ret;
+}
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes)
+{
+ if (((queue_size_bytes - 1) & phys_addr) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *p_queue_size_for_csr)
+{
+ uint8_t i = ADF_MIN_RING_SIZE;
+
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) ==
+ (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+ *p_queue_size_for_csr = i;
+ return 0;
+ }
+ QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
+ return -EINVAL;
+}
+
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ rte_spinlock_lock(lock);
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value |= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
+}
+
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ rte_spinlock_lock(lock);
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value &= ~(0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
+}
+
+static void adf_configure_queues(struct qat_qp *qp)
+{
+ uint32_t queue_config;
+ struct qat_queue *queue = &qp->tx_q;
+
+ queue_config = BUILD_RING_CONFIG(queue->queue_size);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+
+ queue = &qp->rx_q;
+ queue_config =
+ BUILD_RESP_RING_CONFIG(queue->queue_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+}
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
+{
+ return data & modulo_mask;
+}
+
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
+{
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, q->tail);
+ q->csr_tail = q->tail;
+}
+
+static inline void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = q->csr_head;
+ new_head = q->head;
+ max_head = qp->nb_descriptors * q->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+ memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
+ }
+ q->nb_processed_responses = 0;
+ q->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, new_head);
+}
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int ret;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ {
+ /* dequeued can only be written by one thread, but it may not
+ * be this thread. As it's 4-byte aligned it will be read
+ * atomically here by any Intel CPU.
+ * enqueued can wrap before dequeued, but cannot
+ * lap it as var size of enq/deq (uint32_t) > var size of
+ * max_inflights (uint16_t). In reality inflights is never
+ * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
+ * On wrapping, the calculation still returns the correct
+ * positive value as all three vars are unsigned.
+ */
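+		/* Worked wrap example (illustrative): enqueued = 0x00000002,
+		 * dequeued = 0xFFFFFFFE gives inflights =
+		 * 0x00000002 - 0xFFFFFFFE = 4 (mod 2^32), the true count.
+		 */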
+ uint32_t inflights =
+ tmp_qp->enqueued - tmp_qp->dequeued;
+
+ if ((inflights + nb_ops) > tmp_qp->max_inflights) {
+ nb_ops_possible = tmp_qp->max_inflights - inflights;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+ /* QAT has plenty of work queued already, so don't waste cycles
+	 * enqueueing, wait until the application has gathered a bigger
+ * burst or some completed ops have been dequeued
+ */
+ if (tmp_qp->min_enq_burst_threshold && inflights >
+ QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
+ tmp_qp->min_enq_burst_threshold) {
+ tmp_qp->stats.threshold_hit_count++;
+ return 0;
+ }
+ }
+
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = tmp_qp->build_request(*ops, base_addr + tail,
+ tmp_qp->op_cookies[tail >> queue->trailz],
+ tmp_qp->qat_dev_gen);
+ if (ret != 0) {
+ tmp_qp->stats.enqueue_err_count++;
+ /* This message cannot be enqueued */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->enqueued += nb_ops_sent;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ txq_write_tail(tmp_qp, queue);
+ return nb_ops_sent;
+}
+
+/* Use this for compression only - but keep consistent with above common
+ * function as much as possible.
+ */
+uint16_t
+qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int nb_desc_to_build;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+
+ int descriptors_built, total_descriptors_built = 0;
+ int nb_remaining_descriptors;
+ int overflow = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ {
+ /* dequeued can only be written by one thread, but it may not
+ * be this thread. As it's 4-byte aligned it will be read
+ * atomically here by any Intel CPU.
+ * enqueued can wrap before dequeued, but cannot
+ * lap it as var size of enq/deq (uint32_t) > var size of
+ * max_inflights (uint16_t). In reality inflights is never
+ * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
+ * On wrapping, the calculation still returns the correct
+ * positive value as all three vars are unsigned.
+ */
+ uint32_t inflights =
+ tmp_qp->enqueued - tmp_qp->dequeued;
+
+ overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
+ if (overflow > 0) {
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ /* QAT has plenty of work queued already, so don't waste cycles
+		 * enqueueing, wait until the application has gathered a bigger
+ * burst or some completed ops have been dequeued
+ */
+ if (tmp_qp->min_enq_burst_threshold && inflights >
+ QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
+ tmp_qp->min_enq_burst_threshold) {
+ tmp_qp->stats.threshold_hit_count++;
+ return 0;
+ }
+ }
+
+	/* At this point nb_ops_possible assumes a 1:1 mapping
+	 * between ops and descriptors.
+	 * Fewer may be sent if some ops have to be split.
+	 * nb_ops_possible is <= burst size.
+	 * Find out how many spaces are actually available on the qp in case
+	 * more are needed.
+	 */
+	nb_remaining_descriptors = nb_ops_possible
+			+ ((overflow >= 0) ? 0 : -overflow);
+ QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
+ nb_ops, nb_remaining_descriptors);
+
+ while (nb_ops_sent != nb_ops_possible &&
+ nb_remaining_descriptors > 0) {
+ struct qat_comp_op_cookie *cookie =
+ tmp_qp->op_cookies[tail >> queue->trailz];
+
+ descriptors_built = 0;
+
+ QAT_DP_LOG(DEBUG, "--- data length: %u",
+ ((struct rte_comp_op *)*ops)->src.length);
+
+ nb_desc_to_build = qat_comp_build_request(*ops,
+ base_addr + tail, cookie, tmp_qp->qat_dev_gen);
+ QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
+ "%d ops sent, %d descriptors needed",
+ total_descriptors_built, nb_remaining_descriptors,
+ nb_ops_sent, nb_desc_to_build);
+
+ if (unlikely(nb_desc_to_build < 0)) {
+ /* this message cannot be enqueued */
+ tmp_qp->stats.enqueue_err_count++;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ } else if (unlikely(nb_desc_to_build > 1)) {
+ /* this op is too big and must be split - get more
+ * descriptors and retry
+ */
+
+ QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
+ nb_desc_to_build);
+
+ nb_remaining_descriptors -= nb_desc_to_build;
+ if (nb_remaining_descriptors >= 0) {
+ /* There are enough remaining descriptors
+ * so retry
+ */
+ int ret2 = qat_comp_build_multiple_requests(
+ *ops, tmp_qp, tail,
+ nb_desc_to_build);
+
+ if (unlikely(ret2 < 1)) {
+ QAT_DP_LOG(DEBUG,
+ "Failed to build (%d) descriptors, status %d",
+ nb_desc_to_build, ret2);
+
+ qat_comp_free_split_op_memzones(cookie,
+ nb_desc_to_build - 1);
+
+ tmp_qp->stats.enqueue_err_count++;
+
+ /* This message cannot be enqueued */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ } else {
+ descriptors_built = ret2;
+ total_descriptors_built +=
+ descriptors_built;
+ nb_remaining_descriptors -=
+ descriptors_built;
+ QAT_DP_LOG(DEBUG,
+ "Multiple descriptors (%d) built ok",
+ descriptors_built);
+ }
+ } else {
+ QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
+ "exceeds number of available descriptors (%d)",
+ nb_desc_to_build,
+ nb_remaining_descriptors +
+ nb_desc_to_build);
+
+ qat_comp_free_split_op_memzones(cookie,
+ nb_desc_to_build - 1);
+
+ /* Not enough extra descriptors */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+ } else {
+ descriptors_built = 1;
+ total_descriptors_built++;
+ nb_remaining_descriptors--;
+ QAT_DP_LOG(DEBUG, "Single descriptor built ok");
+ }
+
+ tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
+ queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->enqueued += total_descriptors_built;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ txq_write_tail(tmp_qp, queue);
+ return nb_ops_sent;
+}
+
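
The inflight accounting in the function above relies on well-defined C unsigned wraparound: enqueued and dequeued are free-running uint32_t counters, and their difference stays correct across wrap as long as the true inflight count fits in the type. A minimal self-contained sketch of that invariant (plain C, independent of the QAT structures):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Free-running unsigned counters, as in struct qat_qp: the
         * difference is the inflight count even after the enqueue
         * counter wraps, because uint32_t subtraction is modulo 2^32.
         */
        uint32_t enqueued = UINT32_MAX - 2;   /* about to wrap */
        uint32_t dequeued = UINT32_MAX - 10;  /* 8 msgs in flight */

        assert(enqueued - dequeued == 8);

        enqueued += 5;                        /* wraps around to 2 */
        assert(enqueued - dequeued == 13);    /* still correct */
        return 0;
    }
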
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ struct qat_queue *rx_queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ uint32_t head;
+ uint32_t op_resp_counter = 0, fw_resp_counter = 0;
+ uint8_t *resp_msg;
+ int nb_fw_responses;
+
+ rx_queue = &(tmp_qp->rx_q);
+ head = rx_queue->head;
+ resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+
+ while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+ op_resp_counter != nb_ops) {
+
+ nb_fw_responses = 1;
+
+ if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
+ qat_sym_process_response(ops, resp_msg);
+ else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
+ nb_fw_responses = qat_comp_process_response(
+ ops, resp_msg,
+ tmp_qp->op_cookies[head >> rx_queue->trailz],
+ &tmp_qp->stats.dequeue_err_count);
+#ifdef BUILD_QAT_ASYM
+ else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
+ qat_asym_process_response(ops, resp_msg,
+ tmp_qp->op_cookies[head >> rx_queue->trailz]);
+#endif
+
+ head = adf_modulo(head + rx_queue->msg_size,
+ rx_queue->modulo_mask);
+
+ resp_msg = (uint8_t *)rx_queue->base_addr + head;
+
+ if (nb_fw_responses) {
+ /* only move on to the next op if one was ready to
+ * return to the API
+ */
+ ops++;
+ op_resp_counter++;
+ }
+
+ /* A compression op may be broken up into multiple fw requests.
+ * Only count fw responses as complete once ALL the responses
+ * associated with an op have been processed, as the cookie
+ * data from the first response must remain available until
+ * the last firmware response has been handled.
+ */
+ fw_resp_counter += nb_fw_responses;
+
+ rx_queue->nb_processed_responses++;
+ }
+
+ tmp_qp->dequeued += fw_resp_counter;
+ tmp_qp->stats.dequeued_count += op_resp_counter;
+
+ rx_queue->head = head;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(tmp_qp, rx_queue);
+
+ QAT_DP_LOG(DEBUG, "Dequeue burst return: %u, QAT responses: %u",
+ op_resp_counter, fw_resp_counter);
+
+ return op_resp_counter;
+}
+
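
To see why the dequeue loop keeps two counters, consider a hypothetical burst in which the middle op was split into three firmware requests: the op is returned to the caller once, but the ring head and the dequeued counter must advance by three. A toy model of that accounting, with made-up per-op response counts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical burst: the middle op was split into 3 fw
         * requests, so its 3 responses are all counted when the
         * last one lands (the real loop reports 0 until then).
         */
        const int fw_per_op[] = { 1, 3, 1 };
        uint32_t op_resp = 0, fw_resp = 0;

        for (int i = 0; i < 3; i++) {
            if (fw_per_op[i] != 0)
                op_resp++;           /* op handed back to the API */
            fw_resp += fw_per_op[i]; /* ring slots consumed */
        }

        /* dequeued advances by fw responses (5); stats count ops (3) */
        printf("ops: %u, fw responses: %u\n", op_resp, fw_resp);
        return 0;
    }
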
+/* This is almost the same as qat_dequeue_op_burst, but without the atomics,
+ * the stats and the op handling. Dequeues one response.
+ */
+static uint8_t
+qat_cq_dequeue_response(struct qat_qp *qp, void *out_data)
+{
+ uint8_t result = 0;
+ uint8_t retries = 0;
+ struct qat_queue *queue = &(qp->rx_q);
+ struct icp_qat_fw_comn_resp *resp_msg = (struct icp_qat_fw_comn_resp *)
+ ((uint8_t *)queue->base_addr + queue->head);
+
+ while (retries++ < QAT_CQ_MAX_DEQ_RETRIES &&
+ *(uint32_t *)resp_msg == ADF_RING_EMPTY_SIG) {
+ /* poll for a response, up to QAT_CQ_MAX_DEQ_RETRIES * 20 ms in total */
+ rte_delay_ms(20);
+ }
+
+ if (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG) {
+ /* response received */
+ result = 1;
+
+ /* check status flag */
+ if (ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status) ==
+ ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
+ /* success */
+ memcpy(out_data, resp_msg, queue->msg_size);
+ } else {
+ memset(out_data, 0, queue->msg_size);
+ }
+
+ queue->head = adf_modulo(queue->head + queue->msg_size,
+ queue->modulo_mask);
+ rxq_free_desc(qp, queue);
+ }
+
+ return result;
+}
+
+/* Sends a NULL message and extracts the QAT fw version from the response.
+ * Used to determine detailed capabilities based on the fw version number.
+ * This assumes that there are no inflight messages, i.e. that there is space
+ * on the qp; one message is sent and only one response is collected.
+ * Returns the fw version number, 0 if the version is unknown, or a negative
+ * error code.
+ */
+int
+qat_cq_get_fw_version(struct qat_qp *qp)
+{
+ struct qat_queue *queue = &(qp->tx_q);
+ uint8_t *base_addr = (uint8_t *)queue->base_addr;
+ struct icp_qat_fw_comn_req null_msg;
+ struct icp_qat_fw_comn_resp response;
+
+ /* prepare the NULL request */
+ memset(&null_msg, 0, sizeof(null_msg));
+ null_msg.comn_hdr.hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ null_msg.comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+ null_msg.comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "NULL request", &null_msg, sizeof(null_msg));
+#endif
+
+ /* send the NULL request */
+ memcpy(base_addr + queue->tail, &null_msg, sizeof(null_msg));
+ queue->tail = adf_modulo(queue->tail + queue->msg_size,
+ queue->modulo_mask);
+ txq_write_tail(qp, queue);
+
+ /* receive a response */
+ if (qat_cq_dequeue_response(qp, &response)) {
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "NULL response:", &response,
+ sizeof(response));
+#endif
+ /* if LW0 bit 24 is set, then the fw version was returned */
+ if (QAT_FIELD_GET(response.comn_hdr.hdr_flags,
+ ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS,
+ ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK))
+ return response.resrvd[0]; /* return LW4 */
+ else
+ return 0; /* not set - we don't know fw version */
+ }
+
+ QAT_LOG(ERR, "No response received");
+ return -EINVAL;
+}
+
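
A caller would typically invoke qat_cq_get_fw_version() once at device configuration time to gate optional features. A hedged sketch of such a caller; the constant and the feature are invented for illustration, only qat_cq_get_fw_version() itself is from this patch:

    /* FW_VERSION_WITH_FEATURE_X and the feature it guards are
     * hypothetical, for illustration only.
     */
    #define FW_VERSION_WITH_FEATURE_X 42

    static int qat_has_feature_x(struct qat_qp *qp)
    {
        int fw_version = qat_cq_get_fw_version(qp);

        if (fw_version < 0)
            return fw_version;  /* no response, propagate the error */
        if (fw_version == 0)
            return 0;           /* version unknown, assume unsupported */
        return fw_version >= FW_VERSION_WITH_FEATURE_X;
    }
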
+__rte_weak int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+ void *op_cookie __rte_unused,
+ uint64_t *dequeue_err_count __rte_unused)
+{
+ return 0;
+}
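
The __rte_weak stub above lets this common layer link even when the compression PMD is not compiled in; when it is, the PMD's ordinary (strong) definition of the same symbol takes precedence at link time. Schematically (the body here is a placeholder, not the PMD's real logic):

    /* In the compression PMD source, a strong definition overrides
     * the weak stub at link time.
     */
    int
    qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
            uint64_t *dequeue_err_count)
    {
        /* ... parse the fw response and fill in *op ... */
        return 1;  /* placeholder: number of completed fw responses */
    }
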
diff --git a/src/spdk/dpdk/drivers/common/qat/qat_qp.h b/src/spdk/dpdk/drivers/common/qat/qat_qp.h
new file mode 100644
index 000000000..575d69059
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/qat/qat_qp.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_QP_H_
+#define _QAT_QP_H_
+
+#include "qat_common.h"
+#include "adf_transport_access_macros.h"
+
+struct qat_pci_device;
+
+#define QAT_CSR_HEAD_WRITE_THRESH 32U
+/* number of requests to accumulate before writing head CSR */
+
+#define QAT_QP_MIN_INFL_THRESHOLD 256
+
+typedef int (*build_request_t)(void *op,
+ uint8_t *req, void *op_cookie,
+ enum qat_device_gen qat_dev_gen);
+/**< Build a request from an op. */
+
+/**
+ * Structure with hardware data needed for creation of a queue pair.
+ */
+struct qat_qp_hw_data {
+ enum qat_service_type service_type;
+ uint8_t hw_bundle_num;
+ uint8_t tx_ring_num;
+ uint8_t rx_ring_num;
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+/**
+ * Structure with configuration data needed for creation of a queue pair.
+ */
+struct qat_qp_config {
+ const struct qat_qp_hw_data *hw;
+ uint32_t nb_descriptors;
+ uint32_t cookie_size;
+ int socket_id;
+ build_request_t build_request;
+ const char *service_str;
+};
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ void *base_addr; /* Base address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
+ uint32_t head; /* Shadow copy of the head */
+ uint32_t tail; /* Shadow copy of the tail */
+ uint32_t modulo_mask;
+ uint32_t msg_size;
+ uint32_t queue_size;
+ uint8_t trailz;
+ uint8_t hw_bundle_number;
+ uint8_t hw_queue_number;
+ /* HW queue, i.e. ring, offset on the bundle */
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+ uint16_t nb_processed_responses;
+ /* number of responses processed since last CSR head write */
+};
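
The head/tail arithmetic throughout this driver assumes the ring byte size is a power of two, so modulo_mask (size - 1) reduces an offset with a single AND. A minimal sketch of that idiom; ring_modulo() here is an illustrative stand-in for the driver's adf_modulo() macro:

    #include <stdint.h>

    /* For a power-of-two ring byte size, modulo_mask = size - 1,
     * so wrapping an offset is a single AND rather than a division.
     */
    static inline uint32_t ring_modulo(uint32_t data, uint32_t mask)
    {
        return data & mask;
    }

    /* e.g. advance the tail by one 128-byte message on an 8 KiB ring:
     * tail = ring_modulo(tail + 128, 8192 - 1);
     */
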
+
+struct qat_qp {
+ void *mmap_bar_addr;
+ struct qat_queue tx_q;
+ struct qat_queue rx_q;
+ struct qat_common_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint32_t nb_descriptors;
+ enum qat_device_gen qat_dev_gen;
+ build_request_t build_request;
+ enum qat_service_type service_type;
+ struct qat_pci_device *qat_dev;
+ /**< qat device this qp is on */
+ uint32_t enqueued;
+ uint32_t dequeued __rte_aligned(4);
+ uint16_t max_inflights;
+ uint16_t min_enq_burst_threshold;
+} __rte_cache_aligned;
+
+extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+extern const struct qat_qp_hw_data qat_gen3_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+
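
These per-generation tables map (service, qp index) to fixed ring numbers and message sizes. An illustrative shape of one entry; the field values below are invented for the example, not copied from the real gen1/gen3 tables:

    /* Illustrative only: values are placeholders. */
    static const struct qat_qp_hw_data example_sym_qp = {
        .service_type  = QAT_SERVICE_SYMMETRIC,
        .hw_bundle_num = 0,
        .tx_ring_num   = 0,
        .rx_ring_num   = 8,
        .tx_msg_size   = 128,
        .rx_msg_size   = 32,
    };
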
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+uint16_t
+qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+int
+qat_qp_release(struct qat_qp **qp_addr);
+
+int
+qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr, uint16_t queue_pair_id,
+ struct qat_qp_config *qat_qp_conf);
+
+int
+qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service);
+
+int
+qat_cq_get_fw_version(struct qat_qp *qp);
+
+/* Needed for the weak function */
+int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+ void *op_cookie __rte_unused,
+ uint64_t *dequeue_err_count __rte_unused);
+
+#endif /* _QAT_QP_H_ */
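
Putting the API together, a service PMD would set up a queue pair and drive it roughly as follows. This is a sketch under stated assumptions, not the exact sequence any particular PMD uses: the descriptor count, cookie size and service string are placeholders, and a real PMD supplies its own build_request callback rather than NULL.

    /* Sketch only: create a qp, push a burst, poll for completions,
     * then tear down. Error handling is trimmed.
     */
    static int example_qp_roundtrip(struct qat_pci_device *qat_dev,
            const struct qat_qp_hw_data *hw, void **ops, uint16_t n)
    {
        struct qat_qp *qp = NULL;
        struct qat_qp_config conf = {
            .hw = hw,
            .nb_descriptors = 4096,    /* placeholder */
            .cookie_size = 64,         /* placeholder */
            .socket_id = 0,
            .build_request = NULL,     /* PMD-specific builder */
            .service_str = "example",
        };
        uint16_t sent, done = 0;

        if (qat_qp_setup(qat_dev, &qp, 0, &conf) != 0)
            return -1;

        sent = qat_enqueue_op_burst(qp, ops, n);
        while (done < sent)
            done += qat_dequeue_op_burst(qp, ops + done, sent - done);

        return qat_qp_release(&qp);
    }
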