Diffstat (limited to 'src/seastar/dpdk/drivers/common')
46 files changed, 11299 insertions, 0 deletions
diff --git a/src/seastar/dpdk/drivers/common/Makefile b/src/seastar/dpdk/drivers/common/Makefile new file mode 100644 index 000000000..87b8a59a4 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/Makefile @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +# + +include $(RTE_SDK)/mk/rte.vars.mk + +ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO),y) +DIRS-y += cpt +endif + +ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL),yy) +DIRS-y += octeontx +endif + +MVEP-y := $(CONFIG_RTE_LIBRTE_MVPP2_PMD) +MVEP-y += $(CONFIG_RTE_LIBRTE_MVNETA_PMD) +MVEP-y += $(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) +ifneq (,$(findstring y,$(MVEP-y))) +DIRS-y += mvep +endif + +ifeq ($(CONFIG_RTE_LIBRTE_COMMON_DPAAX),y) +DIRS-y += dpaax +endif + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/src/seastar/dpdk/drivers/common/cpt/Makefile b/src/seastar/dpdk/drivers/common/cpt/Makefile new file mode 100644 index 000000000..2340aa961 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_common_cpt.a + +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/bus/pci +EXPORT_MAP := rte_common_cpt_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-y += cpt_pmd_ops_helper.c + +LDLIBS += -lrte_eal + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/seastar/dpdk/drivers/common/cpt/cpt_common.h b/src/seastar/dpdk/drivers/common/cpt/cpt_common.h new file mode 100644 index 000000000..32f23ace2 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/cpt_common.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _CPT_COMMON_H_ +#define _CPT_COMMON_H_ + +#include <rte_mempool.h> + +/* + * This file defines common macros and structs + */ + +#define TIME_IN_RESET_COUNT 5 + +/* Default command timeout in seconds */ +#define DEFAULT_COMMAND_TIMEOUT 4 + +#define CPT_COUNT_THOLD 32 +#define CPT_TIMER_THOLD 0x3F + +#define AE_TYPE 1 +#define SE_TYPE 2 + +#ifndef ROUNDUP4 +#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc) +#endif + +#ifndef ROUNDUP8 +#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8) +#endif + +#ifndef ROUNDUP16 +#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0) +#endif + +#ifndef __hot +#define __hot __attribute__((hot)) +#endif + +#define MOD_INC(i, l) ((i) == (l - 1) ? 
(i) = 0 : (i)++) + +struct cpt_qp_meta_info { + struct rte_mempool *pool; + int sg_mlen; + int lb_mlen; +}; + +struct rid { + /** Request id of a crypto operation */ + uintptr_t rid; +}; + +/* + * Pending queue structure + * + */ +struct pending_queue { + /** Pending requests count */ + uint64_t pending_count; + /** Array of pending requests */ + struct rid *rid_queue; + /** Tail of queue to be used for enqueue */ + uint16_t enq_tail; + /** Head of queue to be used for dequeue */ + uint16_t deq_head; +}; + +struct cpt_request_info { + /** Data path fields */ + uint64_t comp_baddr; + volatile uint64_t *completion_addr; + volatile uint64_t *alternate_caddr; + void *op; + struct { + uint64_t ei0; + uint64_t ei1; + uint64_t ei2; + uint64_t ei3; + } ist; + + /** Control path fields */ + uint64_t time_out; + uint8_t extra_time; +} __rte_cache_aligned; + +#endif /* _CPT_COMMON_H_ */ diff --git a/src/seastar/dpdk/drivers/common/cpt/cpt_hw_types.h b/src/seastar/dpdk/drivers/common/cpt/cpt_hw_types.h new file mode 100644 index 000000000..cff59c793 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/cpt_hw_types.h @@ -0,0 +1,522 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _CPT_HW_TYPES_H_ +#define _CPT_HW_TYPES_H_ + +#include <rte_byteorder.h> + +/* + * This file defines HRM specific structs. + * + */ + +#define CPT_VF_INTR_MBOX_MASK (1<<0) +#define CPT_VF_INTR_DOVF_MASK (1<<1) +#define CPT_VF_INTR_IRDE_MASK (1<<2) +#define CPT_VF_INTR_NWRP_MASK (1<<3) +#define CPT_VF_INTR_SWERR_MASK (1<<4) +#define CPT_VF_INTR_HWERR_MASK (1<<5) +#define CPT_VF_INTR_FAULT_MASK (1<<6) + +#define CPT_INST_SIZE (64) +#define CPT_NEXT_CHUNK_PTR_SIZE (8) + +/* + * CPT_INST_S software command definitions + * Words EI (0-3) + */ +typedef union { + uint64_t u64; + struct { + uint16_t opcode; + uint16_t param1; + uint16_t param2; + uint16_t dlen; + } s; +} vq_cmd_word0_t; + +typedef union { + uint64_t u64; + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t grp : 3; + uint64_t cptr : 61; +#else + uint64_t cptr : 61; + uint64_t grp : 3; +#endif + } s; +} vq_cmd_word3_t; + +typedef struct cpt_vq_command { + vq_cmd_word0_t cmd; + uint64_t dptr; + uint64_t rptr; + vq_cmd_word3_t cptr; +} cpt_vq_cmd_t; + +/** + * Structure cpt_inst_s + * + * CPT Instruction Structure + * This structure specifies the instruction layout. + * Instructions are stored in memory as little-endian unless + * CPT()_PF_Q()_CTL[INST_BE] is set. + */ +typedef union cpt_inst_s { + uint64_t u[8]; + struct cpt_inst_s_8s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_17_63 : 47; + /* [ 16: 16] Done interrupt. + * 0 = No interrupts related to this instruction. + * 1 = When the instruction completes,CPT()_VQ()_DONE[DONE] + * will be incremented, and based on the rules described + * there an interrupt may occur. + */ + uint64_t doneint : 1; + uint64_t reserved_0_15 : 16; +#else /* Word 0 - Little Endian */ + uint64_t reserved_0_15 : 16; + uint64_t doneint : 1; + uint64_t reserved_17_63 : 47; +#endif /* Word 0 - End */ +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 1 - Big Endian */ + /* [127: 64] Result IOVA. + * If nonzero, specifies where to write CPT_RES_S. + * If zero, no result structure will be written. + * Address must be 16-byte aligned. + * + * Bits <63:49> are ignored by hardware; software should + * use a sign-extended bit <48> for forward compatibility. 
+ */ + uint64_t res_addr : 64; +#else /* Word 1 - Little Endian */ + uint64_t res_addr : 64; +#endif /* Word 1 - End */ +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 2 - Big Endian */ + uint64_t reserved_172_191 : 20; + /* [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to + * use when CPT submits work to SSO. + * For the SSO to not discard the add-work request, FPA_PF_MAP() + * must map [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid. + */ + uint64_t grp : 10; + /* [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use + * when CPT submits work to SSO. + */ + uint64_t tt : 2; + /* [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when + * CPT submits work to SSO. + */ + uint64_t tag : 32; +#else /* Word 2 - Little Endian */ + uint64_t tag : 32; + uint64_t tt : 2; + uint64_t grp : 10; + uint64_t reserved_172_191 : 20; +#endif /* Word 2 - End */ +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 3 - Big Endian */ + /** [255:192] If [WQ_PTR] is nonzero, it is a pointer to a + * work-queue entry that CPT submits work to SSO after all + * context, output data, and result write operations are + * visible to other CNXXXX units and the cores. + * Bits <2:0> must be zero. + * Bits <63:49> are ignored by hardware; software should use a + * sign-extended bit <48> for forward compatibility. + * Internal:Bits <63:49>, <2:0> are ignored by hardware, + * treated as always 0x0. + **/ + uint64_t wq_ptr : 64; +#else /* Word 3 - Little Endian */ + uint64_t wq_ptr : 64; +#endif /* Word 3 - End */ +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 4 - Big Endian */ + union { + /** [319:256] Engine instruction word 0. Passed to the + * AE/SE. + **/ + uint64_t ei0 : 64; + vq_cmd_word0_t vq_cmd_w0; + }; +#else /* Word 4 - Little Endian */ + union { + uint64_t ei0 : 64; + vq_cmd_word0_t vq_cmd_w0; + }; +#endif /* Word 4 - End */ +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 5 - Big Endian */ + union { + /** [383:320] Engine instruction word 1. Passed to the + * AE/SE. + **/ + uint64_t ei1 : 64; + uint64_t dptr; + }; +#else /* Word 5 - Little Endian */ + union { + uint64_t ei1 : 64; + uint64_t dptr; + }; +#endif /* Word 5 - End */ +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 6 - Big Endian */ + union { + /** [447:384] Engine instruction word 2. Passed to the + * AE/SE. + **/ + uint64_t ei2 : 64; + uint64_t rptr; + }; +#else /* Word 6 - Little Endian */ + union { + uint64_t ei2 : 64; + uint64_t rptr; + }; +#endif /* Word 6 - End */ +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 7 - Big Endian */ + union { + /** [511:448] Engine instruction word 3. Passed to the + * AE/SE. + **/ + uint64_t ei3 : 64; + vq_cmd_word3_t vq_cmd_w3; + }; +#else /* Word 7 - Little Endian */ + union { + uint64_t ei3 : 64; + vq_cmd_word3_t vq_cmd_w3; + }; +#endif /* Word 7 - End */ + } s8x; +} cpt_inst_s_t; + +/** + * Structure cpt_res_s + * + * CPT Result Structure + * The CPT coprocessor writes the result structure after it completes a + * CPT_INST_S instruction. The result structure is exactly 16 bytes, and each + * instruction completion produces exactly one result structure. + * + * This structure is stored in memory as little-endian unless + * CPT()_PF_Q()_CTL[INST_BE] is set. + */ +typedef union cpt_res_s { + uint64_t u[2]; + struct cpt_res_s_8s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_17_63 : 47; + /** [ 16: 16] Done interrupt. This bit is copied from the + * corresponding instruction's CPT_INST_S[DONEINT]. 
+ **/ + uint64_t doneint : 1; + uint64_t reserved_8_15 : 8; + /** [ 7: 0] Indicates completion/error status of the CPT + * coprocessor for the associated instruction, as enumerated by + * CPT_COMP_E. Core software may write the memory location + * containing [COMPCODE] to 0x0 before ringing the doorbell, and + * then poll for completion by checking for a nonzero value. + * + * Once the core observes a nonzero [COMPCODE] value in this + * case, the CPT coprocessor will have also completed L2/DRAM + * write operations. + **/ + uint64_t compcode : 8; +#else /* Word 0 - Little Endian */ + uint64_t compcode : 8; + uint64_t reserved_8_15 : 8; + uint64_t doneint : 1; + uint64_t reserved_17_63 : 47; +#endif /* Word 0 - End */ +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 1 - Big Endian */ + uint64_t reserved_64_127 : 64; +#else /* Word 1 - Little Endian */ + uint64_t reserved_64_127 : 64; +#endif /* Word 1 - End */ + } s8x; +} cpt_res_s_t; + +/** + * Register (NCB) cpt#_vq#_ctl + * + * CPT VF Queue Control Registers + * This register configures queues. This register should be changed (other than + * clearing [ENA]) only when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]). + */ +typedef union { + uint64_t u; + struct cptx_vqx_ctl_s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_1_63 : 63; + /** [ 0: 0](R/W/H) Enables the logical instruction queue. + * See also CPT()_PF_Q()_CTL[CONT_ERR] and + * CPT()_VQ()_INPROG[INFLIGHT]. + * 1 = Queue is enabled. + * 0 = Queue is disabled. + **/ + uint64_t ena : 1; +#else /* Word 0 - Little Endian */ + uint64_t ena : 1; + uint64_t reserved_1_63 : 63; +#endif /* Word 0 - End */ + } s; +} cptx_vqx_ctl_t; + +/** + * Register (NCB) cpt#_vq#_done + * + * CPT Queue Done Count Registers + * These registers contain the per-queue instruction done count. + */ +typedef union { + uint64_t u; + struct cptx_vqx_done_s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_20_63 : 44; + /** [ 19: 0](R/W/H) Done count. When CPT_INST_S[DONEINT] set + * and that instruction completes,CPT()_VQ()_DONE[DONE] is + * incremented when the instruction finishes. Write to this + * field are for diagnostic use only; instead software writes + * CPT()_VQ()_DONE_ACK with the number of decrements for this + * field. + * + * Interrupts are sent as follows: + * + * When CPT()_VQ()_DONE[DONE] = 0, then no results are pending, + * the interrupt coalescing timer is held to zero, and an + * interrupt is not sent. + * + * When CPT()_VQ()_DONE[DONE] != 0, then the interrupt + * coalescing timer counts. If the counter is >= CPT()_VQ()_DONE + * _WAIT[TIME_WAIT]*1024, or CPT()_VQ()_DONE[DONE] >= CPT()_VQ() + * _DONE_WAIT[NUM_WAIT], i.e. enough time has passed or enough + * results have arrived, then the interrupt is sent. Otherwise, + * it is not sent due to coalescing. + * + * When CPT()_VQ()_DONE_ACK is written (or CPT()_VQ()_DONE is + * written but this is not typical), the interrupt coalescing + * timer restarts. Note after decrementing this interrupt + * equation is recomputed, for example if CPT()_VQ()_DONE[DONE] + * >= CPT()_VQ()_DONE_WAIT[NUM_WAIT] and because the timer is + * zero, the interrupt will be resent immediately. (This covers + * the race case between software acknowledging an interrupt and + * a result returning.) + * + * When CPT()_VQ()_DONE_ENA_W1S[DONE] = 0, interrupts are not + * sent, but the counting described above still occurs. 
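 *
 * (Editor's illustration, not part of the original description: with
 * [TIME_WAIT] = 0x10 and [NUM_WAIT] = 0x20, an interrupt is sent once
 * 32 results are pending, or once at least one result has been
 * pending for 16*1024 coalescing-timer ticks, whichever happens
 * first.)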
+ * + * Since CPT instructions complete out-of-order, if software is + * using completion interrupts the suggested scheme is to + * request a DONEINT on each request, and when an interrupt + * arrives perform a "greedy" scan for completions; even if a + * later command is acknowledged first this will not result in + * missing a completion. + * + * Software is responsible for making sure [DONE] does not + * overflow; for example by insuring there are not more than + * 2^20-1 instructions in flight that may request interrupts. + **/ + uint64_t done : 20; +#else /* Word 0 - Little Endian */ + uint64_t done : 20; + uint64_t reserved_20_63 : 44; +#endif /* Word 0 - End */ + } s; +} cptx_vqx_done_t; + +/** + * Register (NCB) cpt#_vq#_done_ack + * + * CPT Queue Done Count Ack Registers + * This register is written by software to acknowledge interrupts. + */ +typedef union { + uint64_t u; + struct cptx_vqx_done_ack_s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_20_63 : 44; + /** [ 19: 0](R/W/H) Number of decrements to CPT()_VQ()_DONE + * [DONE]. Reads CPT()_VQ()_DONE[DONE]. + * + * Written by software to acknowledge interrupts. If CPT()_VQ()_ + * DONE[DONE] is still nonzero the interrupt will be re-sent if + * the conditions described in CPT()_VQ()_DONE[DONE] are + * satisfied. + **/ + uint64_t done_ack : 20; +#else /* Word 0 - Little Endian */ + uint64_t done_ack : 20; + uint64_t reserved_20_63 : 44; +#endif /* Word 0 - End */ + } s; +} cptx_vqx_done_ack_t; + +/** + * Register (NCB) cpt#_vq#_done_wait + * + * CPT Queue Done Interrupt Coalescing Wait Registers + * Specifies the per queue interrupt coalescing settings. + */ +typedef union { + uint64_t u; + struct cptx_vqx_done_wait_s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_48_63 : 16; + /** [ 47: 32](R/W) Time hold-off. When CPT()_VQ()_DONE[DONE] = + * 0, or CPT()_VQ()_DONE_ACK is written a timer is cleared. When + * the timer reaches [TIME_WAIT]*1024 then interrupt coalescing + * ends; see CPT()_VQ()_DONE[DONE]. If 0x0, time coalescing is + * disabled. + **/ + uint64_t time_wait : 16; + uint64_t reserved_20_31 : 12; + /** [ 19: 0](R/W) Number of messages hold-off. When + * CPT()_VQ()_DONE[DONE] >= [NUM_WAIT] then interrupt coalescing + * ends; see CPT()_VQ()_DONE[DONE]. If 0x0, same behavior as + * 0x1. + **/ + uint64_t num_wait : 20; +#else /* Word 0 - Little Endian */ + uint64_t num_wait : 20; + uint64_t reserved_20_31 : 12; + uint64_t time_wait : 16; + uint64_t reserved_48_63 : 16; +#endif /* Word 0 - End */ + } s; +} cptx_vqx_done_wait_t; + +/** + * Register (NCB) cpt#_vq#_doorbell + * + * CPT Queue Doorbell Registers + * Doorbells for the CPT instruction queues. + */ +typedef union { + uint64_t u; + struct cptx_vqx_doorbell_s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_20_63 : 44; + uint64_t dbell_cnt : 20; + /** [ 19: 0](R/W/H) Number of instruction queue 64-bit words + * to add to the CPT instruction doorbell count. Readback value + * is the the current number of pending doorbell requests. + * + * If counter overflows CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. + * + * To reset the count back to zero, write one to clear + * CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF], then write a value + * of 2^20 minus the read [DBELL_CNT], then write one to + * CPT()_VQ()_MISC_INT_W1C[DBELL_DOVF] and + * CPT()_VQ()_MISC_INT_ENA_W1S[DBELL_DOVF]. + * + * Must be a multiple of 8. 
All CPT instructions are 8 words + * and require a doorbell count of multiple of 8. + **/ +#else /* Word 0 - Little Endian */ + uint64_t dbell_cnt : 20; + uint64_t reserved_20_63 : 44; +#endif /* Word 0 - End */ + } s; +} cptx_vqx_doorbell_t; + +/** + * Register (NCB) cpt#_vq#_inprog + * + * CPT Queue In Progress Count Registers + * These registers contain the per-queue instruction in flight registers. + */ +typedef union { + uint64_t u; + struct cptx_vqx_inprog_s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_8_63 : 56; + /** [ 7: 0](RO/H) Inflight count. Counts the number of + * instructions for the VF for which CPT is fetching, executing + * or responding to instructions. However this does not include + * any interrupts that are awaiting software handling + * (CPT()_VQ()_DONE[DONE] != 0x0). + * + * A queue may not be reconfigured until: + * 1. CPT()_VQ()_CTL[ENA] is cleared by software. + * 2. [INFLIGHT] is polled until equals to zero. + **/ + uint64_t inflight : 8; +#else /* Word 0 - Little Endian */ + uint64_t inflight : 8; + uint64_t reserved_8_63 : 56; +#endif /* Word 0 - End */ + } s; +} cptx_vqx_inprog_t; + +/** + * Register (NCB) cpt#_vq#_misc_int + * + * CPT Queue Misc Interrupt Register + * These registers contain the per-queue miscellaneous interrupts. + */ +typedef union { + uint64_t u; + struct cptx_vqx_misc_int_s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_7_63 : 57; + /** [ 6: 6](R/W1C/H) Translation fault detected. */ + uint64_t fault : 1; + /** [ 5: 5](R/W1C/H) Hardware error from engines. */ + uint64_t hwerr : 1; + /** [ 4: 4](R/W1C/H) Software error from engines. */ + uint64_t swerr : 1; + /** [ 3: 3](R/W1C/H) NCB result write response error. */ + uint64_t nwrp : 1; + /** [ 2: 2](R/W1C/H) Instruction NCB read response error. */ + uint64_t irde : 1; + /** [ 1: 1](R/W1C/H) Doorbell overflow. */ + uint64_t dovf : 1; + /** [ 0: 0](R/W1C/H) PF to VF mailbox interrupt. Set when + * CPT()_VF()_PF_MBOX(0) is written. + **/ + uint64_t mbox : 1; +#else /* Word 0 - Little Endian */ + uint64_t mbox : 1; + uint64_t dovf : 1; + uint64_t irde : 1; + uint64_t nwrp : 1; + uint64_t swerr : 1; + uint64_t hwerr : 1; + uint64_t fault : 1; + uint64_t reserved_5_63 : 59; +#endif /* Word 0 - End */ + } s; +} cptx_vqx_misc_int_t; + +/** + * Register (NCB) cpt#_vq#_saddr + * + * CPT Queue Starting Buffer Address Registers + * These registers set the instruction buffer starting address. + */ +typedef union { + uint64_t u; + struct cptx_vqx_saddr_s { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */ + uint64_t reserved_49_63 : 15; + /** [ 48: 6](R/W/H) Instruction buffer IOVA <48:6> + * (64-byte aligned). When written, it is the initial buffer + * starting address; when read, it is the next read pointer to + * be requested from L2C. The PTR field is overwritten with the + * next pointer each time that the command buffer segment is + * exhausted. New commands will then be read from the newly + * specified command buffer pointer. 
+ **/ + uint64_t ptr : 43; + uint64_t reserved_0_5 : 6; +#else /* Word 0 - Little Endian */ + uint64_t reserved_0_5 : 6; + uint64_t ptr : 43; + uint64_t reserved_49_63 : 15; +#endif /* Word 0 - End */ + } s; +} cptx_vqx_saddr_t; + +#endif /*_CPT_HW_TYPES_H_ */ diff --git a/src/seastar/dpdk/drivers/common/cpt/cpt_mcode_defines.h b/src/seastar/dpdk/drivers/common/cpt/cpt_mcode_defines.h new file mode 100644 index 000000000..c0adbd55f --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/cpt_mcode_defines.h @@ -0,0 +1,398 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _CPT_MCODE_DEFINES_H_ +#define _CPT_MCODE_DEFINES_H_ + +#include <rte_byteorder.h> +#include <rte_memory.h> + +/* + * This file defines macros and structures according to microcode spec + * + */ +/* SE opcodes */ +#define CPT_MAJOR_OP_FC 0x33 +#define CPT_MAJOR_OP_HASH 0x34 +#define CPT_MAJOR_OP_HMAC 0x35 +#define CPT_MAJOR_OP_ZUC_SNOW3G 0x37 +#define CPT_MAJOR_OP_KASUMI 0x38 +#define CPT_MAJOR_OP_MISC 0x01 + +#define CPT_BYTE_16 16 +#define CPT_BYTE_24 24 +#define CPT_BYTE_32 32 +#define CPT_MAX_SG_IN_OUT_CNT 32 +#define CPT_MAX_SG_CNT (CPT_MAX_SG_IN_OUT_CNT/2) + +#define COMPLETION_CODE_SIZE 8 +#define COMPLETION_CODE_INIT 0 + +#define SG_LIST_HDR_SIZE (8u) +#define SG_ENTRY_SIZE sizeof(sg_comp_t) + +#define CPT_DMA_MODE (1 << 7) + +#define CPT_FROM_CTX 0 +#define CPT_FROM_DPTR 1 + +#define FC_GEN 0x1 +#define ZUC_SNOW3G 0x2 +#define KASUMI 0x3 +#define HASH_HMAC 0x4 + +#define ZS_EA 0x1 +#define ZS_IA 0x2 +#define K_F8 0x4 +#define K_F9 0x8 + +#define CPT_OP_CIPHER_ENCRYPT 0x1 +#define CPT_OP_CIPHER_DECRYPT 0x2 +#define CPT_OP_CIPHER_MASK 0x3 + +#define CPT_OP_AUTH_VERIFY 0x4 +#define CPT_OP_AUTH_GENERATE 0x8 +#define CPT_OP_AUTH_MASK 0xC + +#define CPT_OP_ENCODE (CPT_OP_CIPHER_ENCRYPT | CPT_OP_AUTH_GENERATE) +#define CPT_OP_DECODE (CPT_OP_CIPHER_DECRYPT | CPT_OP_AUTH_VERIFY) + +/* #define CPT_ALWAYS_USE_SG_MODE */ +#define CPT_ALWAYS_USE_SEPARATE_BUF + +/* + * Parameters for Flexi Crypto + * requests + */ +#define VALID_AAD_BUF 0x01 +#define VALID_MAC_BUF 0x02 +#define VALID_IV_BUF 0x04 +#define SINGLE_BUF_INPLACE 0x08 +#define SINGLE_BUF_HEADTAILROOM 0x10 + +#define ENCR_IV_OFFSET(__d_offs) ((__d_offs >> 32) & 0xffff) +#define ENCR_OFFSET(__d_offs) ((__d_offs >> 16) & 0xffff) +#define AUTH_OFFSET(__d_offs) (__d_offs & 0xffff) +#define ENCR_DLEN(__d_lens) (__d_lens >> 32) +#define AUTH_DLEN(__d_lens) (__d_lens & 0xffffffff) + +/* FC offset_control at start of DPTR in bytes */ +#define OFF_CTRL_LEN 8 /**< bytes */ + +typedef enum { + MD5_TYPE = 1, + SHA1_TYPE = 2, + SHA2_SHA224 = 3, + SHA2_SHA256 = 4, + SHA2_SHA384 = 5, + SHA2_SHA512 = 6, + GMAC_TYPE = 7, + XCBC_TYPE = 8, + SHA3_SHA224 = 10, + SHA3_SHA256 = 11, + SHA3_SHA384 = 12, + SHA3_SHA512 = 13, + SHA3_SHAKE256 = 14, + SHA3_SHAKE512 = 15, + + /* These are only for software use */ + ZUC_EIA3 = 0x90, + SNOW3G_UIA2 = 0x91, + KASUMI_F9_CBC = 0x92, + KASUMI_F9_ECB = 0x93, +} mc_hash_type_t; + +typedef enum { + /* To support passthrough */ + PASSTHROUGH = 0x0, + /* + * These are defined by MC for Flexi crypto + * for field of 4 bits + */ + DES3_CBC = 0x1, + DES3_ECB = 0x2, + AES_CBC = 0x3, + AES_ECB = 0x4, + AES_CFB = 0x5, + AES_CTR = 0x6, + AES_GCM = 0x7, + AES_XTS = 0x8, + + /* These are only for software use */ + ZUC_EEA3 = 0x90, + SNOW3G_UEA2 = 0x91, + KASUMI_F8_CBC = 0x92, + KASUMI_F8_ECB = 0x93, +} mc_cipher_type_t; + +typedef enum { + AES_128_BIT = 0x1, + AES_192_BIT = 0x2, + AES_256_BIT = 0x3 +} mc_aes_type_t; + 
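/*
 * Editor's sketch, not part of this diff: how a caller might pack the
 * d_offs/d_lens words that the ENCR_IV_OFFSET/ENCR_OFFSET/AUTH_OFFSET
 * and ENCR_DLEN/AUTH_DLEN macros above unpack. All names below are
 * hypothetical; only <stdint.h> types are assumed.
 */
static inline void
cpt_pack_offs_lens_example(uint64_t *d_offs, uint64_t *d_lens)
{
	uint16_t encr_iv_offset = 16;  /* bits 47:32 of d_offs */
	uint16_t encr_offset    = 24;  /* bits 31:16 of d_offs */
	uint16_t auth_offset    =  8;  /* bits 15:0  of d_offs */
	uint32_t encr_data_len  = 64;  /* bits 63:32 of d_lens */
	uint32_t auth_data_len  = 80;  /* bits 31:0  of d_lens */

	*d_offs = ((uint64_t)encr_iv_offset << 32) |
		  ((uint64_t)encr_offset << 16) | auth_offset;
	*d_lens = ((uint64_t)encr_data_len << 32) | auth_data_len;
}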
+typedef enum { + /* Microcode errors */ + NO_ERR = 0x00, + ERR_OPCODE_UNSUPPORTED = 0x01, + + /* SCATTER GATHER */ + ERR_SCATTER_GATHER_WRITE_LENGTH = 0x02, + ERR_SCATTER_GATHER_LIST = 0x03, + ERR_SCATTER_GATHER_NOT_SUPPORTED = 0x04, + + /* SE GC */ + ERR_GC_LENGTH_INVALID = 0x41, + ERR_GC_RANDOM_LEN_INVALID = 0x42, + ERR_GC_DATA_LEN_INVALID = 0x43, + ERR_GC_DRBG_TYPE_INVALID = 0x44, + ERR_GC_CTX_LEN_INVALID = 0x45, + ERR_GC_CIPHER_UNSUPPORTED = 0x46, + ERR_GC_AUTH_UNSUPPORTED = 0x47, + ERR_GC_OFFSET_INVALID = 0x48, + ERR_GC_HASH_MODE_UNSUPPORTED = 0x49, + ERR_GC_DRBG_ENTROPY_LEN_INVALID = 0x4a, + ERR_GC_DRBG_ADDNL_LEN_INVALID = 0x4b, + ERR_GC_ICV_MISCOMPARE = 0x4c, + ERR_GC_DATA_UNALIGNED = 0x4d, + + /* API Layer */ + ERR_BAD_ALT_CCODE = 0xfd, + ERR_REQ_PENDING = 0xfe, + ERR_REQ_TIMEOUT = 0xff, + + ERR_BAD_INPUT_LENGTH = (0x40000000 | 384), /* 0x40000180 */ + ERR_BAD_KEY_LENGTH, + ERR_BAD_KEY_HANDLE, + ERR_BAD_CONTEXT_HANDLE, + ERR_BAD_SCALAR_LENGTH, + ERR_BAD_DIGEST_LENGTH, + ERR_BAD_INPUT_ARG, + ERR_BAD_RECORD_PADDING, + ERR_NB_REQUEST_PENDING, + ERR_EIO, + ERR_ENODEV, +} mc_error_code_t; + +/** + * Enumeration cpt_comp_e + * + * CPT Completion Enumeration + * Enumerates the values of CPT_RES_S[COMPCODE]. + */ +typedef enum { + CPT_8X_COMP_E_NOTDONE = (0x00), + CPT_8X_COMP_E_GOOD = (0x01), + CPT_8X_COMP_E_FAULT = (0x02), + CPT_8X_COMP_E_SWERR = (0x03), + CPT_8X_COMP_E_HWERR = (0x04), + CPT_8X_COMP_E_LAST_ENTRY = (0xFF) +} cpt_comp_e_t; + +typedef struct sglist_comp { + union { + uint64_t len; + struct { + uint16_t len[4]; + } s; + } u; + uint64_t ptr[4]; +} sg_comp_t; + +struct cpt_sess_misc { + /** CPT opcode */ + uint16_t cpt_op:4; + /** ZUC, SNOW3G & KASUMI flags */ + uint16_t zsk_flag:4; + /** Flag for AES GCM */ + uint16_t aes_gcm:1; + /** Flag for AES CTR */ + uint16_t aes_ctr:1; + /** Flag for NULL cipher/auth */ + uint16_t is_null:1; + /** Flag for GMAC */ + uint16_t is_gmac:1; + /** AAD length */ + uint16_t aad_length; + /** MAC len in bytes */ + uint8_t mac_len; + /** IV length in bytes */ + uint8_t iv_length; + /** Auth IV length in bytes */ + uint8_t auth_iv_length; + /** Reserved field */ + uint8_t rsvd1; + /** IV offset in bytes */ + uint16_t iv_offset; + /** Auth IV offset in bytes */ + uint16_t auth_iv_offset; + /** Salt */ + uint32_t salt; + /** Context DMA address */ + phys_addr_t ctx_dma_addr; +}; + +typedef union { + uint64_t flags; + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t enc_cipher : 4; + uint64_t reserved1 : 1; + uint64_t aes_key : 2; + uint64_t iv_source : 1; + uint64_t hash_type : 4; + uint64_t reserved2 : 3; + uint64_t auth_input_type : 1; + uint64_t mac_len : 8; + uint64_t reserved3 : 8; + uint64_t encr_offset : 16; + uint64_t iv_offset : 8; + uint64_t auth_offset : 8; +#else + uint64_t auth_offset : 8; + uint64_t iv_offset : 8; + uint64_t encr_offset : 16; + uint64_t reserved3 : 8; + uint64_t mac_len : 8; + uint64_t auth_input_type : 1; + uint64_t reserved2 : 3; + uint64_t hash_type : 4; + uint64_t iv_source : 1; + uint64_t aes_key : 2; + uint64_t reserved1 : 1; + uint64_t enc_cipher : 4; +#endif + } e; +} encr_ctrl_t; + +typedef struct { + encr_ctrl_t enc_ctrl; + uint8_t encr_key[32]; + uint8_t encr_iv[16]; +} mc_enc_context_t; + +typedef struct { + uint8_t ipad[64]; + uint8_t opad[64]; +} mc_fc_hmac_context_t; + +typedef struct { + mc_enc_context_t enc; + mc_fc_hmac_context_t hmac; +} mc_fc_context_t; + +typedef struct { + uint8_t encr_auth_iv[16]; + uint8_t ci_key[16]; + uint8_t zuc_const[32]; +} mc_zuc_snow3g_ctx_t; + +typedef struct { 
+ uint8_t reg_A[8]; + uint8_t ci_key[16]; +} mc_kasumi_ctx_t; + +struct cpt_ctx { + /* Below fields are accessed by sw */ + uint64_t enc_cipher :8; + uint64_t hash_type :8; + uint64_t mac_len :8; + uint64_t auth_key_len :8; + uint64_t fc_type :4; + uint64_t hmac :1; + uint64_t zsk_flags :3; + uint64_t k_ecb :1; + uint64_t snow3g :1; + uint64_t rsvd :22; + /* Below fields are accessed by hardware */ + union { + mc_fc_context_t fctx; + mc_zuc_snow3g_ctx_t zs_ctx; + mc_kasumi_ctx_t k_ctx; + }; + uint8_t auth_key[64]; +}; + +/* Buffer pointer */ +typedef struct buf_ptr { + void *vaddr; + phys_addr_t dma_addr; + uint32_t size; + uint32_t resv; +} buf_ptr_t; + +/* IOV Pointer */ +typedef struct{ + int buf_cnt; + buf_ptr_t bufs[0]; +} iov_ptr_t; + +typedef union opcode_info { + uint16_t flags; + struct { + uint8_t major; + uint8_t minor; + } s; +} opcode_info_t; + +typedef struct fc_params { + /* 0th cache line */ + union { + buf_ptr_t bufs[1]; + struct { + iov_ptr_t *src_iov; + iov_ptr_t *dst_iov; + }; + }; + void *iv_buf; + void *auth_iv_buf; + buf_ptr_t meta_buf; + buf_ptr_t ctx_buf; + uint64_t rsvd2; + + /* 1st cache line */ + buf_ptr_t aad_buf; + buf_ptr_t mac_buf; + +} fc_params_t; + +/* + * Parameters for digest + * generate requests + * Only src_iov, op, ctx_buf, mac_buf, prep_req + * meta_buf, auth_data_len are used for digest gen. + */ +typedef struct fc_params digest_params_t; + +/* Cipher Algorithms */ +typedef mc_cipher_type_t cipher_type_t; + +/* Auth Algorithms */ +typedef mc_hash_type_t auth_type_t; + +/* Helper macros */ + +#define CPT_P_ENC_CTRL(fctx) fctx->enc.enc_ctrl.e + +#define SRC_IOV_SIZE \ + (sizeof(iov_ptr_t) + (sizeof(buf_ptr_t) * CPT_MAX_SG_CNT)) +#define DST_IOV_SIZE \ + (sizeof(iov_ptr_t) + (sizeof(buf_ptr_t) * CPT_MAX_SG_CNT)) + +#define SESS_PRIV(__sess) \ + (void *)((uint8_t *)__sess + sizeof(struct cpt_sess_misc)) + +/* + * Get the session size + * + * @return + * - session size + */ +static __rte_always_inline unsigned int +cpt_get_session_size(void) +{ + unsigned int ctx_len = sizeof(struct cpt_ctx); + return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8)); +} +#endif /* _CPT_MCODE_DEFINES_H_ */ diff --git a/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_logs.h b/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_logs.h new file mode 100644 index 000000000..4cbec4e36 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_logs.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _CPT_PMD_LOGS_H_ +#define _CPT_PMD_LOGS_H_ + +#include <rte_log.h> + +/* + * This file defines log macros + */ + +#define CPT_PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, cpt_logtype, \ + "cpt: %s(): " fmt "\n", __func__, ##args) + +#define CPT_PMD_INIT_FUNC_TRACE() CPT_PMD_DRV_LOG_RAW(DEBUG, " >>") + +#define CPT_LOG_INFO(fmt, args...) \ + CPT_PMD_DRV_LOG_RAW(INFO, fmt, ## args) +#define CPT_LOG_WARN(fmt, args...) \ + CPT_PMD_DRV_LOG_RAW(WARNING, fmt, ## args) +#define CPT_LOG_ERR(fmt, args...) \ + CPT_PMD_DRV_LOG_RAW(ERR, fmt, ## args) + +/* + * DP logs, toggled out at compile time if level lower than current level. + * DP logs would be logged under 'PMD' type. So for dynamic logging, the + * level of 'pmd' has to be used. + */ +#define CPT_LOG_DP(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt "\n", ## args) + +#define CPT_LOG_DP_DEBUG(fmt, args...) \ + CPT_LOG_DP(DEBUG, fmt, ## args) +#define CPT_LOG_DP_INFO(fmt, args...) 
\ + CPT_LOG_DP(INFO, fmt, ## args) +#define CPT_LOG_DP_WARN(fmt, args...) \ + CPT_LOG_DP(WARNING, fmt, ## args) +#define CPT_LOG_DP_ERR(fmt, args...) \ + CPT_LOG_DP(ERR, fmt, ## args) + +/* + * cpt_logtype will be used for common logging. This field would be initialized + * by otx_* driver routines during PCI probe. + */ +int cpt_logtype; + +#endif /* _CPT_PMD_LOGS_H_ */ diff --git a/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.c b/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.c new file mode 100644 index 000000000..1c18180f8 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.c @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#include <rte_common.h> + +#include "cpt_common.h" +#include "cpt_hw_types.h" +#include "cpt_mcode_defines.h" +#include "cpt_pmd_ops_helper.h" + +#define CPT_MAX_IV_LEN 16 +#define CPT_OFFSET_CONTROL_BYTES 8 + +int32_t +cpt_pmd_ops_helper_get_mlen_direct_mode(void) +{ + uint32_t len = 0; + + /* Request structure */ + len = sizeof(struct cpt_request_info); + + /* CPT HW result structure plus extra as it is aligned */ + len += 2*sizeof(cpt_res_s_t); + + return len; +} + +int +cpt_pmd_ops_helper_get_mlen_sg_mode(void) +{ + uint32_t len = 0; + + len += sizeof(struct cpt_request_info); + len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN; + len += ROUNDUP8(SG_LIST_HDR_SIZE + + (ROUNDUP4(CPT_MAX_SG_IN_OUT_CNT) >> 2) * SG_ENTRY_SIZE); + len += 2 * COMPLETION_CODE_SIZE; + len += 2 * sizeof(cpt_res_s_t); + return len; +} diff --git a/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.h b/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.h new file mode 100644 index 000000000..dd32f9a40 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/cpt_pmd_ops_helper.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _CPT_PMD_OPS_HELPER_H_ +#define _CPT_PMD_OPS_HELPER_H_ + +/* + * This file defines the agreement between the common layer and the individual + * crypto drivers for OCTEON TX series. Control path in otx* directory can + * directly call functions declared here. + */ + +/* + * Get meta length required when operating in direct mode (single buffer + * in-place) + * + * @return + * - length + */ + +int32_t +cpt_pmd_ops_helper_get_mlen_direct_mode(void); + +/* + * Get size of contiguous meta buffer to be allocated when working in scatter + * gather mode. + * + * @return + * - length + */ +int +cpt_pmd_ops_helper_get_mlen_sg_mode(void); +#endif /* _CPT_PMD_OPS_HELPER_H_ */ diff --git a/src/seastar/dpdk/drivers/common/cpt/cpt_ucode.h b/src/seastar/dpdk/drivers/common/cpt/cpt_ucode.h new file mode 100644 index 000000000..f21352e6c --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/cpt_ucode.h @@ -0,0 +1,3687 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ + +#ifndef _CPT_UCODE_H_ +#define _CPT_UCODE_H_ +#include <stdbool.h> + +#include "cpt_common.h" +#include "cpt_hw_types.h" +#include "cpt_mcode_defines.h" + +/* + * This file defines functions that are interfaces to microcode spec. + * + */ + +static uint8_t zuc_d[32] = { + 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E, + 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF, + 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1, + 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC +}; + +static __rte_always_inline int +cpt_is_algo_supported(struct rte_crypto_sym_xform *xform) +{ + /* + * Microcode only supports the following combination. 
+ * Encryption followed by authentication + * Authentication followed by decryption + */ + if (xform->next) { + if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) && + (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) { + /* Unsupported as of now by microcode */ + CPT_LOG_DP_ERR("Unsupported combination"); + return -1; + } + if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) && + (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) { + /* For GMAC auth there is no cipher operation */ + if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM || + xform->next->auth.algo != + RTE_CRYPTO_AUTH_AES_GMAC) { + /* Unsupported as of now by microcode */ + CPT_LOG_DP_ERR("Unsupported combination"); + return -1; + } + } + } + return 0; +} + +static __rte_always_inline void +gen_key_snow3g(uint8_t *ck, uint32_t *keyx) +{ + int i, base; + + for (i = 0; i < 4; i++) { + base = 4 * i; + keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) | + (ck[base + 2] << 8) | (ck[base + 3]); + keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]); + } +} + +static __rte_always_inline void +cpt_fc_salt_update(void *ctx, + uint8_t *salt) +{ + struct cpt_ctx *cpt_ctx = ctx; + memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4); +} + +static __rte_always_inline int +cpt_fc_ciph_validate_key_aes(uint16_t key_len) +{ + switch (key_len) { + case CPT_BYTE_16: + case CPT_BYTE_24: + case CPT_BYTE_32: + return 0; + default: + return -1; + } +} + +static __rte_always_inline int +cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx, + uint16_t key_len) +{ + int fc_type = 0; + switch (type) { + case PASSTHROUGH: + fc_type = FC_GEN; + break; + case DES3_CBC: + case DES3_ECB: + fc_type = FC_GEN; + break; + case AES_CBC: + case AES_ECB: + case AES_CFB: + case AES_CTR: + case AES_GCM: + if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0)) + return -1; + fc_type = FC_GEN; + break; + case AES_XTS: + key_len = key_len / 2; + if (unlikely(key_len == CPT_BYTE_24)) { + CPT_LOG_DP_ERR("Invalid AES key len for XTS"); + return -1; + } + if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0)) + return -1; + fc_type = FC_GEN; + break; + case ZUC_EEA3: + case SNOW3G_UEA2: + if (unlikely(key_len != 16)) + return -1; + /* No support for AEAD yet */ + if (unlikely(cpt_ctx->hash_type)) + return -1; + fc_type = ZUC_SNOW3G; + break; + case KASUMI_F8_CBC: + case KASUMI_F8_ECB: + if (unlikely(key_len != 16)) + return -1; + /* No support for AEAD yet */ + if (unlikely(cpt_ctx->hash_type)) + return -1; + fc_type = KASUMI; + break; + default: + return -1; + } + return fc_type; +} + +static __rte_always_inline void +cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx) +{ + cpt_ctx->enc_cipher = 0; + CPT_P_ENC_CTRL(fctx).enc_cipher = 0; +} + +static __rte_always_inline void +cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len) +{ + mc_aes_type_t aes_key_type = 0; + switch (key_len) { + case CPT_BYTE_16: + aes_key_type = AES_128_BIT; + break; + case CPT_BYTE_24: + aes_key_type = AES_192_BIT; + break; + case CPT_BYTE_32: + aes_key_type = AES_256_BIT; + break; + default: + /* This should not happen */ + CPT_LOG_DP_ERR("Invalid AES key len"); + return; + } + CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type; +} + +static __rte_always_inline void +cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key, + uint16_t key_len) +{ + uint32_t keyx[4]; + cpt_ctx->snow3g = 1; + gen_key_snow3g(key, keyx); + 
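	/* Editor's note: keyx[] now holds the 16-byte CK as four 32-bit
	 * words in reversed word order, each converted to big endian by
	 * gen_key_snow3g() above -- presumably the layout the SNOW 3G
	 * microcode expects in ci_key.
	 */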
memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len); + cpt_ctx->fc_type = ZUC_SNOW3G; + cpt_ctx->zsk_flags = 0; +} + +static __rte_always_inline void +cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key, + uint16_t key_len) +{ + cpt_ctx->snow3g = 0; + memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len); + memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32); + cpt_ctx->fc_type = ZUC_SNOW3G; + cpt_ctx->zsk_flags = 0; +} + +static __rte_always_inline void +cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key, + uint16_t key_len) +{ + cpt_ctx->k_ecb = 1; + memcpy(cpt_ctx->k_ctx.ci_key, key, key_len); + cpt_ctx->zsk_flags = 0; + cpt_ctx->fc_type = KASUMI; +} + +static __rte_always_inline void +cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key, + uint16_t key_len) +{ + memcpy(cpt_ctx->k_ctx.ci_key, key, key_len); + cpt_ctx->zsk_flags = 0; + cpt_ctx->fc_type = KASUMI; +} + +static __rte_always_inline int +cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key, + uint16_t key_len, uint8_t *salt) +{ + struct cpt_ctx *cpt_ctx = ctx; + mc_fc_context_t *fctx = &cpt_ctx->fctx; + uint64_t *ctrl_flags = NULL; + int fc_type; + + /* Validate key before proceeding */ + fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len); + if (unlikely(fc_type == -1)) + return -1; + + if (fc_type == FC_GEN) { + cpt_ctx->fc_type = FC_GEN; + ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags); + *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags); + /* + * We need to always say IV is from DPTR as user can + * sometimes iverride IV per operation. + */ + CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR; + } + + switch (type) { + case PASSTHROUGH: + cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx); + goto fc_success; + case DES3_CBC: + /* CPT performs DES using 3DES with the 8B DES-key + * replicated 2 more times to match the 24B 3DES-key. + * Eg. If org. key is "0x0a 0x0b", then new key is + * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b" + */ + if (key_len == 8) { + /* Skipping the first 8B as it will be copied + * in the regular code flow + */ + memcpy(fctx->enc.encr_key+key_len, key, key_len); + memcpy(fctx->enc.encr_key+2*key_len, key, key_len); + } + break; + case DES3_ECB: + /* For DES3_ECB IV need to be from CTX. 
*/ + CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX; + break; + case AES_CBC: + case AES_ECB: + case AES_CFB: + case AES_CTR: + cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len); + break; + case AES_GCM: + /* Even though iv source is from dptr, + * aes_gcm salt is taken from ctx + */ + if (salt) { + memcpy(fctx->enc.encr_iv, salt, 4); + /* Assuming it was just salt update + * and nothing else + */ + if (!key) + goto fc_success; + } + cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len); + break; + case AES_XTS: + key_len = key_len / 2; + cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len); + + /* Copy key2 for XTS into ipad */ + memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad)); + memcpy(fctx->hmac.ipad, &key[key_len], key_len); + break; + case SNOW3G_UEA2: + cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len); + goto success; + case ZUC_EEA3: + cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len); + goto success; + case KASUMI_F8_ECB: + cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len); + goto success; + case KASUMI_F8_CBC: + cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len); + goto success; + default: + break; + } + + /* Only for FC_GEN case */ + + /* For GMAC auth, cipher must be NULL */ + if (cpt_ctx->hash_type != GMAC_TYPE) + CPT_P_ENC_CTRL(fctx).enc_cipher = type; + + memcpy(fctx->enc.encr_key, key, key_len); + +fc_success: + *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags); + +success: + cpt_ctx->enc_cipher = type; + + return 0; +} + +static __rte_always_inline uint32_t +fill_sg_comp(sg_comp_t *list, + uint32_t i, + phys_addr_t dma_addr, + uint32_t size) +{ + sg_comp_t *to = &list[i>>2]; + + to->u.s.len[i%4] = rte_cpu_to_be_16(size); + to->ptr[i%4] = rte_cpu_to_be_64(dma_addr); + i++; + return i; +} + +static __rte_always_inline uint32_t +fill_sg_comp_from_buf(sg_comp_t *list, + uint32_t i, + buf_ptr_t *from) +{ + sg_comp_t *to = &list[i>>2]; + + to->u.s.len[i%4] = rte_cpu_to_be_16(from->size); + to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr); + i++; + return i; +} + +static __rte_always_inline uint32_t +fill_sg_comp_from_buf_min(sg_comp_t *list, + uint32_t i, + buf_ptr_t *from, + uint32_t *psize) +{ + sg_comp_t *to = &list[i >> 2]; + uint32_t size = *psize; + uint32_t e_len; + + e_len = (size > from->size) ? from->size : size; + to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len); + to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr); + *psize -= e_len; + i++; + return i; +} + +/* + * This fills the MC expected SGIO list + * from IOV given by user. + */ +static __rte_always_inline uint32_t +fill_sg_comp_from_iov(sg_comp_t *list, + uint32_t i, + iov_ptr_t *from, uint32_t from_offset, + uint32_t *psize, buf_ptr_t *extra_buf, + uint32_t extra_offset) +{ + int32_t j; + uint32_t extra_len = extra_buf ? extra_buf->size : 0; + uint32_t size = *psize - extra_len; + buf_ptr_t *bufs; + + bufs = from->bufs; + for (j = 0; (j < from->buf_cnt) && size; j++) { + phys_addr_t e_dma_addr; + uint32_t e_len; + sg_comp_t *to = &list[i >> 2]; + + if (!bufs[j].size) + continue; + + if (unlikely(from_offset)) { + if (from_offset >= bufs[j].size) { + from_offset -= bufs[j].size; + continue; + } + e_dma_addr = bufs[j].dma_addr + from_offset; + e_len = (size > (bufs[j].size - from_offset)) ? + (bufs[j].size - from_offset) : size; + from_offset = 0; + } else { + e_dma_addr = bufs[j].dma_addr; + e_len = (size > bufs[j].size) ? 
+ bufs[j].size : size; + } + + to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len); + to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr); + + if (extra_len && (e_len >= extra_offset)) { + /* Break the data at given offset */ + uint32_t next_len = e_len - extra_offset; + phys_addr_t next_dma = e_dma_addr + extra_offset; + + if (!extra_offset) { + i--; + } else { + e_len = extra_offset; + size -= e_len; + to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len); + } + + /* Insert extra data ptr */ + if (extra_len) { + i++; + to = &list[i >> 2]; + to->u.s.len[i % 4] = + rte_cpu_to_be_16(extra_buf->size); + to->ptr[i % 4] = + rte_cpu_to_be_64(extra_buf->dma_addr); + + /* size already decremented by extra len */ + } + + /* insert the rest of the data */ + if (next_len) { + i++; + to = &list[i >> 2]; + to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len); + to->ptr[i % 4] = rte_cpu_to_be_64(next_dma); + size -= next_len; + } + extra_len = 0; + + } else { + size -= e_len; + } + if (extra_offset) + extra_offset -= size; + i++; + } + + *psize = size; + return (uint32_t)i; +} + +static __rte_always_inline void +cpt_digest_gen_prep(uint32_t flags, + uint64_t d_lens, + digest_params_t *params, + void *op, + void **prep_req) +{ + struct cpt_request_info *req; + uint32_t size, i; + int32_t m_size; + uint16_t data_len, mac_len, key_len; + auth_type_t hash_type; + buf_ptr_t *meta_p; + struct cpt_ctx *ctx; + sg_comp_t *gather_comp; + sg_comp_t *scatter_comp; + uint8_t *in_buffer; + uint32_t g_size_bytes, s_size_bytes; + uint64_t dptr_dma, rptr_dma; + vq_cmd_word0_t vq_cmd_w0; + vq_cmd_word3_t vq_cmd_w3; + void *c_vaddr, *m_vaddr; + uint64_t c_dma, m_dma; + opcode_info_t opcode; + + ctx = params->ctx_buf.vaddr; + meta_p = ¶ms->meta_buf; + + m_vaddr = meta_p->vaddr; + m_dma = meta_p->dma_addr; + m_size = meta_p->size; + + /* + * Save initial space that followed app data for completion code & + * alternate completion code to fall in same cache line as app data + */ + m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE; + m_dma += COMPLETION_CODE_SIZE; + size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) - + (uint8_t *)m_vaddr; + c_vaddr = (uint8_t *)m_vaddr + size; + c_dma = m_dma + size; + size += sizeof(cpt_res_s_t); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + req = m_vaddr; + + size = sizeof(struct cpt_request_info); + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + hash_type = ctx->hash_type; + mac_len = ctx->mac_len; + key_len = ctx->auth_key_len; + data_len = AUTH_DLEN(d_lens); + + /*GP op header */ + vq_cmd_w0.u64 = 0; + vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8)); + if (ctx->hmac) { + opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE; + vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len); + vq_cmd_w0.s.dlen = + rte_cpu_to_be_16((data_len + ROUNDUP8(key_len))); + } else { + opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE; + vq_cmd_w0.s.param1 = 0; + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len); + } + + opcode.s.minor = 0; + + /* Null auth only case enters the if */ + if (unlikely(!hash_type && !ctx->enc_cipher)) { + opcode.s.major = CPT_MAJOR_OP_MISC; + /* Minor op is passthrough */ + opcode.s.minor = 0x03; + /* Send out completion code only */ + vq_cmd_w0.s.param2 = rte_cpu_to_be_16(0x1); + } + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + /* DPTR has SG list */ + in_buffer = m_vaddr; + dptr_dma = m_dma; + + ((uint16_t *)in_buffer)[0] = 0; + ((uint16_t *)in_buffer)[1] = 0; + + /* TODO Add error check if space will be sufficient 
*/ + gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8); + + /* + * Input gather list + */ + + i = 0; + + if (ctx->hmac) { + uint64_t k_dma = params->ctx_buf.dma_addr + + offsetof(struct cpt_ctx, auth_key); + /* Key */ + i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len)); + } + + /* input data */ + size = data_len; + if (size) { + i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, + 0, &size, NULL, 0); + if (unlikely(size)) { + CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short" + " by %dB", size); + return; + } + } else { + /* + * Looks like we need to support zero data + * gather ptr in case of hash & hmac + */ + i++; + } + ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i); + g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + /* + * Output Gather list + */ + + i = 0; + scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes); + + if (flags & VALID_MAC_BUF) { + if (unlikely(params->mac_buf.size < mac_len)) { + CPT_LOG_DP_ERR("Insufficient MAC size"); + return; + } + + size = mac_len; + i = fill_sg_comp_from_buf_min(scatter_comp, i, + ¶ms->mac_buf, &size); + } else { + size = mac_len; + i = fill_sg_comp_from_iov(scatter_comp, i, + params->src_iov, data_len, + &size, NULL, 0); + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient dst IOV size, short by" + " %dB", size); + return; + } + } + + ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i); + s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE; + + /* This is DPTR len incase of SG mode */ + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* cpt alternate completion address saved earlier */ + req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); + *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT); + rptr_dma = c_dma - 8; + + req->ist.ei1 = dptr_dma; + req->ist.ei2 = rptr_dma; + /* First 16-bit swap then 64-bit swap */ + /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions + * to eliminate all the swapping + */ + vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64); + + /* vq command w3 */ + vq_cmd_w3.u64 = 0; + + /* 16 byte aligned cpt res address */ + req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr); + *req->completion_addr = COMPLETION_CODE_INIT; + req->comp_baddr = c_dma; + + /* Fill microcode part of instruction */ + req->ist.ei0 = vq_cmd_w0.u64; + req->ist.ei3 = vq_cmd_w3.u64; + + req->op = op; + + *prep_req = req; + return; +} + +static __rte_always_inline void +cpt_enc_hmac_prep(uint32_t flags, + uint64_t d_offs, + uint64_t d_lens, + fc_params_t *fc_params, + void *op, + void **prep_req) +{ + uint32_t iv_offset = 0; + int32_t inputlen, outputlen, enc_dlen, auth_dlen; + struct cpt_ctx *cpt_ctx; + uint32_t cipher_type, hash_type; + uint32_t mac_len, size; + uint8_t iv_len = 16; + struct cpt_request_info *req; + buf_ptr_t *meta_p, *aad_buf = NULL; + uint32_t encr_offset, auth_offset; + uint32_t encr_data_len, auth_data_len, aad_len = 0; + uint32_t passthrough_len = 0; + void *m_vaddr, *offset_vaddr; + uint64_t m_dma, offset_dma, ctx_dma; + vq_cmd_word0_t vq_cmd_w0; + vq_cmd_word3_t vq_cmd_w3; + void *c_vaddr; + uint64_t c_dma; + int32_t m_size; + opcode_info_t opcode; + + meta_p = &fc_params->meta_buf; + m_vaddr = meta_p->vaddr; + m_dma = meta_p->dma_addr; + m_size = meta_p->size; + + encr_offset = ENCR_OFFSET(d_offs); + auth_offset = AUTH_OFFSET(d_offs); + encr_data_len = ENCR_DLEN(d_lens); + auth_data_len = AUTH_DLEN(d_lens); + if 
(unlikely(flags & VALID_AAD_BUF)) { + /* + * We dont support both aad + * and auth data separately + */ + auth_data_len = 0; + auth_offset = 0; + aad_len = fc_params->aad_buf.size; + aad_buf = &fc_params->aad_buf; + } + cpt_ctx = fc_params->ctx_buf.vaddr; + cipher_type = cpt_ctx->enc_cipher; + hash_type = cpt_ctx->hash_type; + mac_len = cpt_ctx->mac_len; + + /* + * Save initial space that followed app data for completion code & + * alternate completion code to fall in same cache line as app data + */ + m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE; + m_dma += COMPLETION_CODE_SIZE; + size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) - + (uint8_t *)m_vaddr; + + c_vaddr = (uint8_t *)m_vaddr + size; + c_dma = m_dma + size; + size += sizeof(cpt_res_s_t); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* start cpt request info struct at 8 byte boundary */ + size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) - + (uint8_t *)m_vaddr; + + req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size); + + size += sizeof(struct cpt_request_info); + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + if (hash_type == GMAC_TYPE) + encr_data_len = 0; + + if (unlikely(!(flags & VALID_IV_BUF))) { + iv_len = 0; + iv_offset = ENCR_IV_OFFSET(d_offs); + } + + if (unlikely(flags & VALID_AAD_BUF)) { + /* + * When AAD is given, data above encr_offset is pass through + * Since AAD is given as separate pointer and not as offset, + * this is a special case as we need to fragment input data + * into passthrough + encr_data and then insert AAD in between. + */ + if (hash_type != GMAC_TYPE) { + passthrough_len = encr_offset; + auth_offset = passthrough_len + iv_len; + encr_offset = passthrough_len + aad_len + iv_len; + auth_data_len = aad_len + encr_data_len; + } else { + passthrough_len = 16 + aad_len; + auth_offset = passthrough_len + iv_len; + auth_data_len = aad_len; + } + } else { + encr_offset += iv_len; + auth_offset += iv_len; + } + + /* Encryption */ + opcode.s.major = CPT_MAJOR_OP_FC; + opcode.s.minor = 0; + + auth_dlen = auth_offset + auth_data_len; + enc_dlen = encr_data_len + encr_offset; + if (unlikely(encr_data_len & 0xf)) { + if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB)) + enc_dlen = ROUNDUP8(encr_data_len) + encr_offset; + else if (likely((cipher_type == AES_CBC) || + (cipher_type == AES_ECB))) + enc_dlen = ROUNDUP16(encr_data_len) + encr_offset; + } + + if (unlikely(hash_type == GMAC_TYPE)) { + encr_offset = auth_dlen; + enc_dlen = 0; + } + + if (unlikely(auth_dlen > enc_dlen)) { + inputlen = auth_dlen; + outputlen = auth_dlen + mac_len; + } else { + inputlen = enc_dlen; + outputlen = enc_dlen + mac_len; + } + + /* GP op header */ + vq_cmd_w0.u64 = 0; + vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len); + vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len); + /* + * In 83XX since we have a limitation of + * IV & Offset control word not part of instruction + * and need to be part of Data Buffer, we check if + * head room is there and then only do the Direct mode processing + */ + if (likely((flags & SINGLE_BUF_INPLACE) && + (flags & SINGLE_BUF_HEADTAILROOM))) { + void *dm_vaddr = fc_params->bufs[0].vaddr; + uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr; + /* + * This flag indicates that there is 24 bytes head room and + * 8 bytes tail room available, so that we get to do + * DIRECT MODE with limitation + */ + + offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len; + offset_dma = dm_dma_addr - OFF_CTRL_LEN - 
iv_len; + + /* DPTR */ + req->ist.ei1 = offset_dma; + /* RPTR should just exclude offset control word */ + req->ist.ei2 = dm_dma_addr - iv_len; + req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr + + outputlen - iv_len); + + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN); + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + if (likely(iv_len)) { + uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr + + OFF_CTRL_LEN); + uint64_t *src = fc_params->iv_buf; + dest[0] = src[0]; + dest[1] = src[1]; + } + + *(uint64_t *)offset_vaddr = + rte_cpu_to_be_64(((uint64_t)encr_offset << 16) | + ((uint64_t)iv_offset << 8) | + ((uint64_t)auth_offset)); + + } else { + uint32_t i, g_size_bytes, s_size_bytes; + uint64_t dptr_dma, rptr_dma; + sg_comp_t *gather_comp; + sg_comp_t *scatter_comp; + uint8_t *in_buffer; + + /* This falls under strict SG mode */ + offset_vaddr = m_vaddr; + offset_dma = m_dma; + size = OFF_CTRL_LEN + iv_len; + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + opcode.s.major |= CPT_DMA_MODE; + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + if (likely(iv_len)) { + uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr + + OFF_CTRL_LEN); + uint64_t *src = fc_params->iv_buf; + dest[0] = src[0]; + dest[1] = src[1]; + } + + *(uint64_t *)offset_vaddr = + rte_cpu_to_be_64(((uint64_t)encr_offset << 16) | + ((uint64_t)iv_offset << 8) | + ((uint64_t)auth_offset)); + + /* DPTR has SG list */ + in_buffer = m_vaddr; + dptr_dma = m_dma; + + ((uint16_t *)in_buffer)[0] = 0; + ((uint16_t *)in_buffer)[1] = 0; + + /* TODO Add error check if space will be sufficient */ + gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8); + + /* + * Input Gather List + */ + + i = 0; + + /* Offset control word that includes iv */ + i = fill_sg_comp(gather_comp, i, offset_dma, + OFF_CTRL_LEN + iv_len); + + /* Add input data */ + size = inputlen - iv_len; + if (likely(size)) { + uint32_t aad_offset = aad_len ? passthrough_len : 0; + + if (unlikely(flags & SINGLE_BUF_INPLACE)) { + i = fill_sg_comp_from_buf_min(gather_comp, i, + fc_params->bufs, + &size); + } else { + i = fill_sg_comp_from_iov(gather_comp, i, + fc_params->src_iov, + 0, &size, + aad_buf, aad_offset); + } + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i); + g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + /* + * Output Scatter list + */ + i = 0; + scatter_comp = + (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes); + + /* Add IV */ + if (likely(iv_len)) { + i = fill_sg_comp(scatter_comp, i, + offset_dma + OFF_CTRL_LEN, + iv_len); + } + + /* output data or output data + digest*/ + if (unlikely(flags & VALID_MAC_BUF)) { + size = outputlen - iv_len - mac_len; + if (size) { + uint32_t aad_offset = + aad_len ? passthrough_len : 0; + + if (unlikely(flags & SINGLE_BUF_INPLACE)) { + i = fill_sg_comp_from_buf_min( + scatter_comp, + i, + fc_params->bufs, + &size); + } else { + i = fill_sg_comp_from_iov(scatter_comp, + i, + fc_params->dst_iov, + 0, + &size, + aad_buf, + aad_offset); + } + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer" + " space, size %d needed", + size); + return; + } + } + /* mac_data */ + if (mac_len) { + i = fill_sg_comp_from_buf(scatter_comp, i, + &fc_params->mac_buf); + } + } else { + /* Output including mac */ + size = outputlen - iv_len; + if (likely(size)) { + uint32_t aad_offset = + aad_len ? 
passthrough_len : 0; + + if (unlikely(flags & SINGLE_BUF_INPLACE)) { + i = fill_sg_comp_from_buf_min( + scatter_comp, + i, + fc_params->bufs, + &size); + } else { + i = fill_sg_comp_from_iov(scatter_comp, + i, + fc_params->dst_iov, + 0, + &size, + aad_buf, + aad_offset); + } + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer" + " space, size %d needed", + size); + return; + } + } + } + ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i); + s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE; + + /* This is DPTR len incase of SG mode */ + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* cpt alternate completion address saved earlier */ + req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); + *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT); + rptr_dma = c_dma - 8; + + req->ist.ei1 = dptr_dma; + req->ist.ei2 = rptr_dma; + } + + /* First 16-bit swap then 64-bit swap */ + /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions + * to eliminate all the swapping + */ + vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64); + + ctx_dma = fc_params->ctx_buf.dma_addr + + offsetof(struct cpt_ctx, fctx); + /* vq command w3 */ + vq_cmd_w3.u64 = 0; + vq_cmd_w3.s.grp = 0; + vq_cmd_w3.s.cptr = ctx_dma; + + /* 16 byte aligned cpt res address */ + req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr); + *req->completion_addr = COMPLETION_CODE_INIT; + req->comp_baddr = c_dma; + + /* Fill microcode part of instruction */ + req->ist.ei0 = vq_cmd_w0.u64; + req->ist.ei3 = vq_cmd_w3.u64; + + req->op = op; + + *prep_req = req; + return; +} + +static __rte_always_inline void +cpt_dec_hmac_prep(uint32_t flags, + uint64_t d_offs, + uint64_t d_lens, + fc_params_t *fc_params, + void *op, + void **prep_req) +{ + uint32_t iv_offset = 0, size; + int32_t inputlen, outputlen, enc_dlen, auth_dlen; + struct cpt_ctx *cpt_ctx; + int32_t hash_type, mac_len, m_size; + uint8_t iv_len = 16; + struct cpt_request_info *req; + buf_ptr_t *meta_p, *aad_buf = NULL; + uint32_t encr_offset, auth_offset; + uint32_t encr_data_len, auth_data_len, aad_len = 0; + uint32_t passthrough_len = 0; + void *m_vaddr, *offset_vaddr; + uint64_t m_dma, offset_dma, ctx_dma; + opcode_info_t opcode; + vq_cmd_word0_t vq_cmd_w0; + vq_cmd_word3_t vq_cmd_w3; + void *c_vaddr; + uint64_t c_dma; + + meta_p = &fc_params->meta_buf; + m_vaddr = meta_p->vaddr; + m_dma = meta_p->dma_addr; + m_size = meta_p->size; + + encr_offset = ENCR_OFFSET(d_offs); + auth_offset = AUTH_OFFSET(d_offs); + encr_data_len = ENCR_DLEN(d_lens); + auth_data_len = AUTH_DLEN(d_lens); + + if (unlikely(flags & VALID_AAD_BUF)) { + /* + * We dont support both aad + * and auth data separately + */ + auth_data_len = 0; + auth_offset = 0; + aad_len = fc_params->aad_buf.size; + aad_buf = &fc_params->aad_buf; + } + + cpt_ctx = fc_params->ctx_buf.vaddr; + hash_type = cpt_ctx->hash_type; + mac_len = cpt_ctx->mac_len; + + if (hash_type == GMAC_TYPE) + encr_data_len = 0; + + if (unlikely(!(flags & VALID_IV_BUF))) { + iv_len = 0; + iv_offset = ENCR_IV_OFFSET(d_offs); + } + + if (unlikely(flags & VALID_AAD_BUF)) { + /* + * When AAD is given, data above encr_offset is pass through + * Since AAD is given as separate pointer and not as offset, + * this is a special case as we need to fragment input data + * into passthrough + encr_data and then insert AAD in between. 
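+	 * For example (illustrative numbers only): with encr_offset = 16,
+	 * aad_len = 8 and iv_len = 16, the non-GMAC branch below yields
+	 * passthrough_len = 16, auth_offset = 32, encr_offset = 40 and
+	 * auth_data_len = aad_len + encr_data_len.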
+ */ + if (hash_type != GMAC_TYPE) { + passthrough_len = encr_offset; + auth_offset = passthrough_len + iv_len; + encr_offset = passthrough_len + aad_len + iv_len; + auth_data_len = aad_len + encr_data_len; + } else { + passthrough_len = 16 + aad_len; + auth_offset = passthrough_len + iv_len; + auth_data_len = aad_len; + } + } else { + encr_offset += iv_len; + auth_offset += iv_len; + } + + /* + * Save initial space that followed app data for completion code & + * alternate completion code to fall in same cache line as app data + */ + m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE; + m_dma += COMPLETION_CODE_SIZE; + size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) - + (uint8_t *)m_vaddr; + c_vaddr = (uint8_t *)m_vaddr + size; + c_dma = m_dma + size; + size += sizeof(cpt_res_s_t); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* start cpt request info structure at 8 byte alignment */ + size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) - + (uint8_t *)m_vaddr; + + req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size); + + size += sizeof(struct cpt_request_info); + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* Decryption */ + opcode.s.major = CPT_MAJOR_OP_FC; + opcode.s.minor = 1; + + enc_dlen = encr_offset + encr_data_len; + auth_dlen = auth_offset + auth_data_len; + + if (auth_dlen > enc_dlen) { + inputlen = auth_dlen + mac_len; + outputlen = auth_dlen; + } else { + inputlen = enc_dlen + mac_len; + outputlen = enc_dlen; + } + + if (hash_type == GMAC_TYPE) + encr_offset = inputlen; + + vq_cmd_w0.u64 = 0; + vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len); + vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len); + + /* + * In 83XX since we have a limitation of + * IV & Offset control word not part of instruction + * and need to be part of Data Buffer, we check if + * head room is there and then only do the Direct mode processing + */ + if (likely((flags & SINGLE_BUF_INPLACE) && + (flags & SINGLE_BUF_HEADTAILROOM))) { + void *dm_vaddr = fc_params->bufs[0].vaddr; + uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr; + /* + * This flag indicates that there is 24 bytes head room and + * 8 bytes tail room available, so that we get to do + * DIRECT MODE with limitation + */ + + offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len; + offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len; + req->ist.ei1 = offset_dma; + + /* RPTR should just exclude offset control word */ + req->ist.ei2 = dm_dma_addr - iv_len; + + req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr + + outputlen - iv_len); + /* since this is decryption, + * don't touch the content of + * alternate ccode space as it contains + * hmac. 
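+	 * Note: the 8 bytes of tailroom guaranteed along with
+	 * SINGLE_BUF_HEADTAILROOM are where the alternate completion
+	 * code normally lands; on decrypt this region initially holds
+	 * the MAC to be verified, so it must not be overwritten here.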
+ */ + + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN); + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + if (likely(iv_len)) { + uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr + + OFF_CTRL_LEN); + uint64_t *src = fc_params->iv_buf; + dest[0] = src[0]; + dest[1] = src[1]; + } + + *(uint64_t *)offset_vaddr = + rte_cpu_to_be_64(((uint64_t)encr_offset << 16) | + ((uint64_t)iv_offset << 8) | + ((uint64_t)auth_offset)); + + } else { + uint64_t dptr_dma, rptr_dma; + uint32_t g_size_bytes, s_size_bytes; + sg_comp_t *gather_comp; + sg_comp_t *scatter_comp; + uint8_t *in_buffer; + uint8_t i = 0; + + /* This falls under strict SG mode */ + offset_vaddr = m_vaddr; + offset_dma = m_dma; + size = OFF_CTRL_LEN + iv_len; + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + opcode.s.major |= CPT_DMA_MODE; + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + if (likely(iv_len)) { + uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr + + OFF_CTRL_LEN); + uint64_t *src = fc_params->iv_buf; + dest[0] = src[0]; + dest[1] = src[1]; + } + + *(uint64_t *)offset_vaddr = + rte_cpu_to_be_64(((uint64_t)encr_offset << 16) | + ((uint64_t)iv_offset << 8) | + ((uint64_t)auth_offset)); + + /* DPTR has SG list */ + in_buffer = m_vaddr; + dptr_dma = m_dma; + + ((uint16_t *)in_buffer)[0] = 0; + ((uint16_t *)in_buffer)[1] = 0; + + /* TODO Add error check if space will be sufficient */ + gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8); + + /* + * Input Gather List + */ + i = 0; + + /* Offset control word that includes iv */ + i = fill_sg_comp(gather_comp, i, offset_dma, + OFF_CTRL_LEN + iv_len); + + /* Add input data */ + if (flags & VALID_MAC_BUF) { + size = inputlen - iv_len - mac_len; + if (size) { + /* input data only */ + if (unlikely(flags & SINGLE_BUF_INPLACE)) { + i = fill_sg_comp_from_buf_min( + gather_comp, i, + fc_params->bufs, + &size); + } else { + uint32_t aad_offset = aad_len ? + passthrough_len : 0; + + i = fill_sg_comp_from_iov(gather_comp, + i, + fc_params->src_iov, + 0, &size, + aad_buf, + aad_offset); + } + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer" + " space, size %d needed", + size); + return; + } + } + + /* mac data */ + if (mac_len) { + i = fill_sg_comp_from_buf(gather_comp, i, + &fc_params->mac_buf); + } + } else { + /* input data + mac */ + size = inputlen - iv_len; + if (size) { + if (unlikely(flags & SINGLE_BUF_INPLACE)) { + i = fill_sg_comp_from_buf_min( + gather_comp, i, + fc_params->bufs, + &size); + } else { + uint32_t aad_offset = aad_len ? + passthrough_len : 0; + + if (unlikely(!fc_params->src_iov)) { + CPT_LOG_DP_ERR("Bad input args"); + return; + } + + i = fill_sg_comp_from_iov( + gather_comp, i, + fc_params->src_iov, + 0, &size, + aad_buf, + aad_offset); + } + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer" + " space, size %d needed", + size); + return; + } + } + } + ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i); + g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + /* + * Output Scatter List + */ + + i = 0; + scatter_comp = + (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes); + + /* Add iv */ + if (iv_len) { + i = fill_sg_comp(scatter_comp, i, + offset_dma + OFF_CTRL_LEN, + iv_len); + } + + /* Add output data */ + size = outputlen - iv_len; + if (size) { + if (unlikely(flags & SINGLE_BUF_INPLACE)) { + /* handle single buffer here */ + i = fill_sg_comp_from_buf_min(scatter_comp, i, + fc_params->bufs, + &size); + } else { + uint32_t aad_offset = aad_len ? 
+ passthrough_len : 0; + + if (unlikely(!fc_params->dst_iov)) { + CPT_LOG_DP_ERR("Bad input args"); + return; + } + + i = fill_sg_comp_from_iov(scatter_comp, i, + fc_params->dst_iov, 0, + &size, aad_buf, + aad_offset); + } + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + + ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i); + s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE; + + /* This is DPTR len incase of SG mode */ + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* cpt alternate completion address saved earlier */ + req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); + *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT); + rptr_dma = c_dma - 8; + size += COMPLETION_CODE_SIZE; + + req->ist.ei1 = dptr_dma; + req->ist.ei2 = rptr_dma; + } + + /* First 16-bit swap then 64-bit swap */ + /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions + * to eliminate all the swapping + */ + vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64); + + ctx_dma = fc_params->ctx_buf.dma_addr + + offsetof(struct cpt_ctx, fctx); + /* vq command w3 */ + vq_cmd_w3.u64 = 0; + vq_cmd_w3.s.grp = 0; + vq_cmd_w3.s.cptr = ctx_dma; + + /* 16 byte aligned cpt res address */ + req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr); + *req->completion_addr = COMPLETION_CODE_INIT; + req->comp_baddr = c_dma; + + /* Fill microcode part of instruction */ + req->ist.ei0 = vq_cmd_w0.u64; + req->ist.ei3 = vq_cmd_w3.u64; + + req->op = op; + + *prep_req = req; + return; +} + +static __rte_always_inline void +cpt_zuc_snow3g_enc_prep(uint32_t req_flags, + uint64_t d_offs, + uint64_t d_lens, + fc_params_t *params, + void *op, + void **prep_req) +{ + uint32_t size; + int32_t inputlen, outputlen; + struct cpt_ctx *cpt_ctx; + uint32_t mac_len = 0; + uint8_t snow3g, j; + struct cpt_request_info *req; + buf_ptr_t *buf_p; + uint32_t encr_offset = 0, auth_offset = 0; + uint32_t encr_data_len = 0, auth_data_len = 0; + int flags, iv_len = 16, m_size; + void *m_vaddr, *c_vaddr; + uint64_t m_dma, c_dma, offset_ctrl; + uint64_t *offset_vaddr, offset_dma; + uint32_t *iv_s, iv[4]; + vq_cmd_word0_t vq_cmd_w0; + vq_cmd_word3_t vq_cmd_w3; + opcode_info_t opcode; + + buf_p = ¶ms->meta_buf; + m_vaddr = buf_p->vaddr; + m_dma = buf_p->dma_addr; + m_size = buf_p->size; + + cpt_ctx = params->ctx_buf.vaddr; + flags = cpt_ctx->zsk_flags; + mac_len = cpt_ctx->mac_len; + snow3g = cpt_ctx->snow3g; + + /* + * Save initial space that followed app data for completion code & + * alternate completion code to fall in same cache line as app data + */ + m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE; + m_dma += COMPLETION_CODE_SIZE; + size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) - + (uint8_t *)m_vaddr; + + c_vaddr = (uint8_t *)m_vaddr + size; + c_dma = m_dma + size; + size += sizeof(cpt_res_s_t); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* Reserve memory for cpt request info */ + req = m_vaddr; + + size = sizeof(struct cpt_request_info); + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G; + + /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */ + opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) | + (0 << 3) | (flags & 0x7)); + + if (flags == 0x1) { + /* + * Microcode expects offsets in bytes + * TODO: 
Rounding off + */ + auth_data_len = AUTH_DLEN(d_lens); + + /* EIA3 or UIA2 */ + auth_offset = AUTH_OFFSET(d_offs); + auth_offset = auth_offset / 8; + + /* consider iv len */ + auth_offset += iv_len; + + inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8); + outputlen = mac_len; + + offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset); + + } else { + /* EEA3 or UEA2 */ + /* + * Microcode expects offsets in bytes + * TODO: Rounding off + */ + encr_data_len = ENCR_DLEN(d_lens); + + encr_offset = ENCR_OFFSET(d_offs); + encr_offset = encr_offset / 8; + /* consider iv len */ + encr_offset += iv_len; + + inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8); + outputlen = inputlen; + + /* iv offset is 0 */ + offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16); + } + + /* IV */ + iv_s = (flags == 0x1) ? params->auth_iv_buf : + params->iv_buf; + + if (snow3g) { + /* + * DPDK seems to provide it in form of IV3 IV2 IV1 IV0 + * and BigEndian, MC needs it as IV0 IV1 IV2 IV3 + */ + + for (j = 0; j < 4; j++) + iv[j] = iv_s[3 - j]; + } else { + /* ZUC doesn't need a swap */ + for (j = 0; j < 4; j++) + iv[j] = iv_s[j]; + } + + /* + * GP op header, lengths are expected in bits. + */ + vq_cmd_w0.u64 = 0; + vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len); + vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len); + + /* + * In 83XX since we have a limitation of + * IV & Offset control word not part of instruction + * and need to be part of Data Buffer, we check if + * head room is there and then only do the Direct mode processing + */ + if (likely((req_flags & SINGLE_BUF_INPLACE) && + (req_flags & SINGLE_BUF_HEADTAILROOM))) { + void *dm_vaddr = params->bufs[0].vaddr; + uint64_t dm_dma_addr = params->bufs[0].dma_addr; + /* + * This flag indicates that there is 24 bytes head room and + * 8 bytes tail room available, so that we get to do + * DIRECT MODE with limitation + */ + + offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr - + OFF_CTRL_LEN - iv_len); + offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len; + + /* DPTR */ + req->ist.ei1 = offset_dma; + /* RPTR should just exclude offset control word */ + req->ist.ei2 = dm_dma_addr - iv_len; + req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr + + outputlen - iv_len); + + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN); + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + if (likely(iv_len)) { + uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr + + OFF_CTRL_LEN); + memcpy(iv_d, iv, 16); + } + + *offset_vaddr = offset_ctrl; + } else { + uint32_t i, g_size_bytes, s_size_bytes; + uint64_t dptr_dma, rptr_dma; + sg_comp_t *gather_comp; + sg_comp_t *scatter_comp; + uint8_t *in_buffer; + uint32_t *iv_d; + + /* save space for iv */ + offset_vaddr = m_vaddr; + offset_dma = m_dma; + + m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len; + m_dma += OFF_CTRL_LEN + iv_len; + m_size -= OFF_CTRL_LEN + iv_len; + + opcode.s.major |= CPT_DMA_MODE; + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + /* DPTR has SG list */ + in_buffer = m_vaddr; + dptr_dma = m_dma; + + ((uint16_t *)in_buffer)[0] = 0; + ((uint16_t *)in_buffer)[1] = 0; + + /* TODO Add error check if space will be sufficient */ + gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8); + + /* + * Input Gather List + */ + i = 0; + + /* Offset control word followed by iv */ + + i = fill_sg_comp(gather_comp, i, offset_dma, + OFF_CTRL_LEN + iv_len); + + /* iv offset is 0 */ + *offset_vaddr = offset_ctrl; + + iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN); 
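+		/* Note: iv[] was byte-reordered above: SNOW 3G words arrive
+		 * as IV3..IV0 and are swapped to IV0..IV3 for the microcode,
+		 * while ZUC IVs are copied through unchanged.
+		 */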
+ memcpy(iv_d, iv, 16); + + /* input data */ + size = inputlen - iv_len; + if (size) { + i = fill_sg_comp_from_iov(gather_comp, i, + params->src_iov, + 0, &size, NULL, 0); + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i); + g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + /* + * Output Scatter List + */ + + i = 0; + scatter_comp = + (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes); + + if (flags == 0x1) { + /* IV in SLIST only for EEA3 & UEA2 */ + iv_len = 0; + } + + if (iv_len) { + i = fill_sg_comp(scatter_comp, i, + offset_dma + OFF_CTRL_LEN, iv_len); + } + + /* Add output data */ + if (req_flags & VALID_MAC_BUF) { + size = outputlen - iv_len - mac_len; + if (size) { + i = fill_sg_comp_from_iov(scatter_comp, i, + params->dst_iov, 0, + &size, NULL, 0); + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + + /* mac data */ + if (mac_len) { + i = fill_sg_comp_from_buf(scatter_comp, i, + ¶ms->mac_buf); + } + } else { + /* Output including mac */ + size = outputlen - iv_len; + if (size) { + i = fill_sg_comp_from_iov(scatter_comp, i, + params->dst_iov, 0, + &size, NULL, 0); + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + } + ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i); + s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE; + + /* This is DPTR len incase of SG mode */ + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* cpt alternate completion address saved earlier */ + req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); + *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT); + rptr_dma = c_dma - 8; + + req->ist.ei1 = dptr_dma; + req->ist.ei2 = rptr_dma; + } + + /* First 16-bit swap then 64-bit swap */ + /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions + * to eliminate all the swapping + */ + vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64); + + /* vq command w3 */ + vq_cmd_w3.u64 = 0; + vq_cmd_w3.s.grp = 0; + vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr + + offsetof(struct cpt_ctx, zs_ctx); + + /* 16 byte aligned cpt res address */ + req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr); + *req->completion_addr = COMPLETION_CODE_INIT; + req->comp_baddr = c_dma; + + /* Fill microcode part of instruction */ + req->ist.ei0 = vq_cmd_w0.u64; + req->ist.ei3 = vq_cmd_w3.u64; + + req->op = op; + + *prep_req = req; + return; +} + +static __rte_always_inline void +cpt_zuc_snow3g_dec_prep(uint32_t req_flags, + uint64_t d_offs, + uint64_t d_lens, + fc_params_t *params, + void *op, + void **prep_req) +{ + uint32_t size; + int32_t inputlen = 0, outputlen; + struct cpt_ctx *cpt_ctx; + uint8_t snow3g, iv_len = 16; + struct cpt_request_info *req; + buf_ptr_t *buf_p; + uint32_t encr_offset; + uint32_t encr_data_len; + int flags, m_size; + void *m_vaddr, *c_vaddr; + uint64_t m_dma, c_dma; + uint64_t *offset_vaddr, offset_dma; + uint32_t *iv_s, iv[4], j; + vq_cmd_word0_t vq_cmd_w0; + vq_cmd_word3_t vq_cmd_w3; + opcode_info_t opcode; + + buf_p = ¶ms->meta_buf; + m_vaddr = buf_p->vaddr; + m_dma = buf_p->dma_addr; + m_size = buf_p->size; + + /* + * Microcode expects offsets in bytes + * TODO: Rounding off + */ + encr_offset = ENCR_OFFSET(d_offs) / 8; + encr_data_len = ENCR_DLEN(d_lens); + 
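+	/* Note: d_offs/d_lens arrive packed by the caller (see
+	 * fill_fc_params(): (cipher_off << 16) | auth_off and
+	 * (cipher_len << 32) | auth_len), so ENCR_OFFSET()/ENCR_DLEN()
+	 * recover the cipher fields; offsets are in bits, hence the /8.
+	 */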
+ cpt_ctx = params->ctx_buf.vaddr; + flags = cpt_ctx->zsk_flags; + snow3g = cpt_ctx->snow3g; + /* + * Save initial space that followed app data for completion code & + * alternate completion code to fall in same cache line as app data + */ + m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE; + m_dma += COMPLETION_CODE_SIZE; + size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) - + (uint8_t *)m_vaddr; + + c_vaddr = (uint8_t *)m_vaddr + size; + c_dma = m_dma + size; + size += sizeof(cpt_res_s_t); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* Reserve memory for cpt request info */ + req = m_vaddr; + + size = sizeof(struct cpt_request_info); + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G; + + /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */ + opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) | + (0 << 3) | (flags & 0x7)); + + /* consider iv len */ + encr_offset += iv_len; + + inputlen = encr_offset + + (RTE_ALIGN(encr_data_len, 8) / 8); + outputlen = inputlen; + + /* IV */ + iv_s = params->iv_buf; + if (snow3g) { + /* + * DPDK seems to provide it in form of IV3 IV2 IV1 IV0 + * and BigEndian, MC needs it as IV0 IV1 IV2 IV3 + */ + + for (j = 0; j < 4; j++) + iv[j] = iv_s[3 - j]; + } else { + /* ZUC doesn't need a swap */ + for (j = 0; j < 4; j++) + iv[j] = iv_s[j]; + } + + /* + * GP op header, lengths are expected in bits. + */ + vq_cmd_w0.u64 = 0; + vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len); + + /* + * In 83XX since we have a limitation of + * IV & Offset control word not part of instruction + * and need to be part of Data Buffer, we check if + * head room is there and then only do the Direct mode processing + */ + if (likely((req_flags & SINGLE_BUF_INPLACE) && + (req_flags & SINGLE_BUF_HEADTAILROOM))) { + void *dm_vaddr = params->bufs[0].vaddr; + uint64_t dm_dma_addr = params->bufs[0].dma_addr; + /* + * This flag indicates that there is 24 bytes head room and + * 8 bytes tail room available, so that we get to do + * DIRECT MODE with limitation + */ + + offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr - + OFF_CTRL_LEN - iv_len); + offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len; + + /* DPTR */ + req->ist.ei1 = offset_dma; + /* RPTR should just exclude offset control word */ + req->ist.ei2 = dm_dma_addr - iv_len; + req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr + + outputlen - iv_len); + + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN); + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + if (likely(iv_len)) { + uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr + + OFF_CTRL_LEN); + memcpy(iv_d, iv, 16); + } + + /* iv offset is 0 */ + *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16); + } else { + uint32_t i, g_size_bytes, s_size_bytes; + uint64_t dptr_dma, rptr_dma; + sg_comp_t *gather_comp; + sg_comp_t *scatter_comp; + uint8_t *in_buffer; + uint32_t *iv_d; + + /* save space for offset and iv... 
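+		 * Note: OFF_CTRL_LEN bytes for the offset control word plus
+		 * iv_len bytes are reserved from the meta buffer here; the
+		 * SG list is built immediately after them.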
*/ + offset_vaddr = m_vaddr; + offset_dma = m_dma; + + m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len; + m_dma += OFF_CTRL_LEN + iv_len; + m_size -= OFF_CTRL_LEN + iv_len; + + opcode.s.major |= CPT_DMA_MODE; + + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + /* DPTR has SG list */ + in_buffer = m_vaddr; + dptr_dma = m_dma; + + ((uint16_t *)in_buffer)[0] = 0; + ((uint16_t *)in_buffer)[1] = 0; + + /* TODO Add error check if space will be sufficient */ + gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8); + + /* + * Input Gather List + */ + i = 0; + + /* Offset control word */ + + /* iv offset is 0 */ + *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16); + + i = fill_sg_comp(gather_comp, i, offset_dma, + OFF_CTRL_LEN + iv_len); + + iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN); + memcpy(iv_d, iv, 16); + + /* Add input data */ + size = inputlen - iv_len; + if (size) { + i = fill_sg_comp_from_iov(gather_comp, i, + params->src_iov, + 0, &size, NULL, 0); + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i); + g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + /* + * Output Scatter List + */ + + i = 0; + scatter_comp = + (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes); + + /* IV */ + i = fill_sg_comp(scatter_comp, i, + offset_dma + OFF_CTRL_LEN, + iv_len); + + /* Add output data */ + size = outputlen - iv_len; + if (size) { + i = fill_sg_comp_from_iov(scatter_comp, i, + params->dst_iov, 0, + &size, NULL, 0); + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i); + s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE; + + /* This is DPTR len incase of SG mode */ + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* cpt alternate completion address saved earlier */ + req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); + *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT); + rptr_dma = c_dma - 8; + + req->ist.ei1 = dptr_dma; + req->ist.ei2 = rptr_dma; + } + + /* First 16-bit swap then 64-bit swap */ + /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions + * to eliminate all the swapping + */ + vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64); + + /* vq command w3 */ + vq_cmd_w3.u64 = 0; + vq_cmd_w3.s.grp = 0; + vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr + + offsetof(struct cpt_ctx, zs_ctx); + + /* 16 byte aligned cpt res address */ + req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr); + *req->completion_addr = COMPLETION_CODE_INIT; + req->comp_baddr = c_dma; + + /* Fill microcode part of instruction */ + req->ist.ei0 = vq_cmd_w0.u64; + req->ist.ei3 = vq_cmd_w3.u64; + + req->op = op; + + *prep_req = req; + return; +} + +static __rte_always_inline void +cpt_kasumi_enc_prep(uint32_t req_flags, + uint64_t d_offs, + uint64_t d_lens, + fc_params_t *params, + void *op, + void **prep_req) +{ + uint32_t size; + int32_t inputlen = 0, outputlen = 0; + struct cpt_ctx *cpt_ctx; + uint32_t mac_len = 0; + uint8_t i = 0; + struct cpt_request_info *req; + buf_ptr_t *buf_p; + uint32_t encr_offset, auth_offset; + uint32_t encr_data_len, auth_data_len; + int flags, m_size; + uint8_t *iv_s, *iv_d, iv_len = 8; + uint8_t dir = 0; + void *m_vaddr, *c_vaddr; + uint64_t m_dma, c_dma; + 
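+	/* Note: for KASUMI, byte 8 of the IV buffer (just past the 8-byte
+	 * IV) carries the direction bit, read into 'dir' below.
+	 */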
uint64_t *offset_vaddr, offset_dma; + vq_cmd_word0_t vq_cmd_w0; + vq_cmd_word3_t vq_cmd_w3; + opcode_info_t opcode; + uint8_t *in_buffer; + uint32_t g_size_bytes, s_size_bytes; + uint64_t dptr_dma, rptr_dma; + sg_comp_t *gather_comp; + sg_comp_t *scatter_comp; + + buf_p = ¶ms->meta_buf; + m_vaddr = buf_p->vaddr; + m_dma = buf_p->dma_addr; + m_size = buf_p->size; + + encr_offset = ENCR_OFFSET(d_offs) / 8; + auth_offset = AUTH_OFFSET(d_offs) / 8; + encr_data_len = ENCR_DLEN(d_lens); + auth_data_len = AUTH_DLEN(d_lens); + + cpt_ctx = params->ctx_buf.vaddr; + flags = cpt_ctx->zsk_flags; + mac_len = cpt_ctx->mac_len; + + if (flags == 0x0) + iv_s = params->iv_buf; + else + iv_s = params->auth_iv_buf; + + dir = iv_s[8] & 0x1; + + /* + * Save initial space that followed app data for completion code & + * alternate completion code to fall in same cache line as app data + */ + m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE; + m_dma += COMPLETION_CODE_SIZE; + size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) - + (uint8_t *)m_vaddr; + + c_vaddr = (uint8_t *)m_vaddr + size; + c_dma = m_dma + size; + size += sizeof(cpt_res_s_t); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* Reserve memory for cpt request info */ + req = m_vaddr; + + size = sizeof(struct cpt_request_info); + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE; + + /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */ + opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) | + (dir << 4) | (0 << 3) | (flags & 0x7)); + + /* + * GP op header, lengths are expected in bits. + */ + vq_cmd_w0.u64 = 0; + vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len); + vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len); + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + /* consider iv len */ + if (flags == 0x0) { + encr_offset += iv_len; + auth_offset += iv_len; + } + + /* save space for offset ctrl and iv */ + offset_vaddr = m_vaddr; + offset_dma = m_dma; + + m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len; + m_dma += OFF_CTRL_LEN + iv_len; + m_size -= OFF_CTRL_LEN + iv_len; + + /* DPTR has SG list */ + in_buffer = m_vaddr; + dptr_dma = m_dma; + + ((uint16_t *)in_buffer)[0] = 0; + ((uint16_t *)in_buffer)[1] = 0; + + /* TODO Add error check if space will be sufficient */ + gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8); + + /* + * Input Gather List + */ + i = 0; + + /* Offset control word followed by iv */ + + if (flags == 0x0) { + inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8); + outputlen = inputlen; + /* iv offset is 0 */ + *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16); + } else { + inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8); + outputlen = mac_len; + /* iv offset is 0 */ + *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset); + } + + i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len); + + /* IV */ + iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN; + memcpy(iv_d, iv_s, iv_len); + + /* input data */ + size = inputlen - iv_len; + if (size) { + i = fill_sg_comp_from_iov(gather_comp, i, + params->src_iov, 0, + &size, NULL, 0); + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i); + g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + /* + * Output Scatter List + */ + + i = 0; + scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + 
g_size_bytes); + + if (flags == 0x1) { + /* IV in SLIST only for F8 */ + iv_len = 0; + } + + /* IV */ + if (iv_len) { + i = fill_sg_comp(scatter_comp, i, + offset_dma + OFF_CTRL_LEN, + iv_len); + } + + /* Add output data */ + if (req_flags & VALID_MAC_BUF) { + size = outputlen - iv_len - mac_len; + if (size) { + i = fill_sg_comp_from_iov(scatter_comp, i, + params->dst_iov, 0, + &size, NULL, 0); + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + + /* mac data */ + if (mac_len) { + i = fill_sg_comp_from_buf(scatter_comp, i, + ¶ms->mac_buf); + } + } else { + /* Output including mac */ + size = outputlen - iv_len; + if (size) { + i = fill_sg_comp_from_iov(scatter_comp, i, + params->dst_iov, 0, + &size, NULL, 0); + + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + } + ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i); + s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE; + + /* This is DPTR len incase of SG mode */ + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* cpt alternate completion address saved earlier */ + req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); + *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT); + rptr_dma = c_dma - 8; + + req->ist.ei1 = dptr_dma; + req->ist.ei2 = rptr_dma; + + /* First 16-bit swap then 64-bit swap */ + /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions + * to eliminate all the swapping + */ + vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64); + + /* vq command w3 */ + vq_cmd_w3.u64 = 0; + vq_cmd_w3.s.grp = 0; + vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr + + offsetof(struct cpt_ctx, k_ctx); + + /* 16 byte aligned cpt res address */ + req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr); + *req->completion_addr = COMPLETION_CODE_INIT; + req->comp_baddr = c_dma; + + /* Fill microcode part of instruction */ + req->ist.ei0 = vq_cmd_w0.u64; + req->ist.ei3 = vq_cmd_w3.u64; + + req->op = op; + + *prep_req = req; + return; +} + +static __rte_always_inline void +cpt_kasumi_dec_prep(uint64_t d_offs, + uint64_t d_lens, + fc_params_t *params, + void *op, + void **prep_req) +{ + uint32_t size; + int32_t inputlen = 0, outputlen; + struct cpt_ctx *cpt_ctx; + uint8_t i = 0, iv_len = 8; + struct cpt_request_info *req; + buf_ptr_t *buf_p; + uint32_t encr_offset; + uint32_t encr_data_len; + int flags, m_size; + uint8_t dir = 0; + void *m_vaddr, *c_vaddr; + uint64_t m_dma, c_dma; + uint64_t *offset_vaddr, offset_dma; + vq_cmd_word0_t vq_cmd_w0; + vq_cmd_word3_t vq_cmd_w3; + opcode_info_t opcode; + uint8_t *in_buffer; + uint32_t g_size_bytes, s_size_bytes; + uint64_t dptr_dma, rptr_dma; + sg_comp_t *gather_comp; + sg_comp_t *scatter_comp; + + buf_p = ¶ms->meta_buf; + m_vaddr = buf_p->vaddr; + m_dma = buf_p->dma_addr; + m_size = buf_p->size; + + encr_offset = ENCR_OFFSET(d_offs) / 8; + encr_data_len = ENCR_DLEN(d_lens); + + cpt_ctx = params->ctx_buf.vaddr; + flags = cpt_ctx->zsk_flags; + /* + * Save initial space that followed app data for completion code & + * alternate completion code to fall in same cache line as app data + */ + m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE; + m_dma += COMPLETION_CODE_SIZE; + size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) - + (uint8_t *)m_vaddr; + + c_vaddr = (uint8_t *)m_vaddr + size; + c_dma = m_dma + size; + size += 
sizeof(cpt_res_s_t); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* Reserve memory for cpt request info */ + req = m_vaddr; + + size = sizeof(struct cpt_request_info); + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE; + + /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */ + opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) | + (dir << 4) | (0 << 3) | (flags & 0x7)); + + /* + * GP op header, lengths are expected in bits. + */ + vq_cmd_w0.u64 = 0; + vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len); + vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags); + + /* consider iv len */ + encr_offset += iv_len; + + inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8); + outputlen = inputlen; + + /* save space for offset ctrl & iv */ + offset_vaddr = m_vaddr; + offset_dma = m_dma; + + m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len; + m_dma += OFF_CTRL_LEN + iv_len; + m_size -= OFF_CTRL_LEN + iv_len; + + /* DPTR has SG list */ + in_buffer = m_vaddr; + dptr_dma = m_dma; + + ((uint16_t *)in_buffer)[0] = 0; + ((uint16_t *)in_buffer)[1] = 0; + + /* TODO Add error check if space will be sufficient */ + gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8); + + /* + * Input Gather List + */ + i = 0; + + /* Offset control word followed by iv */ + *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16); + + i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len); + + /* IV */ + memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN, + params->iv_buf, iv_len); + + /* Add input data */ + size = inputlen - iv_len; + if (size) { + i = fill_sg_comp_from_iov(gather_comp, i, + params->src_iov, + 0, &size, NULL, 0); + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i); + g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + /* + * Output Scatter List + */ + + i = 0; + scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes); + + /* IV */ + i = fill_sg_comp(scatter_comp, i, + offset_dma + OFF_CTRL_LEN, + iv_len); + + /* Add output data */ + size = outputlen - iv_len; + if (size) { + i = fill_sg_comp_from_iov(scatter_comp, i, + params->dst_iov, 0, + &size, NULL, 0); + if (unlikely(size)) { + CPT_LOG_DP_ERR("Insufficient buffer space," + " size %d needed", size); + return; + } + } + ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i); + s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t); + + size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE; + + /* This is DPTR len incase of SG mode */ + vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size); + + m_vaddr = (uint8_t *)m_vaddr + size; + m_dma += size; + m_size -= size; + + /* cpt alternate completion address saved earlier */ + req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); + *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT); + rptr_dma = c_dma - 8; + + req->ist.ei1 = dptr_dma; + req->ist.ei2 = rptr_dma; + + /* First 16-bit swap then 64-bit swap */ + /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions + * to eliminate all the swapping + */ + vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64); + + /* vq command w3 */ + vq_cmd_w3.u64 = 0; + vq_cmd_w3.s.grp = 0; + vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr + + offsetof(struct cpt_ctx, k_ctx); + + /* 16 byte aligned cpt res address */ + req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr); + *req->completion_addr = 
COMPLETION_CODE_INIT; + req->comp_baddr = c_dma; + + /* Fill microcode part of instruction */ + req->ist.ei0 = vq_cmd_w0.u64; + req->ist.ei3 = vq_cmd_w3.u64; + + req->op = op; + + *prep_req = req; + return; +} + +static __rte_always_inline void * +cpt_fc_dec_hmac_prep(uint32_t flags, + uint64_t d_offs, + uint64_t d_lens, + fc_params_t *fc_params, + void *op) +{ + struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr; + uint8_t fc_type; + void *prep_req = NULL; + + fc_type = ctx->fc_type; + + if (likely(fc_type == FC_GEN)) { + cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op, + &prep_req); + } else if (fc_type == ZUC_SNOW3G) { + cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op, + &prep_req); + } else if (fc_type == KASUMI) { + cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req); + } + + /* + * For AUTH_ONLY case, + * MC only supports digest generation and verification + * should be done in software by memcmp() + */ + + return prep_req; +} + +static __rte_always_inline void *__hot +cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, + fc_params_t *fc_params, void *op) +{ + struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr; + uint8_t fc_type; + void *prep_req = NULL; + + fc_type = ctx->fc_type; + + /* Common api for rest of the ops */ + if (likely(fc_type == FC_GEN)) { + cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op, + &prep_req); + } else if (fc_type == ZUC_SNOW3G) { + cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op, + &prep_req); + } else if (fc_type == KASUMI) { + cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op, + &prep_req); + } else if (fc_type == HASH_HMAC) { + cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req); + } + + return prep_req; +} + +static __rte_always_inline int +cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key, + uint16_t key_len, uint16_t mac_len) +{ + struct cpt_ctx *cpt_ctx = ctx; + mc_fc_context_t *fctx = &cpt_ctx->fctx; + uint64_t *ctrl_flags = NULL; + + if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) { + uint32_t keyx[4]; + + if (key_len != 16) + return -1; + /* No support for AEAD yet */ + if (cpt_ctx->enc_cipher) + return -1; + /* For ZUC/SNOW3G/Kasumi */ + switch (type) { + case SNOW3G_UIA2: + cpt_ctx->snow3g = 1; + gen_key_snow3g(key, keyx); + memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len); + cpt_ctx->fc_type = ZUC_SNOW3G; + cpt_ctx->zsk_flags = 0x1; + break; + case ZUC_EIA3: + cpt_ctx->snow3g = 0; + memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len); + memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32); + cpt_ctx->fc_type = ZUC_SNOW3G; + cpt_ctx->zsk_flags = 0x1; + break; + case KASUMI_F9_ECB: + /* Kasumi ECB mode */ + cpt_ctx->k_ecb = 1; + memcpy(cpt_ctx->k_ctx.ci_key, key, key_len); + cpt_ctx->fc_type = KASUMI; + cpt_ctx->zsk_flags = 0x1; + break; + case KASUMI_F9_CBC: + memcpy(cpt_ctx->k_ctx.ci_key, key, key_len); + cpt_ctx->fc_type = KASUMI; + cpt_ctx->zsk_flags = 0x1; + break; + default: + return -1; + } + cpt_ctx->mac_len = 4; + cpt_ctx->hash_type = type; + return 0; + } + + if (!(cpt_ctx->fc_type == FC_GEN && !type)) { + if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher) + cpt_ctx->fc_type = HASH_HMAC; + } + + ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags; + *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags); + + /* For GMAC auth, cipher must be NULL */ + if (type == GMAC_TYPE) + CPT_P_ENC_CTRL(fctx).enc_cipher = 0; + + CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type; + CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len; + + if (key_len) { + cpt_ctx->hmac = 1; + 
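+		/* Note: only the raw key is stored (in the opad field
+		 * below); with auth_input_type = 1 the microcode is
+		 * presumably expected to derive the actual ipad/opad
+		 * values itself.
+		 */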
+		memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
+		memcpy(cpt_ctx->auth_key, key, key_len);
+		cpt_ctx->auth_key_len = key_len;
+		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
+		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
+		memcpy(fctx->hmac.opad, key, key_len);
+		CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
+	}
+	*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
+	return 0;
+}
+
+static __rte_always_inline int
+fill_sess_aead(struct rte_crypto_sym_xform *xform,
+	       struct cpt_sess_misc *sess)
+{
+	struct rte_crypto_aead_xform *aead_form;
+	cipher_type_t enc_type = 0; /* NULL Cipher type */
+	auth_type_t auth_type = 0; /* NULL Auth type */
+	uint32_t cipher_key_len = 0;
+	uint8_t zsk_flag = 0, aes_gcm = 0;
+	void *ctx;
+
+	aead_form = &xform->aead;
+
+	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+	    aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
+		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
+	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+		   aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
+		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
+	} else {
+		CPT_LOG_DP_ERR("Unknown cipher operation");
+		return -1;
+	}
+	switch (aead_form->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		enc_type = AES_GCM;
+		cipher_key_len = 16;
+		aes_gcm = 1;
+		break;
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
+			       aead_form->algo);
+		return -1;
+	default:
+		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+			       aead_form->algo);
+		return -1;
+	}
+	if (aead_form->key.length < cipher_key_len) {
+		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
+			       (unsigned long)aead_form->key.length);
+		return -1;
+	}
+	sess->zsk_flag = zsk_flag;
+	sess->aes_gcm = aes_gcm;
+	sess->mac_len = aead_form->digest_length;
+	sess->iv_offset = aead_form->iv.offset;
+	sess->iv_length = aead_form->iv.length;
+	sess->aad_length = aead_form->aad_length;
+	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
+
+	cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
+			    aead_form->key.length, NULL);
+
+	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
+
+	return 0;
+}
+
+static __rte_always_inline int
+fill_sess_cipher(struct rte_crypto_sym_xform *xform,
+		 struct cpt_sess_misc *sess)
+{
+	struct rte_crypto_cipher_xform *c_form;
+	cipher_type_t enc_type = 0; /* NULL Cipher type */
+	uint32_t cipher_key_len = 0;
+	uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+		return -1;
+
+	c_form = &xform->cipher;
+
+	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
+	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
+		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
+	else {
+		CPT_LOG_DP_ERR("Unknown cipher operation");
+		return -1;
+	}
+
+	switch (c_form->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		enc_type = AES_CBC;
+		cipher_key_len = 16;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		enc_type = DES3_CBC;
+		cipher_key_len = 24;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
+		/* DES is implemented using 3DES in hardware */
+		enc_type = DES3_CBC;
+		cipher_key_len = 8;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		enc_type = AES_CTR;
+		cipher_key_len = 16;
+		aes_ctr = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		enc_type = 0;
+		is_null = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		enc_type = KASUMI_F8_ECB;
+		cipher_key_len = 16;
+		zsk_flag = K_F8;
+		break;
+	case
RTE_CRYPTO_CIPHER_SNOW3G_UEA2: + enc_type = SNOW3G_UEA2; + cipher_key_len = 16; + zsk_flag = ZS_EA; + break; + case RTE_CRYPTO_CIPHER_ZUC_EEA3: + enc_type = ZUC_EEA3; + cipher_key_len = 16; + zsk_flag = ZS_EA; + break; + case RTE_CRYPTO_CIPHER_AES_XTS: + enc_type = AES_XTS; + cipher_key_len = 16; + break; + case RTE_CRYPTO_CIPHER_3DES_ECB: + enc_type = DES3_ECB; + cipher_key_len = 24; + break; + case RTE_CRYPTO_CIPHER_AES_ECB: + enc_type = AES_ECB; + cipher_key_len = 16; + break; + case RTE_CRYPTO_CIPHER_3DES_CTR: + case RTE_CRYPTO_CIPHER_AES_F8: + case RTE_CRYPTO_CIPHER_ARC4: + CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u", + c_form->algo); + return -1; + default: + CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified", + c_form->algo); + return -1; + } + + if (c_form->key.length < cipher_key_len) { + CPT_LOG_DP_ERR("Invalid cipher params keylen %lu", + (unsigned long) c_form->key.length); + return -1; + } + + sess->zsk_flag = zsk_flag; + sess->aes_gcm = aes_gcm; + sess->aes_ctr = aes_ctr; + sess->iv_offset = c_form->iv.offset; + sess->iv_length = c_form->iv.length; + sess->is_null = is_null; + + cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data, + c_form->key.length, NULL); + + return 0; +} + +static __rte_always_inline int +fill_sess_auth(struct rte_crypto_sym_xform *xform, + struct cpt_sess_misc *sess) +{ + struct rte_crypto_auth_xform *a_form; + auth_type_t auth_type = 0; /* NULL Auth type */ + uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0; + + if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) + goto error_out; + + a_form = &xform->auth; + + if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY) + sess->cpt_op |= CPT_OP_AUTH_VERIFY; + else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE) + sess->cpt_op |= CPT_OP_AUTH_GENERATE; + else { + CPT_LOG_DP_ERR("Unknown auth operation"); + return -1; + } + + if (a_form->key.length > 64) { + CPT_LOG_DP_ERR("Auth key length is big"); + return -1; + } + + switch (a_form->algo) { + case RTE_CRYPTO_AUTH_SHA1_HMAC: + /* Fall through */ + case RTE_CRYPTO_AUTH_SHA1: + auth_type = SHA1_TYPE; + break; + case RTE_CRYPTO_AUTH_SHA256_HMAC: + case RTE_CRYPTO_AUTH_SHA256: + auth_type = SHA2_SHA256; + break; + case RTE_CRYPTO_AUTH_SHA512_HMAC: + case RTE_CRYPTO_AUTH_SHA512: + auth_type = SHA2_SHA512; + break; + case RTE_CRYPTO_AUTH_AES_GMAC: + auth_type = GMAC_TYPE; + aes_gcm = 1; + break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + case RTE_CRYPTO_AUTH_SHA224: + auth_type = SHA2_SHA224; + break; + case RTE_CRYPTO_AUTH_SHA384_HMAC: + case RTE_CRYPTO_AUTH_SHA384: + auth_type = SHA2_SHA384; + break; + case RTE_CRYPTO_AUTH_MD5_HMAC: + case RTE_CRYPTO_AUTH_MD5: + auth_type = MD5_TYPE; + break; + case RTE_CRYPTO_AUTH_KASUMI_F9: + auth_type = KASUMI_F9_ECB; + /* + * Indicate that direction needs to be taken out + * from end of src + */ + zsk_flag = K_F9; + break; + case RTE_CRYPTO_AUTH_SNOW3G_UIA2: + auth_type = SNOW3G_UIA2; + zsk_flag = ZS_IA; + break; + case RTE_CRYPTO_AUTH_ZUC_EIA3: + auth_type = ZUC_EIA3; + zsk_flag = ZS_IA; + break; + case RTE_CRYPTO_AUTH_NULL: + auth_type = 0; + is_null = 1; + break; + case RTE_CRYPTO_AUTH_AES_XCBC_MAC: + case RTE_CRYPTO_AUTH_AES_CMAC: + case RTE_CRYPTO_AUTH_AES_CBC_MAC: + CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u", + a_form->algo); + goto error_out; + default: + CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified", + a_form->algo); + goto error_out; + } + + sess->zsk_flag = zsk_flag; + sess->aes_gcm = aes_gcm; + sess->mac_len = a_form->digest_length; + sess->is_null = is_null; + if (zsk_flag) { + 
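+		/* Note: zsk_flag is set above only for the SNOW3G/ZUC/KASUMI
+		 * auth algos, whose IV is carried in the auth xform rather
+		 * than the cipher xform.
+		 */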
+		sess->auth_iv_offset = a_form->iv.offset;
+		sess->auth_iv_length = a_form->iv.length;
+	}
+	cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
+			    a_form->key.length, a_form->digest_length);
+
+	return 0;
+
+error_out:
+	return -1;
+}
+
+static __rte_always_inline int
+fill_sess_gmac(struct rte_crypto_sym_xform *xform,
+	       struct cpt_sess_misc *sess)
+{
+	struct rte_crypto_auth_xform *a_form;
+	cipher_type_t enc_type = 0; /* NULL Cipher type */
+	auth_type_t auth_type = 0; /* NULL Auth type */
+	uint8_t zsk_flag = 0, aes_gcm = 0;
+	void *ctx;
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+		return -1;
+
+	a_form = &xform->auth;
+
+	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->cpt_op |= CPT_OP_ENCODE;
+	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
+		sess->cpt_op |= CPT_OP_DECODE;
+	else {
+		CPT_LOG_DP_ERR("Unknown auth operation");
+		return -1;
+	}
+
+	switch (a_form->algo) {
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		enc_type = AES_GCM;
+		auth_type = GMAC_TYPE;
+		break;
+	default:
+		CPT_LOG_DP_ERR("Crypto: Undefined auth algo %u specified",
+			       a_form->algo);
+		return -1;
+	}
+
+	sess->zsk_flag = zsk_flag;
+	sess->aes_gcm = aes_gcm;
+	sess->is_gmac = 1;
+	sess->iv_offset = a_form->iv.offset;
+	sess->iv_length = a_form->iv.length;
+	sess->mac_len = a_form->digest_length;
+	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
+
+	cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
+			    a_form->key.length, NULL);
+	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);
+
+	return 0;
+}
+
+static __rte_always_inline void *
+alloc_op_meta(struct rte_mbuf *m_src,
+	      buf_ptr_t *buf,
+	      int32_t len,
+	      struct rte_mempool *cpt_meta_pool)
+{
+	uint8_t *mdata;
+
+#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
+	if (likely(m_src && (m_src->nb_segs == 1))) {
+		int32_t tailroom;
+		phys_addr_t mphys;
+
+		/* Check if tailroom is sufficient to hold meta data */
+		tailroom = rte_pktmbuf_tailroom(m_src);
+		if (likely(tailroom > len + 8)) {
+			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
+			mphys = m_src->buf_physaddr + m_src->buf_len;
+			mdata -= len;
+			mphys -= len;
+			buf->vaddr = mdata;
+			buf->dma_addr = mphys;
+			buf->size = len;
+			/* Indicate that this is an mbuf-allocated mdata */
+			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
+			return mdata;
+		}
+	}
+#else
+	RTE_SET_USED(m_src);
+#endif
+
+	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
+		return NULL;
+
+	buf->vaddr = mdata;
+	buf->dma_addr = rte_mempool_virt2iova(mdata);
+	buf->size = len;
+
+	return mdata;
+}
+
+/**
+ * free_op_meta - return a metabuf to its mempool.
+ * @param mdata: pointer to the metabuf.
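+ * @param cpt_meta_pool: mempool the metabuf was drawn from.
+ *
+ * Note: alloc_op_meta() tags metabufs carved out of mbuf tailroom by
+ * setting bit 0 of the returned pointer; the mempool put is skipped
+ * for those here.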
+ */ +static __rte_always_inline void +free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool) +{ + bool nofree = ((uintptr_t)mdata & 1ull); + + if (likely(nofree)) + return; + rte_mempool_put(cpt_meta_pool, mdata); +} + +static __rte_always_inline uint32_t +prepare_iov_from_pkt(struct rte_mbuf *pkt, + iov_ptr_t *iovec, uint32_t start_offset) +{ + uint16_t index = 0; + void *seg_data = NULL; + phys_addr_t seg_phys; + int32_t seg_size = 0; + + if (!pkt) { + iovec->buf_cnt = 0; + return 0; + } + + if (!start_offset) { + seg_data = rte_pktmbuf_mtod(pkt, void *); + seg_phys = rte_pktmbuf_mtophys(pkt); + seg_size = pkt->data_len; + } else { + while (start_offset >= pkt->data_len) { + start_offset -= pkt->data_len; + pkt = pkt->next; + } + + seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset); + seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset); + seg_size = pkt->data_len - start_offset; + if (!seg_size) + return 1; + } + + /* first seg */ + iovec->bufs[index].vaddr = seg_data; + iovec->bufs[index].dma_addr = seg_phys; + iovec->bufs[index].size = seg_size; + index++; + pkt = pkt->next; + + while (unlikely(pkt != NULL)) { + seg_data = rte_pktmbuf_mtod(pkt, void *); + seg_phys = rte_pktmbuf_mtophys(pkt); + seg_size = pkt->data_len; + if (!seg_size) + break; + + iovec->bufs[index].vaddr = seg_data; + iovec->bufs[index].dma_addr = seg_phys; + iovec->bufs[index].size = seg_size; + + index++; + + pkt = pkt->next; + } + + iovec->buf_cnt = index; + return 0; +} + +static __rte_always_inline uint32_t +prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt, + fc_params_t *param, + uint32_t *flags) +{ + uint16_t index = 0; + void *seg_data = NULL; + phys_addr_t seg_phys; + uint32_t seg_size = 0; + iov_ptr_t *iovec; + + seg_data = rte_pktmbuf_mtod(pkt, void *); + seg_phys = rte_pktmbuf_mtophys(pkt); + seg_size = pkt->data_len; + + /* first seg */ + if (likely(!pkt->next)) { + uint32_t headroom, tailroom; + + *flags |= SINGLE_BUF_INPLACE; + headroom = rte_pktmbuf_headroom(pkt); + tailroom = rte_pktmbuf_tailroom(pkt); + if (likely((headroom >= 24) && + (tailroom >= 8))) { + /* In 83XX this is prerequivisit for Direct mode */ + *flags |= SINGLE_BUF_HEADTAILROOM; + } + param->bufs[0].vaddr = seg_data; + param->bufs[0].dma_addr = seg_phys; + param->bufs[0].size = seg_size; + return 0; + } + iovec = param->src_iov; + iovec->bufs[index].vaddr = seg_data; + iovec->bufs[index].dma_addr = seg_phys; + iovec->bufs[index].size = seg_size; + index++; + pkt = pkt->next; + + while (unlikely(pkt != NULL)) { + seg_data = rte_pktmbuf_mtod(pkt, void *); + seg_phys = rte_pktmbuf_mtophys(pkt); + seg_size = pkt->data_len; + + if (!seg_size) + break; + + iovec->bufs[index].vaddr = seg_data; + iovec->bufs[index].dma_addr = seg_phys; + iovec->bufs[index].size = seg_size; + + index++; + + pkt = pkt->next; + } + + iovec->buf_cnt = index; + return 0; +} + +static __rte_always_inline int +fill_fc_params(struct rte_crypto_op *cop, + struct cpt_sess_misc *sess_misc, + struct cpt_qp_meta_info *m_info, + void **mdata_ptr, + void **prep_req) +{ + uint32_t space = 0; + struct rte_crypto_sym_op *sym_op = cop->sym; + void *mdata = NULL; + uintptr_t *op; + uint32_t mc_hash_off; + uint32_t flags = 0; + uint64_t d_offs, d_lens; + struct rte_mbuf *m_src, *m_dst; + uint8_t cpt_op = sess_misc->cpt_op; + uint8_t zsk_flag = sess_misc->zsk_flag; + uint8_t aes_gcm = sess_misc->aes_gcm; + uint16_t mac_len = sess_misc->mac_len; +#ifdef CPT_ALWAYS_USE_SG_MODE + uint8_t inplace = 0; +#else + uint8_t inplace = 1; +#endif + fc_params_t 
fc_params; + char src[SRC_IOV_SIZE]; + char dst[SRC_IOV_SIZE]; + uint32_t iv_buf[4]; + int ret; + + if (likely(sess_misc->iv_length)) { + flags |= VALID_IV_BUF; + fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, + uint8_t *, sess_misc->iv_offset); + if (sess_misc->aes_ctr && + unlikely(sess_misc->iv_length != 16)) { + memcpy((uint8_t *)iv_buf, + rte_crypto_op_ctod_offset(cop, + uint8_t *, sess_misc->iv_offset), 12); + iv_buf[3] = rte_cpu_to_be_32(0x1); + fc_params.iv_buf = iv_buf; + } + } + + if (zsk_flag) { + fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop, + uint8_t *, + sess_misc->auth_iv_offset); + if (zsk_flag == K_F9) { + CPT_LOG_DP_ERR("Should not reach here for " + "kasumi F9\n"); + } + if (zsk_flag != ZS_EA) + inplace = 0; + } + m_src = sym_op->m_src; + m_dst = sym_op->m_dst; + + if (aes_gcm) { + uint8_t *salt; + uint8_t *aad_data; + uint16_t aad_len; + + d_offs = sym_op->aead.data.offset; + d_lens = sym_op->aead.data.length; + mc_hash_off = sym_op->aead.data.offset + + sym_op->aead.data.length; + + aad_data = sym_op->aead.aad.data; + aad_len = sess_misc->aad_length; + if (likely((aad_data + aad_len) == + rte_pktmbuf_mtod_offset(m_src, + uint8_t *, + sym_op->aead.data.offset))) { + d_offs = (d_offs - aad_len) | (d_offs << 16); + d_lens = (d_lens + aad_len) | (d_lens << 32); + } else { + fc_params.aad_buf.vaddr = sym_op->aead.aad.data; + fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr; + fc_params.aad_buf.size = aad_len; + flags |= VALID_AAD_BUF; + inplace = 0; + d_offs = d_offs << 16; + d_lens = d_lens << 32; + } + + salt = fc_params.iv_buf; + if (unlikely(*(uint32_t *)salt != sess_misc->salt)) { + cpt_fc_salt_update(SESS_PRIV(sess_misc), salt); + sess_misc->salt = *(uint32_t *)salt; + } + fc_params.iv_buf = salt + 4; + if (likely(mac_len)) { + struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst : + m_src; + + if (!m) + m = m_src; + + /* hmac immediately following data is best case */ + if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) + + mc_hash_off != + (uint8_t *)sym_op->aead.digest.data)) { + flags |= VALID_MAC_BUF; + fc_params.mac_buf.size = sess_misc->mac_len; + fc_params.mac_buf.vaddr = + sym_op->aead.digest.data; + fc_params.mac_buf.dma_addr = + sym_op->aead.digest.phys_addr; + inplace = 0; + } + } + } else { + d_offs = sym_op->cipher.data.offset; + d_lens = sym_op->cipher.data.length; + mc_hash_off = sym_op->cipher.data.offset + + sym_op->cipher.data.length; + d_offs = (d_offs << 16) | sym_op->auth.data.offset; + d_lens = (d_lens << 32) | sym_op->auth.data.length; + + if (mc_hash_off < (sym_op->auth.data.offset + + sym_op->auth.data.length)){ + mc_hash_off = (sym_op->auth.data.offset + + sym_op->auth.data.length); + } + /* for gmac, salt should be updated like in gcm */ + if (unlikely(sess_misc->is_gmac)) { + uint8_t *salt; + salt = fc_params.iv_buf; + if (unlikely(*(uint32_t *)salt != sess_misc->salt)) { + cpt_fc_salt_update(SESS_PRIV(sess_misc), salt); + sess_misc->salt = *(uint32_t *)salt; + } + fc_params.iv_buf = salt + 4; + } + if (likely(mac_len)) { + struct rte_mbuf *m; + + m = (cpt_op & CPT_OP_ENCODE) ? 
m_dst : m_src; + if (!m) + m = m_src; + + /* hmac immediately following data is best case */ + if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) + + mc_hash_off != + (uint8_t *)sym_op->auth.digest.data)) { + flags |= VALID_MAC_BUF; + fc_params.mac_buf.size = + sess_misc->mac_len; + fc_params.mac_buf.vaddr = + sym_op->auth.digest.data; + fc_params.mac_buf.dma_addr = + sym_op->auth.digest.phys_addr; + inplace = 0; + } + } + } + fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc); + fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr; + + if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE)) + inplace = 0; + + if (likely(!m_dst && inplace)) { + /* Case of single buffer without AAD buf or + * separate mac buf in place and + * not air crypto + */ + fc_params.dst_iov = fc_params.src_iov = (void *)src; + + if (unlikely(prepare_iov_from_pkt_inplace(m_src, + &fc_params, + &flags))) { + CPT_LOG_DP_ERR("Prepare inplace src iov failed"); + ret = -EINVAL; + goto err_exit; + } + + } else { + /* Out of place processing */ + fc_params.src_iov = (void *)src; + fc_params.dst_iov = (void *)dst; + + /* Store SG I/O in the api for reuse */ + if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) { + CPT_LOG_DP_ERR("Prepare src iov failed"); + ret = -EINVAL; + goto err_exit; + } + + if (unlikely(m_dst != NULL)) { + uint32_t pkt_len; + + /* Try to make room as much as src has */ + m_dst = sym_op->m_dst; + pkt_len = rte_pktmbuf_pkt_len(m_dst); + + if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) { + pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len; + if (!rte_pktmbuf_append(m_dst, pkt_len)) { + CPT_LOG_DP_ERR("Not enough space in " + "m_dst %p, need %u" + " more", + m_dst, pkt_len); + ret = -EINVAL; + goto err_exit; + } + } + + if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) { + CPT_LOG_DP_ERR("Prepare dst iov failed for " + "m_dst %p", m_dst); + ret = -EINVAL; + goto err_exit; + } + } else { + fc_params.dst_iov = (void *)src; + } + } + + if (likely(flags & SINGLE_BUF_HEADTAILROOM)) + mdata = alloc_op_meta(m_src, &fc_params.meta_buf, + m_info->lb_mlen, m_info->pool); + else + mdata = alloc_op_meta(NULL, &fc_params.meta_buf, + m_info->sg_mlen, m_info->pool); + + if (unlikely(mdata == NULL)) { + CPT_LOG_DP_ERR("Error allocating meta buffer for request"); + ret = -ENOMEM; + goto err_exit; + } + + op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull); + op[0] = (uintptr_t)mdata; + op[1] = (uintptr_t)cop; + op[2] = op[3] = 0; /* Used to indicate auth verify */ + space += 4 * sizeof(uint64_t); + + fc_params.meta_buf.vaddr = (uint8_t *)op + space; + fc_params.meta_buf.dma_addr += space; + fc_params.meta_buf.size -= space; + + /* Finally prepare the instruction */ + if (cpt_op & CPT_OP_ENCODE) + *prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, + &fc_params, op); + else + *prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, + &fc_params, op); + + if (unlikely(*prep_req == NULL)) { + CPT_LOG_DP_ERR("Preparing request failed due to bad input arg"); + ret = -EINVAL; + goto free_mdata_and_exit; + } + + *mdata_ptr = mdata; + + return 0; + +free_mdata_and_exit: + free_op_meta(mdata, m_info->pool); +err_exit: + return ret; +} + +static __rte_always_inline void +compl_auth_verify(struct rte_crypto_op *op, + uint8_t *gen_mac, + uint64_t mac_len) +{ + uint8_t *mac; + struct rte_crypto_sym_op *sym_op = op->sym; + + if (sym_op->auth.digest.data) + mac = sym_op->auth.digest.data; + else + mac = rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, + sym_op->auth.data.length + + sym_op->auth.data.offset); + if 
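+	/* Layout note (illustrative): fill_fc_params() above carves its
+	 * bookkeeping out of the front of the meta buffer:
+	 *   op[0] = mdata    restore pointer for free_op_meta()
+	 *   op[1] = cop      crypto op, recovered at completion
+	 *   op[2..3] = 0     scratch used to flag auth verify
+	 * and then advances fc_params.meta_buf by the same 32 bytes so the
+	 * prep routines never overwrite these words.
+	 */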
(!mac) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return; + } + + if (memcmp(mac, gen_mac, mac_len)) + op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + else + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +} + +static __rte_always_inline int +instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess) +{ + struct rte_crypto_sym_xform *chain; + + CPT_PMD_INIT_FUNC_TRACE(); + + if (cpt_is_algo_supported(xform)) + goto err; + + chain = xform; + while (chain) { + switch (chain->type) { + case RTE_CRYPTO_SYM_XFORM_AEAD: + if (fill_sess_aead(chain, sess)) + goto err; + break; + case RTE_CRYPTO_SYM_XFORM_CIPHER: + if (fill_sess_cipher(chain, sess)) + goto err; + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { + if (fill_sess_gmac(chain, sess)) + goto err; + } else { + if (fill_sess_auth(chain, sess)) + goto err; + } + break; + default: + CPT_LOG_DP_ERR("Invalid crypto xform type"); + break; + } + chain = chain->next; + } + + return 0; + +err: + return -1; +} + +static __rte_always_inline void +find_kasumif9_direction_and_length(uint8_t *src, + uint32_t counter_num_bytes, + uint32_t *addr_length_in_bits, + uint8_t *addr_direction) +{ + uint8_t found = 0; + uint32_t pos; + uint8_t last_byte; + while (!found && counter_num_bytes > 0) { + counter_num_bytes--; + if (src[counter_num_bytes] == 0x00) + continue; + pos = rte_bsf32(src[counter_num_bytes]); + if (pos == 7) { + if (likely(counter_num_bytes > 0)) { + last_byte = src[counter_num_bytes - 1]; + *addr_direction = last_byte & 0x1; + *addr_length_in_bits = counter_num_bytes * 8 + - 1; + } + } else { + last_byte = src[counter_num_bytes]; + *addr_direction = (last_byte >> (pos + 1)) & 0x1; + *addr_length_in_bits = counter_num_bytes * 8 + + (8 - (pos + 2)); + } + found = 1; + } +} + +/* + * This handles all auth only except AES_GMAC + */ +static __rte_always_inline int +fill_digest_params(struct rte_crypto_op *cop, + struct cpt_sess_misc *sess, + struct cpt_qp_meta_info *m_info, + void **mdata_ptr, + void **prep_req) +{ + uint32_t space = 0; + struct rte_crypto_sym_op *sym_op = cop->sym; + void *mdata; + phys_addr_t mphys; + uint64_t *op; + uint32_t auth_range_off; + uint32_t flags = 0; + uint64_t d_offs = 0, d_lens; + struct rte_mbuf *m_src, *m_dst; + uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK; + uint8_t zsk_flag = sess->zsk_flag; + uint16_t mac_len = sess->mac_len; + fc_params_t params; + char src[SRC_IOV_SIZE]; + uint8_t iv_buf[16]; + int ret; + + memset(¶ms, 0, sizeof(fc_params_t)); + + m_src = sym_op->m_src; + + /* For just digest lets force mempool alloc */ + mdata = alloc_op_meta(NULL, ¶ms.meta_buf, m_info->sg_mlen, + m_info->pool); + if (mdata == NULL) { + ret = -ENOMEM; + goto err_exit; + } + + mphys = params.meta_buf.dma_addr; + + op = mdata; + op[0] = (uintptr_t)mdata; + op[1] = (uintptr_t)cop; + op[2] = op[3] = 0; /* Used to indicate auth verify */ + space += 4 * sizeof(uint64_t); + + auth_range_off = sym_op->auth.data.offset; + + flags = VALID_MAC_BUF; + params.src_iov = (void *)src; + if (unlikely(zsk_flag)) { + /* + * Since for Zuc, Kasumi, Snow3g offsets are in bits + * we will send pass through even for auth only case, + * let MC handle it + */ + d_offs = auth_range_off; + auth_range_off = 0; + params.auth_iv_buf = rte_crypto_op_ctod_offset(cop, + uint8_t *, sess->auth_iv_offset); + if (zsk_flag == K_F9) { + uint32_t length_in_bits, num_bytes; + uint8_t *src, direction = 0; + uint32_t counter_num_bytes; + + memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src, + uint8_t *), 
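+	/* Worked example (illustrative): KASUMI F9 input ends with
+	 * <message bits><direction bit><1><0-padding>, and the helper
+	 * called below scans backwards for that trailing '1'. If the last
+	 * non-zero byte, at index n, is 0x40 (0100 0000b), rte_bsf32(0x40)
+	 * is 6, the direction bit is bit 7 of the same byte, and the
+	 * message length works out to n * 8 + (8 - (6 + 2)) = n * 8 bits.
+	 */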
8); + /* + * This is kasumi f9, take direction from + * source buffer + */ + length_in_bits = cop->sym->auth.data.length; + num_bytes = (length_in_bits >> 3); + counter_num_bytes = num_bytes; + src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *); + find_kasumif9_direction_and_length(src, + counter_num_bytes, + &length_in_bits, + &direction); + length_in_bits -= 64; + cop->sym->auth.data.offset += 64; + d_offs = cop->sym->auth.data.offset; + auth_range_off = d_offs / 8; + cop->sym->auth.data.length = length_in_bits; + + /* Store it at end of auth iv */ + iv_buf[8] = direction; + params.auth_iv_buf = iv_buf; + } + } + + d_lens = sym_op->auth.data.length; + + params.ctx_buf.vaddr = SESS_PRIV(sess); + params.ctx_buf.dma_addr = sess->ctx_dma_addr; + + if (auth_op == CPT_OP_AUTH_GENERATE) { + if (sym_op->auth.digest.data) { + /* + * Digest to be generated + * in separate buffer + */ + params.mac_buf.size = + sess->mac_len; + params.mac_buf.vaddr = + sym_op->auth.digest.data; + params.mac_buf.dma_addr = + sym_op->auth.digest.phys_addr; + } else { + uint32_t off = sym_op->auth.data.offset + + sym_op->auth.data.length; + int32_t dlen, space; + + m_dst = sym_op->m_dst ? + sym_op->m_dst : sym_op->m_src; + dlen = rte_pktmbuf_pkt_len(m_dst); + + space = off + mac_len - dlen; + if (space > 0) + if (!rte_pktmbuf_append(m_dst, space)) { + CPT_LOG_DP_ERR("Failed to extend " + "mbuf by %uB", space); + ret = -EINVAL; + goto free_mdata_and_exit; + } + + params.mac_buf.vaddr = + rte_pktmbuf_mtod_offset(m_dst, void *, off); + params.mac_buf.dma_addr = + rte_pktmbuf_mtophys_offset(m_dst, off); + params.mac_buf.size = mac_len; + } + } else { + /* Need space for storing generated mac */ + params.mac_buf.vaddr = (uint8_t *)mdata + space; + params.mac_buf.dma_addr = mphys + space; + params.mac_buf.size = mac_len; + space += RTE_ALIGN_CEIL(mac_len, 8); + op[2] = (uintptr_t)params.mac_buf.vaddr; + op[3] = mac_len; + } + + params.meta_buf.vaddr = (uint8_t *)mdata + space; + params.meta_buf.dma_addr = mphys + space; + params.meta_buf.size -= space; + + /* Out of place processing */ + params.src_iov = (void *)src; + + /*Store SG I/O in the api for reuse */ + if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) { + CPT_LOG_DP_ERR("Prepare src iov failed"); + ret = -EINVAL; + goto free_mdata_and_exit; + } + + *prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, ¶ms, op); + if (unlikely(*prep_req == NULL)) { + ret = -EINVAL; + goto free_mdata_and_exit; + } + + *mdata_ptr = mdata; + + return 0; + +free_mdata_and_exit: + free_op_meta(mdata, m_info->pool); +err_exit: + return ret; +} + +#endif /*_CPT_UCODE_H_ */ diff --git a/src/seastar/dpdk/drivers/common/cpt/meson.build b/src/seastar/dpdk/drivers/common/cpt/meson.build new file mode 100644 index 000000000..0a905aa43 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc + +sources = files('cpt_pmd_ops_helper.c') + +deps = ['kvargs', 'pci', 'cryptodev'] +includes += include_directories('../../crypto/octeontx') +allow_experimental_apis = true diff --git a/src/seastar/dpdk/drivers/common/cpt/rte_common_cpt_version.map b/src/seastar/dpdk/drivers/common/cpt/rte_common_cpt_version.map new file mode 100644 index 000000000..dec614f0d --- /dev/null +++ b/src/seastar/dpdk/drivers/common/cpt/rte_common_cpt_version.map @@ -0,0 +1,6 @@ +DPDK_18.11 { + global: + + cpt_pmd_ops_helper_get_mlen_direct_mode; + cpt_pmd_ops_helper_get_mlen_sg_mode; +}; diff --git 
a/src/seastar/dpdk/drivers/common/dpaax/Makefile b/src/seastar/dpdk/drivers/common/dpaax/Makefile
new file mode 100644
index 000000000..94d2cf0ce
--- /dev/null
+++ b/src/seastar/dpdk/drivers/common/dpaax/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_dpaax.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# versioning export map
+EXPORT_MAP := rte_common_dpaax_version.map
+
+# library version
+LIBABIVER := 1
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-y += dpaax_iova_table.c
+
+LDLIBS += -lrte_eal
+
+SYMLINK-y-include += dpaax_iova_table.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
\ No newline at end of file diff --git a/src/seastar/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/src/seastar/dpdk/drivers/common/dpaax/dpaax_iova_table.c new file mode 100644 index 000000000..2dd38a920 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/dpaax/dpaax_iova_table.c @@ -0,0 +1,465 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#include <rte_memory.h> + +#include "dpaax_iova_table.h" +#include "dpaax_logs.h" + +/* Global dpaax logger identifier */ +int dpaax_logger; + +/* Global table reference */ +struct dpaax_iova_table *dpaax_iova_table_p; + +static int dpaax_handle_memevents(void); + +/* A structure representing the device-tree node available in /proc/device-tree. + */ +struct reg_node { + phys_addr_t addr; + size_t len; +}; + +/* A ntohll equivalent routine + * XXX: This is only applicable for 64 bit environment. + */ +static void +rotate_8(unsigned char *arr) +{ + uint32_t temp; + uint32_t *first_half; + uint32_t *second_half; + + first_half = (uint32_t *)(arr); + second_half = (uint32_t *)(arr + 4); + + temp = *first_half; + *first_half = *second_half; + *second_half = temp; + + *first_half = ntohl(*first_half); + *second_half = ntohl(*second_half); +} + +/* read_memory_nodes + * Memory layout for DPAAx platforms (LS1043, LS1046, LS1088, LS2088, LX2160) + * are populated by Uboot and available in device tree: + * /proc/device-tree/memory@<address>/reg <= register. + * Entries are of the form: + * (<8 byte start addr><8 byte length>)(..more similar blocks of start,len>).. + * + * @param count + * OUT populate number of entries found in memory node + * @return + * Pointer to array of reg_node elements, count size + */ +static struct reg_node * +read_memory_node(unsigned int *count) +{ + int fd, ret, i; + unsigned int j; + glob_t result = {0}; + struct stat statbuf = {0}; + char file_data[MEM_NODE_FILE_LEN]; + struct reg_node *nodes = NULL; + + *count = 0; + + ret = glob(MEM_NODE_PATH_GLOB, 0, NULL, &result); + if (ret != 0) { + DPAAX_DEBUG("Unable to glob device-tree memory node: (%s)(%d)", + MEM_NODE_PATH_GLOB, ret); + goto out; + } + + if (result.gl_pathc != 1) { + /* Either more than one memory@<addr> node found, or none. + * In either case, cannot work ahead. + */ + DPAAX_DEBUG("Found (%zu) entries in device-tree. Not supported!", + result.gl_pathc); + goto out; + } + + DPAAX_DEBUG("Opening and parsing device-tree node: (%s)", + result.gl_pathv[0]); + fd = open(result.gl_pathv[0], O_RDONLY); + if (fd < 0) { + DPAAX_DEBUG("Unable to open the device-tree node: (%s)(fd=%d)", + MEM_NODE_PATH_GLOB, fd); + goto cleanup; + } + + /* Stat to get the file size */ + ret = fstat(fd, &statbuf); + if (ret != 0) { + DPAAX_DEBUG("Unable to get device-tree memory node size."); + goto cleanup; + } + + DPAAX_DEBUG("Size of device-tree mem node: %lu", statbuf.st_size); + if (statbuf.st_size > MEM_NODE_FILE_LEN) { + DPAAX_DEBUG("More memory nodes available than assumed."); + DPAAX_DEBUG("System may not work properly!"); + } + + ret = read(fd, file_data, statbuf.st_size > MEM_NODE_FILE_LEN ? + MEM_NODE_FILE_LEN : statbuf.st_size); + if (ret <= 0) { + DPAAX_DEBUG("Unable to read device-tree memory node: (%d)", + ret); + goto cleanup; + } + + /* The reg node should be multiple of 16 bytes, 8 bytes each for addr + * and len. + */ + *count = (statbuf.st_size / 16); + if ((*count) <= 0 || (statbuf.st_size % 16 != 0)) { + DPAAX_DEBUG("Invalid memory node values or count. 
(size=%lu)", + statbuf.st_size); + goto cleanup; + } + + /* each entry is of 16 bytes, and size/16 is total count of entries */ + nodes = malloc(sizeof(struct reg_node) * (*count)); + if (!nodes) { + DPAAX_DEBUG("Failure in allocating working memory."); + goto cleanup; + } + memset(nodes, 0, sizeof(struct reg_node) * (*count)); + + for (i = 0, j = 0; i < (statbuf.st_size) && j < (*count); i += 16, j++) { + memcpy(&nodes[j], file_data + i, 16); + /* Rotate (ntohl) each 8 byte entry */ + rotate_8((unsigned char *)(&(nodes[j].addr))); + rotate_8((unsigned char *)(&(nodes[j].len))); + } + + DPAAX_DEBUG("Device-tree memory node data:"); + do { + DPAAX_DEBUG("\n %08" PRIx64 " %08zu", nodes[j].addr, nodes[j].len); + } while (--j); + +cleanup: + close(fd); + globfree(&result); +out: + return nodes; +} + +int +dpaax_iova_table_populate(void) +{ + int ret; + unsigned int i, node_count; + size_t tot_memory_size, total_table_size; + struct reg_node *nodes; + struct dpaax_iovat_element *entry; + + /* dpaax_iova_table_p is a singleton - only one instance should be + * created. + */ + if (dpaax_iova_table_p) { + DPAAX_DEBUG("Multiple allocation attempt for IOVA Table (%p)", + dpaax_iova_table_p); + /* This can be an error case as well - some path not cleaning + * up table - but, for now, it is assumed that if IOVA Table + * pointer is valid, table is allocated. + */ + return 0; + } + + nodes = read_memory_node(&node_count); + if (nodes == NULL) { + DPAAX_WARN("PA->VA translation not available;"); + DPAAX_WARN("Expect performance impact."); + return -1; + } + + tot_memory_size = 0; + for (i = 0; i < node_count; i++) + tot_memory_size += nodes[i].len; + + DPAAX_DEBUG("Total available PA memory size: %zu", tot_memory_size); + + /* Total table size = meta data + tot_memory_size/8 */ + total_table_size = sizeof(struct dpaax_iova_table) + + (sizeof(struct dpaax_iovat_element) * node_count) + + ((tot_memory_size / DPAAX_MEM_SPLIT) * sizeof(uint64_t)); + + /* TODO: This memory doesn't need to shared but needs to be always + * pinned to RAM (no swap out) - using hugepage rather than malloc + */ + dpaax_iova_table_p = rte_zmalloc(NULL, total_table_size, 0); + if (dpaax_iova_table_p == NULL) { + DPAAX_WARN("Unable to allocate memory for PA->VA Table;"); + DPAAX_WARN("PA->VA translation not available;"); + DPAAX_WARN("Expect performance impact."); + free(nodes); + return -1; + } + + /* Initialize table */ + dpaax_iova_table_p->count = node_count; + entry = dpaax_iova_table_p->entries; + + DPAAX_DEBUG("IOVA Table entries: (entry start = %p)", (void *)entry); + DPAAX_DEBUG("\t(entry),(start),(len),(next)"); + + for (i = 0; i < node_count; i++) { + /* dpaax_iova_table_p + * | dpaax_iova_table_p->entries + * | | + * | | + * V V + * +------+------+-------+---+----------+---------+--- + * |iova_ |entry | entry | | pages | pages | + * |table | 1 | 2 |...| entry 1 | entry2 | + * +-----'+.-----+-------+---+;---------+;--------+--- + * \ \ / / + * `~~~~~~|~~~~~>pages / + * \ / + * `~~~~~~~~~~~>pages + */ + entry[i].start = nodes[i].addr; + entry[i].len = nodes[i].len; + if (i > 0) + entry[i].pages = entry[i-1].pages + + ((entry[i-1].len/DPAAX_MEM_SPLIT)); + else + entry[i].pages = (uint64_t *)((unsigned char *)entry + + (sizeof(struct dpaax_iovat_element) * + node_count)); + + DPAAX_DEBUG("\t(%u),(%8"PRIx64"),(%8zu),(%8p)", + i, entry[i].start, entry[i].len, entry[i].pages); + } + + /* Release memory associated with nodes array - not required now */ + free(nodes); + + DPAAX_DEBUG("Adding mem-event handler\n"); + ret = 
+	if (ret) {
+		DPAAX_ERR("Unable to add mem-event handler");
+		DPAAX_WARN("Cases with non-buffer pool mem won't work!");
+	}
+
+	return 0;
+}
+
+void
+dpaax_iova_table_depopulate(void)
+{
+	if (dpaax_iova_table_p == NULL)
+		return;
+
+	rte_free(dpaax_iova_table_p->entries);
+	dpaax_iova_table_p = NULL;
+
+	DPAAX_DEBUG("IOVA Table cleaned up");
+}
+
+int
+dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length)
+{
+	int found = 0;
+	unsigned int i;
+	size_t req_length = length, e_offset;
+	struct dpaax_iovat_element *entry;
+	uintptr_t align_vaddr;
+	phys_addr_t align_paddr;
+
+	if (unlikely(dpaax_iova_table_p == NULL))
+		return -1;
+
+	align_paddr = paddr & DPAAX_MEM_SPLIT_MASK;
+	align_vaddr = ((uintptr_t)vaddr & DPAAX_MEM_SPLIT_MASK);
+
+	/* Check if paddr is available in table */
+	entry = dpaax_iova_table_p->entries;
+	for (i = 0; i < dpaax_iova_table_p->count; i++) {
+		if (align_paddr < entry[i].start) {
+			/* Address lower than start, but not found in previous
+			 * iteration, shouldn't exist.
+			 */
+			DPAAX_ERR("Add: Incorrect entry for PA->VA Table"
+				  "(%"PRIu64")", paddr);
+			DPAAX_ERR("Add: Lowest address: %"PRIu64"",
+				  entry[i].start);
+			return -1;
+		}
+
+		if (align_paddr > (entry[i].start + entry[i].len))
+			continue;
+
+		/* align_paddr >= start && align_paddr < (start + len) */
+		found = 1;
+
+		do {
+			e_offset = ((align_paddr - entry[i].start) / DPAAX_MEM_SPLIT);
+			/* TODO: What if something already exists at this
+			 * location - is that an error? For now, ignoring the
+			 * case.
+			 */
+			entry[i].pages[e_offset] = align_vaddr;
+			DPAAX_DEBUG("Added: vaddr=%zu for Phy:%"PRIu64" at %zu"
+				    " remaining len %zu", align_vaddr,
+				    align_paddr, e_offset, req_length);
+
+			/* Incoming request can be larger than the
+			 * DPAAX_MEM_SPLIT size - in which case, multiple
+			 * entries in entry->pages[] are filled up.
+			 */
+			if (req_length <= DPAAX_MEM_SPLIT)
+				break;
+			align_paddr += DPAAX_MEM_SPLIT;
+			align_vaddr += DPAAX_MEM_SPLIT;
+			req_length -= DPAAX_MEM_SPLIT;
+		} while (1);
+
+		break;
+	}
+
+	if (!found) {
+		/* There might be a case where the incoming physical address
+		 * is beyond the addresses discovered in the memory node of
+		 * device-tree. Especially if some malloc'd area is used by
+		 * EAL and the memevent handler passes that across. But, this
+		 * is not necessarily an error.
+		 */
+		DPAAX_DEBUG("Add: Unable to find slot for vaddr:(%p),"
+			    " phy(%"PRIu64")",
+			    vaddr, paddr);
+		return -1;
+	}
+
+	DPAAX_DEBUG("Add: Found slot at (%"PRIu64")[(%zu)] for vaddr:(%p),"
+		    " phy(%"PRIu64"), len(%zu)", entry[i].start, e_offset,
+		    vaddr, paddr, length);
+	return 0;
+}
+
+/* dpaax_iova_table_dump
+ * Dump the table, with its entries, on screen. Only works in Debug Mode.
+ * Not for the faint of heart - the tables can get quite large.
+ */
+void
+dpaax_iova_table_dump(void)
+{
+	unsigned int i, j;
+	struct dpaax_iovat_element *entry;
+
+	/* In case DEBUG is not enabled, some 'if' conditions might misbehave
+	 * as they have nothing else in them except a DPAAX_DEBUG() which,
+	 * if compiled out, would leave the 'if' body empty.
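+	 *
+	 * Worked example (illustrative): each (PA),(VA) row printed below
+	 * is one DPAAX_MEM_SPLIT-sized bucket. With the 2MB split, a PA of
+	 * 0x80300000 inside an entry starting at 0x80000000 was stored by
+	 * dpaax_iova_table_update() at slot 0x300000 / 0x200000 = 1, i.e.
+	 * it appears as the second pair of that entry.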
+ */ + if (rte_log_get_global_level() < RTE_LOG_DEBUG) { + DPAAX_ERR("Set log level to Debug for PA->Table dump!"); + return; + } + + DPAAX_DEBUG(" === Start of PA->VA Translation Table ==="); + if (dpaax_iova_table_p == NULL) + DPAAX_DEBUG("\tNULL"); + + entry = dpaax_iova_table_p->entries; + for (i = 0; i < dpaax_iova_table_p->count; i++) { + DPAAX_DEBUG("\t(%16i),(%16"PRIu64"),(%16zu),(%16p)", + i, entry[i].start, entry[i].len, entry[i].pages); + DPAAX_DEBUG("\t\t (PA), (VA)"); + for (j = 0; j < (entry->len/DPAAX_MEM_SPLIT); j++) { + if (entry[i].pages[j] == 0) + continue; + DPAAX_DEBUG("\t\t(%16"PRIx64"),(%16"PRIx64")", + (entry[i].start + (j * sizeof(uint64_t))), + entry[i].pages[j]); + } + } + DPAAX_DEBUG(" === End of PA->VA Translation Table ==="); +} + +static void +dpaax_memevent_cb(enum rte_mem_event type, const void *addr, size_t len, + void *arg __rte_unused) +{ + struct rte_memseg_list *msl; + struct rte_memseg *ms; + size_t cur_len = 0, map_len = 0; + phys_addr_t phys_addr; + void *virt_addr; + int ret; + + DPAAX_DEBUG("Called with addr=%p, len=%zu", addr, len); + + msl = rte_mem_virt2memseg_list(addr); + + while (cur_len < len) { + const void *va = RTE_PTR_ADD(addr, cur_len); + + ms = rte_mem_virt2memseg(va, msl); + phys_addr = rte_mem_virt2phy(ms->addr); + virt_addr = ms->addr; + map_len = ms->len; + + DPAAX_DEBUG("Request for %s, va=%p, virt_addr=%p," + "iova=%"PRIu64", map_len=%zu", + type == RTE_MEM_EVENT_ALLOC ? + "alloc" : "dealloc", + va, virt_addr, phys_addr, map_len); + + if (type == RTE_MEM_EVENT_ALLOC) + ret = dpaax_iova_table_update(phys_addr, virt_addr, + map_len); + else + /* In case of mem_events for MEM_EVENT_FREE, complete + * hugepage is released and its PA entry is set to 0. + */ + ret = dpaax_iova_table_update(phys_addr, 0, map_len); + + if (ret != 0) { + DPAAX_DEBUG("PA-Table entry update failed. " + "Map=%d, addr=%p, len=%zu, err:(%d)", + type, va, map_len, ret); + return; + } + + cur_len += map_len; + } +} + +static int +dpaax_memevent_walk_memsegs(const struct rte_memseg_list *msl __rte_unused, + const struct rte_memseg *ms, size_t len, + void *arg __rte_unused) +{ + DPAAX_DEBUG("Walking for %p (pa=%"PRIu64") and len %zu", + ms->addr, ms->phys_addr, len); + dpaax_iova_table_update(rte_mem_virt2phy(ms->addr), ms->addr, len); + return 0; +} + +static int +dpaax_handle_memevents(void) +{ + /* First, walk through all memsegs and pin them, before installing + * handler. This assures that all memseg which have already been + * identified/allocated by EAL, are already part of PA->VA Table. This + * is especially for cases where application allocates memory before + * the EAL or this is an externally allocated memory passed to EAL. 
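+	 *
+	 * Usage sketch (illustrative): a consumer such as a DPAA/DPAA2 bus
+	 * or PMD would typically call dpaax_iova_table_populate() once at
+	 * probe time, tolerate failure (translation then falls back to
+	 * slower paths, per the warnings above), and resolve addresses per
+	 * packet with dpaax_iova_table_get_va() from dpaax_iova_table.h.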
+ */ + rte_memseg_contig_walk_thread_unsafe(dpaax_memevent_walk_memsegs, NULL); + + return rte_mem_event_callback_register("dpaax_memevents_cb", + dpaax_memevent_cb, NULL); +} + +RTE_INIT(dpaax_log) +{ + dpaax_logger = rte_log_register("pmd.common.dpaax"); + if (dpaax_logger >= 0) + rte_log_set_level(dpaax_logger, RTE_LOG_ERR); +} diff --git a/src/seastar/dpdk/drivers/common/dpaax/dpaax_iova_table.h b/src/seastar/dpdk/drivers/common/dpaax/dpaax_iova_table.h new file mode 100644 index 000000000..138827e7b --- /dev/null +++ b/src/seastar/dpdk/drivers/common/dpaax/dpaax_iova_table.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#ifndef _DPAAX_IOVA_TABLE_H_ +#define _DPAAX_IOVA_TABLE_H_ + +#include <unistd.h> +#include <stdio.h> +#include <string.h> +#include <stdbool.h> +#include <stdlib.h> +#include <inttypes.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <dirent.h> +#include <fcntl.h> +#include <glob.h> +#include <errno.h> +#include <arpa/inet.h> + +#include <rte_eal.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_malloc.h> + +struct dpaax_iovat_element { + phys_addr_t start; /**< Start address of block of physical pages */ + size_t len; /**< Difference of end-start for quick access */ + uint64_t *pages; /**< VA for each physical page in this block */ +}; + +struct dpaax_iova_table { + unsigned int count; /**< No. of blocks of contiguous physical pages */ + struct dpaax_iovat_element entries[0]; +}; + +/* Pointer to the table, which is common for DPAA/DPAA2 and only a single + * instance is required across net/crypto/event drivers. This table is + * populated iff devices are found on the bus. + */ +extern struct dpaax_iova_table *dpaax_iova_table_p; + +/* Device tree file for memory layout is named 'memory@<addr>' where the 'addr' + * is SoC dependent, or even Uboot fixup dependent. + */ +#define MEM_NODE_PATH_GLOB "/proc/device-tree/memory[@0-9]*/reg" +/* Device file should be multiple of 16 bytes, each containing 8 byte of addr + * and its length. Assuming max of 5 entries. + */ +#define MEM_NODE_FILE_LEN ((16 * 5) + 1) + +/* Table is made up of DPAAX_MEM_SPLIT elements for each contiguous zone. This + * helps avoid separate handling for cases where more than one size of hugepage + * is supported. 
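+ *
+ * Worked example (illustrative): with the 2MB split below, paddr
+ * 0x80345678 floor-aligns to 0x80200000 via DPAAX_MEM_SPLIT_MASK and
+ * keeps offset 0x145678 via DPAAX_MEM_SPLIT_MASK_OFF; the lookup in
+ * dpaax_iova_table_get_va() indexes pages[] with the aligned part and
+ * re-applies the offset to the stored VA.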
+ */ +#define DPAAX_MEM_SPLIT (1<<21) +#define DPAAX_MEM_SPLIT_MASK ~(DPAAX_MEM_SPLIT - 1) /**< Floor aligned */ +#define DPAAX_MEM_SPLIT_MASK_OFF (DPAAX_MEM_SPLIT - 1) /**< Offset */ + +/* APIs exposed */ +int dpaax_iova_table_populate(void); +void dpaax_iova_table_depopulate(void); +int dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length); +void dpaax_iova_table_dump(void); + +static inline void *dpaax_iova_table_get_va(phys_addr_t paddr) __attribute__((hot)); + +static inline void * +dpaax_iova_table_get_va(phys_addr_t paddr) { + unsigned int i = 0, index; + void *vaddr = 0; + phys_addr_t paddr_align = paddr & DPAAX_MEM_SPLIT_MASK; + size_t offset = paddr & DPAAX_MEM_SPLIT_MASK_OFF; + struct dpaax_iovat_element *entry; + + if (unlikely(dpaax_iova_table_p == NULL)) + return NULL; + + entry = dpaax_iova_table_p->entries; + + do { + if (unlikely(i > dpaax_iova_table_p->count)) + break; + + if (paddr_align < entry[i].start) { + /* Incorrect paddr; Not in memory range */ + return NULL; + } + + if (paddr_align > (entry[i].start + entry[i].len)) { + i++; + continue; + } + + /* paddr > entry->start && paddr <= entry->(start+len) */ + index = (paddr_align - entry[i].start)/DPAAX_MEM_SPLIT; + vaddr = (void *)((uintptr_t)entry[i].pages[index] + offset); + break; + } while (1); + + return vaddr; +} + +#endif /* _DPAAX_IOVA_TABLE_H_ */ diff --git a/src/seastar/dpdk/drivers/common/dpaax/dpaax_logs.h b/src/seastar/dpdk/drivers/common/dpaax/dpaax_logs.h new file mode 100644 index 000000000..bf1b27cc1 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/dpaax/dpaax_logs.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#ifndef _DPAAX_LOGS_H_ +#define _DPAAX_LOGS_H_ + +#include <rte_log.h> + +extern int dpaax_logger; + +#define DPAAX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaax_logger, "dpaax: " fmt "\n", \ + ##args) + +/* Debug logs are with Function names */ +#define DPAAX_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaax_logger, "dpaax: %s(): " fmt "\n", \ + __func__, ##args) + +#define DPAAX_INFO(fmt, args...) \ + DPAAX_LOG(INFO, fmt, ## args) +#define DPAAX_ERR(fmt, args...) \ + DPAAX_LOG(ERR, fmt, ## args) +#define DPAAX_WARN(fmt, args...) \ + DPAAX_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAAX_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAAX_DP_DEBUG(fmt, args...) \ + DPAAX_DP_LOG(DEBUG, fmt, ## args) +#define DPAAX_DP_INFO(fmt, args...) \ + DPAAX_DP_LOG(INFO, fmt, ## args) +#define DPAAX_DP_WARN(fmt, args...) 
\ + DPAAX_DP_LOG(WARNING, fmt, ## args) + +#endif /* _DPAAX_LOGS_H_ */ diff --git a/src/seastar/dpdk/drivers/common/dpaax/meson.build b/src/seastar/dpdk/drivers/common/dpaax/meson.build new file mode 100644 index 000000000..78378e2a6 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/dpaax/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 NXP + +allow_experimental_apis = true + +if not is_linux + build = false +endif + +sources = files('dpaax_iova_table.c') + +cflags += ['-D_GNU_SOURCE'] diff --git a/src/seastar/dpdk/drivers/common/dpaax/rte_common_dpaax_version.map b/src/seastar/dpdk/drivers/common/dpaax/rte_common_dpaax_version.map new file mode 100644 index 000000000..8131c9e30 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/dpaax/rte_common_dpaax_version.map @@ -0,0 +1,11 @@ +DPDK_18.11 { + global: + + dpaax_iova_table_update; + dpaax_iova_table_depopulate; + dpaax_iova_table_dump; + dpaax_iova_table_p; + dpaax_iova_table_populate; + + local: *; +}; diff --git a/src/seastar/dpdk/drivers/common/meson.build b/src/seastar/dpdk/drivers/common/meson.build new file mode 100644 index 000000000..a50934108 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/meson.build @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc + +std_deps = ['eal'] +drivers = ['cpt', 'dpaax', 'mvep', 'octeontx', 'qat'] +config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON' +driver_name_fmt = 'rte_common_@0@' diff --git a/src/seastar/dpdk/drivers/common/mvep/Makefile b/src/seastar/dpdk/drivers/common/mvep/Makefile new file mode 100644 index 000000000..1f5f005d9 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/mvep/Makefile @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. +# + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),config) +ifeq ($(LIBMUSDK_PATH),) +$(error "Please define LIBMUSDK_PATH environment variable") +endif +endif +endif + +# library name +LIB = librte_common_mvep.a + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_common_mvep_version.map + +# external library dependencies +CFLAGS += -I$($RTE_SDK)/drivers/common/mvep +CFLAGS += -I$(LIBMUSDK_PATH)/include +CFLAGS += -DMVCONF_TYPES_PUBLIC +CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -O3 +LDLIBS += -L$(LIBMUSDK_PATH)/lib +LDLIBS += -lmusdk +LDLIBS += -lrte_eal -lrte_kvargs + +# library source files +SRCS-y += mvep_common.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/seastar/dpdk/drivers/common/mvep/meson.build b/src/seastar/dpdk/drivers/common/mvep/meson.build new file mode 100644 index 000000000..8ccfacb3f --- /dev/null +++ b/src/seastar/dpdk/drivers/common/mvep/meson.build @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. +# Copyright(c) 2018 Semihalf. +# All rights reserved. 
+# +path = get_option('lib_musdk_dir') +lib_dir = path + '/lib' +inc_dir = path + '/include' + +lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false) +if not lib.found() + build = false +else + ext_deps += lib + includes += include_directories(inc_dir) + cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC'] +endif + +sources = files('mvep_common.c') diff --git a/src/seastar/dpdk/drivers/common/mvep/mvep_common.c b/src/seastar/dpdk/drivers/common/mvep/mvep_common.c new file mode 100644 index 000000000..67fa65b57 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/mvep/mvep_common.c @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + */ + +#include <rte_common.h> + +#include <env/mv_autogen_comp_flags.h> +#include <env/mv_sys_dma.h> + +#include "rte_mvep_common.h" + +/* Memory size (in bytes) for MUSDK dma buffers */ +#define MRVL_MUSDK_DMA_MEMSIZE (40 * 1024 * 1024) + +struct mvep { + uint32_t ref_count; +}; + +static struct mvep mvep; + +int rte_mvep_init(enum mvep_module_type module __rte_unused, + struct rte_kvargs *kvlist __rte_unused) +{ + int ret; + + if (!mvep.ref_count) { + ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE); + if (ret) + return ret; + } + + mvep.ref_count++; + + return 0; +} + +int rte_mvep_deinit(enum mvep_module_type module __rte_unused) +{ + mvep.ref_count--; + + if (!mvep.ref_count) + mv_sys_dma_mem_destroy(); + + return 0; +} diff --git a/src/seastar/dpdk/drivers/common/mvep/rte_common_mvep_version.map b/src/seastar/dpdk/drivers/common/mvep/rte_common_mvep_version.map new file mode 100644 index 000000000..c71722d79 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/mvep/rte_common_mvep_version.map @@ -0,0 +1,6 @@ +DPDK_18.11 { + global: + + rte_mvep_init; + rte_mvep_deinit; +}; diff --git a/src/seastar/dpdk/drivers/common/mvep/rte_mvep_common.h b/src/seastar/dpdk/drivers/common/mvep/rte_mvep_common.h new file mode 100644 index 000000000..0593cefcd --- /dev/null +++ b/src/seastar/dpdk/drivers/common/mvep/rte_mvep_common.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. 
+ */ + +#ifndef __RTE_MVEP_COMMON_H__ +#define __RTE_MVEP_COMMON_H__ + +#include <rte_kvargs.h> + +enum mvep_module_type { + MVEP_MOD_T_NONE = 0, + MVEP_MOD_T_PP2, + MVEP_MOD_T_SAM, + MVEP_MOD_T_NETA, + MVEP_MOD_T_LAST +}; + +int rte_mvep_init(enum mvep_module_type module, struct rte_kvargs *kvlist); +int rte_mvep_deinit(enum mvep_module_type module); + +#endif /* __RTE_MVEP_COMMON_H__ */ diff --git a/src/seastar/dpdk/drivers/common/octeontx/Makefile b/src/seastar/dpdk/drivers/common/octeontx/Makefile new file mode 100644 index 000000000..dfdb9f196 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/octeontx/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_common_octeontx.a + +CFLAGS += $(WERROR_FLAGS) +EXPORT_MAP := rte_common_octeontx_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-y += octeontx_mbox.c + +LDLIBS += -lrte_eal + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/seastar/dpdk/drivers/common/octeontx/meson.build b/src/seastar/dpdk/drivers/common/octeontx/meson.build new file mode 100644 index 000000000..203d1ef49 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/octeontx/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cavium, Inc +# + +sources = files('octeontx_mbox.c') diff --git a/src/seastar/dpdk/drivers/common/octeontx/octeontx_mbox.c b/src/seastar/dpdk/drivers/common/octeontx/octeontx_mbox.c new file mode 100644 index 000000000..880f8a40f --- /dev/null +++ b/src/seastar/dpdk/drivers/common/octeontx/octeontx_mbox.c @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include <string.h> + +#include <rte_atomic.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_io.h> +#include <rte_spinlock.h> + +#include "octeontx_mbox.h" + +/* Mbox operation timeout in seconds */ +#define MBOX_WAIT_TIME_SEC 3 +#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */) + +/* Mbox channel state */ +enum { + MBOX_CHAN_STATE_REQ = 1, + MBOX_CHAN_STATE_RES = 0, +}; + +/* Response messages */ +enum { + MBOX_RET_SUCCESS, + MBOX_RET_INVALID, + MBOX_RET_INTERNAL_ERR, +}; + +struct mbox { + int init_once; + uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */ + uint8_t *reg; /* Store to this register triggers PF mbox interrupt */ + uint16_t tag_own; /* Last tag which was written to own channel */ + rte_spinlock_t lock; +}; + +static struct mbox octeontx_mbox; + +/* + * Structure used for mbox synchronization + * This structure sits at the begin of Mbox RAM and used as main + * synchronization point for channel communication + */ +struct mbox_ram_hdr { + union { + uint64_t u64; + struct { + uint8_t chan_state : 1; + uint8_t coproc : 7; + uint8_t msg; + uint8_t vfid; + uint8_t res_code; + uint16_t tag; + uint16_t len; + }; + }; +}; + +int octeontx_logtype_mbox; + +RTE_INIT(otx_init_log) +{ + octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox"); + if (octeontx_logtype_mbox >= 0) + rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE); +} + +static inline void +mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size) +{ + uint16_t i; + + for (i = 0; i < size; i++) + d[i] = s[i]; +} + +static inline void +mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr, + const void *txmsg, uint16_t txsize) +{ + struct mbox_ram_hdr old_hdr; + struct mbox_ram_hdr new_hdr = { {0} 
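+	/* Note (illustrative): mbox_ram_hdr packs the whole channel state
+	 * into one 64-bit word - chan_state:1, coproc:7, msg:8, vfid:8,
+	 * res_code:8, tag:16, len:16 - so after the body is copied below,
+	 * a single rte_write64() of this header flips chan_state to
+	 * MBOX_CHAN_STATE_REQ and publishes the request; the doorbell
+	 * write to m->reg only pokes the PF afterwards.
+	 */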
}; + uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base; + uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr); + + /* + * Initialize the channel with the tag left by last send. + * On success full mbox send complete, PF increments the tag by one. + * The sender can validate integrity of PF message with this scheme + */ + old_hdr.u64 = rte_read64(ram_mbox_hdr); + m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */ + + /* Copy msg body */ + if (txmsg) + mbox_msgcpy(ram_mbox_msg, txmsg, txsize); + + /* Prepare new hdr */ + new_hdr.chan_state = MBOX_CHAN_STATE_REQ; + new_hdr.coproc = hdr->coproc; + new_hdr.msg = hdr->msg; + new_hdr.vfid = hdr->vfid; + new_hdr.tag = m->tag_own; + new_hdr.len = txsize; + + /* Write the msg header */ + rte_write64(new_hdr.u64, ram_mbox_hdr); + rte_smp_wmb(); + /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */ + rte_write64(0, m->reg); +} + +static inline int +mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr, + void *rxmsg, uint16_t rxsize) +{ + int res = 0, wait; + uint16_t len; + struct mbox_ram_hdr rx_hdr; + uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base; + uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr); + + /* Wait for response */ + wait = MBOX_WAIT_TIME_SEC * 1000 * 10; + while (wait > 0) { + rte_delay_us(100); + rx_hdr.u64 = rte_read64(ram_mbox_hdr); + if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES) + break; + --wait; + } + + hdr->res_code = rx_hdr.res_code; + m->tag_own++; + + /* Timeout */ + if (wait <= 0) { + res = -ETIMEDOUT; + goto error; + } + + /* Tag mismatch */ + if (m->tag_own != rx_hdr.tag) { + res = -EINVAL; + goto error; + } + + /* PF nacked the msg */ + if (rx_hdr.res_code != MBOX_RET_SUCCESS) { + res = -EBADMSG; + goto error; + } + + len = RTE_MIN(rx_hdr.len, rxsize); + if (rxmsg) + mbox_msgcpy(rxmsg, ram_mbox_msg, len); + + return len; + +error: + mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)", + m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res, + hdr->res_code); + return res; +} + +static inline int +mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg, + uint16_t txsize, void *rxmsg, uint16_t rxsize) +{ + int res = -EINVAL; + + if (m->init_once == 0 || hdr == NULL || + txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) { + mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d", + m->init_once, hdr, txsize, rxsize); + return res; + } + + rte_spinlock_lock(&m->lock); + + mbox_send_request(m, hdr, txmsg, txsize); + res = mbox_wait_response(m, hdr, rxmsg, rxsize); + + rte_spinlock_unlock(&m->lock); + return res; +} + +int +octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base) +{ + struct mbox *m = &octeontx_mbox; + + if (m->init_once) + return -EALREADY; + + if (ram_mbox_base == NULL) { + mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base); + return -EINVAL; + } + + m->ram_mbox_base = ram_mbox_base; + + if (m->reg != NULL) { + rte_spinlock_init(&m->lock); + m->init_once = 1; + } + + return 0; +} + +int +octeontx_mbox_set_reg(uint8_t *reg) +{ + struct mbox *m = &octeontx_mbox; + + if (m->init_once) + return -EALREADY; + + if (reg == NULL) { + mbox_log_err("Invalid reg=%p", reg); + return -EINVAL; + } + + m->reg = reg; + + if (m->ram_mbox_base != NULL) { + rte_spinlock_init(&m->lock); + m->init_once = 1; + } + + return 0; +} + +int +octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata, + uint16_t txlen, void *rxdata, uint16_t rxlen) +{ + struct mbox *m = &octeontx_mbox; + + 
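+	/* Worked example (illustrative): the tag handshake above is a cheap
+	 * integrity check. If the last tag seen in RAM was 7, the sender
+	 * claims the next even value, (7 + 2) & ~0x1ul = 8, for its request;
+	 * a successful PF reply carries tag 9, and mbox_wait_response()'s
+	 * m->tag_own++ (8 -> 9) must equal the echoed tag or the exchange
+	 * fails with -EINVAL.
+	 */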
RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EINVAL; + + return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen); +} diff --git a/src/seastar/dpdk/drivers/common/octeontx/octeontx_mbox.h b/src/seastar/dpdk/drivers/common/octeontx/octeontx_mbox.h new file mode 100644 index 000000000..43fbda282 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/octeontx/octeontx_mbox.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_MBOX_H__ +#define __OCTEONTX_MBOX_H__ + +#include <rte_common.h> +#include <rte_spinlock.h> + +#define SSOW_BAR4_LEN (64 * 1024) +#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3)) + +#define MBOX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\ + "%s() line %u: " fmt "\n", __func__, __LINE__, ## args) + +#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__) +#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__) +#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__) +#define mbox_func_trace mbox_log_dbg + +extern int octeontx_logtype_mbox; + +struct octeontx_mbox_hdr { + uint16_t vfid; /* VF index or pf resource index local to the domain */ + uint8_t coproc; /* Coprocessor id */ + uint8_t msg; /* Message id */ + uint8_t res_code; /* Functional layer response code */ +}; + +int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base); +int octeontx_mbox_set_reg(uint8_t *reg); +int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, + void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen); + +#endif /* __OCTEONTX_MBOX_H__ */ diff --git a/src/seastar/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map b/src/seastar/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map new file mode 100644 index 000000000..f04b3b7f8 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/octeontx/rte_common_octeontx_version.map @@ -0,0 +1,7 @@ +DPDK_18.05 { + global: + + octeontx_mbox_set_ram_mbox_base; + octeontx_mbox_set_reg; + octeontx_mbox_send; +}; diff --git a/src/seastar/dpdk/drivers/common/qat/Makefile b/src/seastar/dpdk/drivers/common/qat/Makefile new file mode 100644 index 000000000..792058db8 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/Makefile @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2015-2018 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# build directories +QAT_CRYPTO_DIR := $(RTE_SDK)/drivers/crypto/qat +QAT_COMPRESS_DIR := $(RTE_SDK)/drivers/compress/qat +VPATH=$(QAT_CRYPTO_DIR):$(QAT_COMPRESS_DIR) + +# external library include paths +CFLAGS += -I$(SRCDIR)/qat_adf +CFLAGS += -I$(SRCDIR) +CFLAGS += -I$(QAT_CRYPTO_DIR) +CFLAGS += -I$(QAT_COMPRESS_DIR) + + +ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y) + CFLAGS += -DALLOW_EXPERIMENTAL_API + LDLIBS += -lrte_compressdev + SRCS-y += qat_comp.c + SRCS-y += qat_comp_pmd.c + build_qat = yes +endif + +# library symmetric crypto source files +ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y) +ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_ASYM),y) + LDLIBS += -lrte_cryptodev + LDLIBS += -lcrypto + CFLAGS += -DBUILD_QAT_ASYM + SRCS-y += qat_asym.c + SRCS-y += qat_asym_pmd.c + build_qat = yes +endif +ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y) + LDLIBS += -lrte_cryptodev + LDLIBS += -lcrypto + CFLAGS += -DBUILD_QAT_SYM + SRCS-y += qat_sym.c + SRCS-y += qat_sym_session.c + SRCS-y += qat_sym_pmd.c + build_qat = yes +endif +endif + +ifdef build_qat + + # library name + LIB = librte_pmd_qat.a + + # 
library version + LIBABIVER := 1 + # build flags + CFLAGS += $(WERROR_FLAGS) + CFLAGS += -O3 + + # library common source files + SRCS-y += qat_device.c + SRCS-y += qat_common.c + SRCS-y += qat_logs.c + SRCS-y += qat_qp.c + + LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool + LDLIBS += -lrte_pci -lrte_bus_pci + + # export include files + SYMLINK-y-include += + + # versioning export map + EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map +endif + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/seastar/dpdk/drivers/common/qat/meson.build b/src/seastar/dpdk/drivers/common/qat/meson.build new file mode 100644 index 000000000..80b6b25a8 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017-2018 Intel Corporation + +# This does not build a driver, but instead holds common files for +# the crypto and compression drivers. +build = false +qat_deps = ['bus_pci'] +qat_sources = files('qat_common.c', + 'qat_qp.c', + 'qat_device.c', + 'qat_logs.c') +qat_includes = [include_directories('.', 'qat_adf')] +qat_ext_deps = [] +qat_cflags = [] diff --git a/src/seastar/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/src/seastar/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h new file mode 100644 index 000000000..1eef5513f --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef ADF_TRANSPORT_ACCESS_MACROS_H +#define ADF_TRANSPORT_ACCESS_MACROS_H + +#include <rte_io.h> + +/* CSR write macro */ +#define ADF_CSR_WR(csrAddr, csrOffset, val) \ + rte_write32(val, (((uint8_t *)csrAddr) + csrOffset)) + +/* CSR read macro */ +#define ADF_CSR_RD(csrAddr, csrOffset) \ + rte_read32((((uint8_t *)csrAddr) + csrOffset)) + +#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL +#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_RING_CSR_RING_CONFIG 0x000 +#define ADF_RING_CSR_RING_LBASE 0x040 +#define ADF_RING_CSR_RING_UBASE 0x080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_SRCSEL_2 0x178 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_BUNDLE_SIZE 0x1000 +#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A +#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05 +#define ADF_COALESCING_MIN_TIME 0x1FF +#define ADF_COALESCING_MAX_TIME 0xFFFFF +#define ADF_COALESCING_DEF_TIME 0x27FF +#define ADF_RING_NEAR_WATERMARK_512 0x08 +#define ADF_RING_NEAR_WATERMARK_0 0x00 +#define ADF_RING_EMPTY_SIG 0x7F7F7F7F +#define ADF_RING_EMPTY_SIG_BYTE 0x7F + +/* Valid internal ring size values */ +#define ADF_RING_SIZE_128 0x01 +#define ADF_RING_SIZE_256 0x02 +#define ADF_RING_SIZE_512 0x03 +#define ADF_RING_SIZE_4K 0x06 +#define ADF_RING_SIZE_16K 0x08 +#define ADF_RING_SIZE_4M 0x10 +#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128 +#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M +#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K + +/* Maximum number of qps on a device for any service type */ +#define ADF_MAX_QPS_ON_ANY_SERVICE 2 +#define ADF_RING_DIR_TX 0 +#define ADF_RING_DIR_RX 1 + +/* Valid internal msg size values */ +#define ADF_MSG_SIZE_32 0x01 +#define ADF_MSG_SIZE_64 0x02 +#define ADF_MSG_SIZE_128 0x04 
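+/* Worked example (illustrative): with the conversion macros just below,
+ * a 16K ring (ADF_RING_SIZE_16K = 0x08) of 128-byte messages
+ * (ADF_MSG_SIZE_128 = 0x04) works out to:
+ *   ring bytes   = (1 << (0x08 - 1)) << 7 = 16384
+ *   msg bytes    = 0x04 << 5              = 128
+ *   max inflight = 16384 / 128 - 1        = 127 (ADF_MAX_INFLIGHTS)
+ */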
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32 +#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128 + +/* Size to bytes conversion macros for ring and msg size values */ +#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5) +#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5) +#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) +#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) + +/* Minimum ring bufer size for memory allocation */ +#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \ + ADF_RING_SIZE_4K : SIZE) +#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) +#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ + SIZE) & ~0x4) +/* Max outstanding requests */ +#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ + ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1) +#define BUILD_RING_CONFIG(size) \ + ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \ + | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ + | size) +#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \ + ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \ + | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ + | size) +#define BUILD_RING_BASE_ADDR(addr, size) \ + ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size)) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_HEAD + (ring << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_TAIL + (ring << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_CONFIG + (ring << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + uint32_t l_base = 0, u_base = 0; \ + l_base = (uint32_t)(value & 0xFFFFFFFF); \ + u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \ +} while (0) +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_HEAD + (ring << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_TAIL + (ring << 2), value) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ +do { \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ +} while (0) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_COL_EN, value) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | value) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + 
ADF_RING_CSR_INT_FLAG_AND_COL, value) + +#endif /*ADF_TRANSPORT_ACCESS_MACROS_H */ diff --git a/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h new file mode 100644 index 000000000..8f7cb37b4 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_FW_H_ +#define _ICP_QAT_FW_H_ +#include <sys/types.h> +#include "icp_qat_hw.h" + +#define QAT_FIELD_SET(flags, val, bitpos, mask) \ +{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \ + (((val) & (mask)) << (bitpos))) ; } + +#define QAT_FIELD_GET(flags, bitpos, mask) \ + (((flags) >> (bitpos)) & (mask)) + +#define ICP_QAT_FW_REQ_DEFAULT_SZ 128 +#define ICP_QAT_FW_RESP_DEFAULT_SZ 32 +#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 +#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF +#define ICP_QAT_FW_NUM_LONGWORDS_1 1 +#define ICP_QAT_FW_NUM_LONGWORDS_2 2 +#define ICP_QAT_FW_NUM_LONGWORDS_3 3 +#define ICP_QAT_FW_NUM_LONGWORDS_4 4 +#define ICP_QAT_FW_NUM_LONGWORDS_5 5 +#define ICP_QAT_FW_NUM_LONGWORDS_6 6 +#define ICP_QAT_FW_NUM_LONGWORDS_7 7 +#define ICP_QAT_FW_NUM_LONGWORDS_10 10 +#define ICP_QAT_FW_NUM_LONGWORDS_13 13 +#define ICP_QAT_FW_NULL_REQ_SERV_ID 1 + +enum icp_qat_fw_comn_resp_serv_id { + ICP_QAT_FW_COMN_RESP_SERV_NULL, + ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, + ICP_QAT_FW_COMN_RESP_SERV_DELIMITER +}; + +enum icp_qat_fw_comn_request_id { + ICP_QAT_FW_COMN_REQ_NULL = 0, + ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, + ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, + ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, + ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, + ICP_QAT_FW_COMN_REQ_DELIMITER +}; + +struct icp_qat_fw_comn_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t serv_specif_fields[4]; + } s1; + } u; +}; + +struct icp_qat_fw_comn_req_mid { + uint64_t opaque_data; + uint64_t src_data_addr; + uint64_t dest_data_addr; + uint32_t src_length; + uint32_t dst_length; +}; + +struct icp_qat_fw_comn_req_cd_ctrl { + uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; +}; + +struct icp_qat_fw_comn_req_hdr { + uint8_t resrvd1; + uint8_t service_cmd_id; + uint8_t service_type; + uint8_t hdr_flags; + uint16_t serv_specif_flags; + uint16_t comn_req_flags; +}; + +struct icp_qat_fw_comn_req_rqpars { + uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; +}; + +struct icp_qat_fw_comn_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +struct icp_qat_fw_comn_error { + uint8_t xlat_err_code; + uint8_t cmp_err_code; +}; + +struct icp_qat_fw_comn_resp_hdr { + uint8_t resrvd1; + uint8_t service_id; + uint8_t response_type; + uint8_t hdr_flags; + struct icp_qat_fw_comn_error comn_error; + uint8_t comn_status; + uint8_t cmd_id; +}; + +struct icp_qat_fw_comn_resp { + struct icp_qat_fw_comn_resp_hdr comn_hdr; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 +#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 +#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 
0x7F +#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6 +#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5 +#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1 + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_type + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_type = val + +#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id + +#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id = val + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ + ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) + +#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNV_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ + ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) + +#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ + (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) + +#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ + (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) + +#define QAT_COMN_PTR_TYPE_BITPOS 0 +#define QAT_COMN_PTR_TYPE_MASK 0x1 +#define QAT_COMN_CD_FLD_TYPE_BITPOS 1 +#define QAT_COMN_CD_FLD_TYPE_MASK 0x1 +#define QAT_COMN_PTR_TYPE_FLAT 0x0 +#define QAT_COMN_PTR_TYPE_SGL 0x1 +#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 +#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 + +#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ + ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ + | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) + +#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ + QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 +#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 +#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 +#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F + +#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } + +#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define 
ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } + +#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val) \ + do { \ + (next_curr_id) = \ + (((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + (((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \ + ICP_QAT_FW_COMN_NEXT_ID_MASK)) \ + } while (0) + +#define ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val) \ + do { \ + (next_curr_id) = \ + (((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) \ + } while (0) + +#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 +#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 +#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 +#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 +#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 +#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 +#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 +#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2 +#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1 +#define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0 +#define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1 + +#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ + QAT_COMN_RESP_CRYPTO_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \ + QAT_COMN_RESP_PKE_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ + QAT_COMN_RESP_CMP_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ + QAT_COMN_RESP_XLAT_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \ + QAT_COMN_RESP_XLT_WA_APPLIED_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ + QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) + +#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \ + QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK) + +#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 +#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 +#define ERR_CODE_NO_ERROR 0 +#define ERR_CODE_INVALID_BLOCK_TYPE -1 +#define ERR_CODE_NO_MATCH_ONES_COMP -2 +#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 +#define ERR_CODE_INCOMPLETE_LEN -4 +#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 +#define ERR_CODE_RPT_GT_SPEC_LEN -6 +#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 +#define ERR_CODE_INV_DIS_CODE_LEN -8 +#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 +#define ERR_CODE_DIS_TOO_FAR_BACK -10 +#define ERR_CODE_OVERFLOW_ERROR -11 +#define ERR_CODE_SOFT_ERROR -12 +#define ERR_CODE_FATAL_ERROR -13 +#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14 +#define ERR_CODE_HW_INCOMPLETE_FILE -15 +#define ERR_CODE_SSM_ERROR -16 +#define ERR_CODE_ENDPOINT_ERROR -17 +#define ERR_CODE_CNV_ERROR -18 +#define ERR_CODE_EMPTY_DYM_BLOCK -19 +#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20 +#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21 +#define 
ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22
+#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23
+
+enum icp_qat_fw_slice {
+        ICP_QAT_FW_SLICE_NULL = 0,
+        ICP_QAT_FW_SLICE_CIPHER = 1,
+        ICP_QAT_FW_SLICE_AUTH = 2,
+        ICP_QAT_FW_SLICE_DRAM_RD = 3,
+        ICP_QAT_FW_SLICE_DRAM_WR = 4,
+        ICP_QAT_FW_SLICE_COMP = 5,
+        ICP_QAT_FW_SLICE_XLAT = 6,
+        ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
new file mode 100644
index 000000000..813817720
--- /dev/null
+++ b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+        ICP_QAT_FW_COMP_CMD_STATIC = 0,
+        /*!< Static Compress Request */
+
+        ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+        /*!< Dynamic Compress Request */
+
+        ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+        /*!< Decompress Request */
+
+        ICP_QAT_FW_COMP_CMD_DELIMITER
+        /**< Delimiter type */
+};
+
+/**< Flag usage */
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateless
+ */
+
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateful
+ */
+
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header writeback under enhanced
+ * autoselectbest is NOT disabled
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header writeback under enhanced
+ * autoselectbest is disabled
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that use of secure RAM as
+ * an intermediate buffer is DISABLED.
+ */
+
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that use of secure RAM as
+ * an intermediate buffer is ENABLED.
+ */
+
+/**< Flag mask & bit position */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the session type
+ */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine the session type
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for disabling type zero header write back
+ * when Enhanced autoselect best is enabled. If set firmware does
+ * not return type0 store block header, only copies src to dest.
+ * (if best output is Type0)
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disabling type zero header write back
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for flag used to disable secure ram from
+ * being used as an intermediate buffer.
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disable secure ram for use as an intermediate
+ * buffer.
+ */
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+        ret_uncomp, secure_ram) \
+        ((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \
+        << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+        (((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \
+        << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+        (((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \
+        << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+        (((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \
+        << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+        (((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \
+        << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
+
+union icp_qat_fw_comp_req_hdr_cd_pars {
+        /**< LWs 2-5 */
+        struct {
+                uint64_t content_desc_addr;
+                /**< Address of the content descriptor */
+
+                uint16_t content_desc_resrvd1;
+                /**< Content descriptor reserved field */
+
+                uint8_t content_desc_params_sz;
+                /**< Size of the content descriptor parameters in quad words.
+                 * These parameters describe the session setup configuration
+                 * info for the slices that this request relies upon i.e.
+                 * the configuration word and cipher key needed by the cipher
+                 * slice if there is a request for cipher processing.
+                 */
+
+                uint8_t content_desc_hdr_resrvd2;
+                /**< Content descriptor reserved field */
+
+                uint32_t content_desc_resrvd3;
+                /**< Content descriptor reserved field */
+        } s;
+
+        struct {
+                uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+                /* Compression Slice Config Word */
+
+                uint32_t content_desc_resrvd4;
+                /**< Content descriptor reserved field */
+
+        } sl;
+
+};
+
+struct icp_qat_fw_comp_req_params {
+        /**< LW 14 */
+        uint32_t comp_len;
+        /**< Size of input to process in bytes. Note: Only EOP requests can be
+         * odd for decompression. IA must set LSB to zero for odd sized
+         * intermediate inputs
+         */
+
+        /**< LW 15 */
+        uint32_t out_buffer_sz;
+        /**< Size of output buffer in bytes */
+
+        /**< LW 16 */
+        uint32_t initial_crc32;
+        /**< CRC of previously processed bytes */
+
+        /**< LW 17 */
+        uint32_t initial_adler;
+        /**< Adler of previously processed bytes */
+
+        /**< LW 18 */
+        uint32_t req_par_flags;
+
+        /**< LW 19 */
+        uint32_t rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr) \
+        ((((sop)&ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \
+        (((eop)&ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \
+        (((bfinal)&ICP_QAT_FW_COMP_BFINAL_MASK) \
+        << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+        ((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \
+        ((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \
+        << ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_SOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_EOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating to firmware that this is not the last block
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating to firmware that this is the last block
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv check is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv check IS to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for SOP
+ */
+
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine SOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for EOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine EOP
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the bfinal bit
+ */
+
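The two flag-build macros above are how a caller composes the service-specific session flags and the LW 18 per-request flags. A minimal sketch, assuming a stateless single-shot request with a compress-and-verify (CNV) check; the helper name is hypothetical, while all ICP_QAT_FW_COMP_* identifiers are taken from this header:

static inline void
example_fill_comp_flags(struct icp_qat_fw_comp_req *req)
{
        /* Session flags: stateless, no autoselect-best, secure RAM
         * allowed as an intermediate buffer.
         */
        req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
                ICP_QAT_FW_COMP_STATELESS_SESSION,
                ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
                ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
                ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
                ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

        /* Per-request flags: whole input in one shot (SOP and EOP), final
         * block, CNV with recovery (masks defined directly below).
         */
        req->comp_pars.req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
                ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
                ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
                ICP_QAT_FW_COMP_CNV_RECOVERY);
}

+#define ICP_QAT_FW_COMP_CNV_MASK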
0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the CNV bit + */ + +#define ICP_QAT_FW_COMP_CNV_BITPOS 16 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the CNV bit + */ + +#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the CNV Recovery bit + */ + +#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the CNV Recovery bit + */ + +struct icp_qat_fw_xlt_req_params { + /**< LWs 20-21 */ + uint64_t inter_buff_ptr; + /**< This field specifies the physical address of an intermediate + * buffer SGL array. The array contains a pair of 64-bit + * intermediate buffer pointers to SGL buffer descriptors, one pair + * per CPM. Please refer to the CPM1.6 Firmware Interface HLD + * specification for more details. + */ +}; + + +struct icp_qat_fw_comp_cd_hdr { + /**< LW 24 */ + uint16_t ram_bank_flags; + /**< Flags to show which ram banks to access */ + + uint8_t comp_cfg_offset; + /**< Quad word offset from the content descriptor parameters address + * to the parameters for the compression processing + */ + + uint8_t next_curr_id; + /**< This field combines the next and current id (each four bits) - + * the next id is the most significant nibble. + * Next Id: Set to the next slice to pass the compressed data through. + * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * anymore slices after compression + * Current Id: Initialised with the compression slice type + */ + + /**< LW 25 */ + uint32_t resrvd; + /**< LWs 26-27 */ + + uint64_t comp_state_addr; + /**< Pointer to compression state */ + + /**< LWs 28-29 */ + uint64_t ram_banks_addr; + /**< Pointer to banks */ + +}; + + +struct icp_qat_fw_xlt_cd_hdr { + /**< LW 30 */ + uint16_t resrvd1; + /**< Reserved field and assumed set to 0 */ + + uint8_t resrvd2; + /**< Reserved field and assumed set to 0 */ + + uint8_t next_curr_id; + /**< This field combines the next and current id (each four bits) - + * the next id is the most significant nibble. + * Next Id: Set to the next slice to pass the translated data through. + * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * any more slices after compression + * Current Id: Initialised with the translation slice type + */ + + /**< LW 31 */ + uint32_t resrvd3; + /**< Reserved and should be set to zero, needed for quadword + * alignment + */ +}; + +struct icp_qat_fw_comp_req { + /**< LWs 0-1 */ + struct icp_qat_fw_comn_req_hdr comn_hdr; + /**< Common request header - for Service Command Id, + * use service-specific Compression Command Id. + * Service Specific Flags - use Compression Command Flags + */ + + /**< LWs 2-5 */ + union icp_qat_fw_comp_req_hdr_cd_pars cd_pars; + /**< Compression service-specific content descriptor field which points + * either to a content descriptor parameter block or contains the + * compression slice config word. 
+ */ + + /**< LWs 6-13 */ + struct icp_qat_fw_comn_req_mid comn_mid; + /**< Common request middle section */ + + /**< LWs 14-19 */ + struct icp_qat_fw_comp_req_params comp_pars; + /**< Compression request Parameters block */ + + /**< LWs 20-21 */ + union { + struct icp_qat_fw_xlt_req_params xlt_pars; + /**< Translation request Parameters block */ + uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved if not used for translation */ + + } u1; + + /**< LWs 22-23 */ + union { + uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved - not used if Batch and Pack is disabled.*/ + + uint64_t bnp_res_table_addr; + /**< A generic pointer to the unbounded list of + * icp_qat_fw_resp_comp_pars members. This pointer is only + * used when the Batch and Pack is enabled. + */ + } u3; + + /**< LWs 24-29 */ + struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl; + /**< Compression request content descriptor control block header */ + + /**< LWs 30-31 */ + union { + struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl; + /**< Translation request content descriptor + * control block header + */ + + uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved if not used for translation */ + } u2; +}; + +struct icp_qat_fw_resp_comp_pars { + /**< LW 4 */ + uint32_t input_byte_counter; + /**< Input byte counter */ + + /**< LW 5 */ + uint32_t output_byte_counter; + /**< Output byte counter */ + + /**< LW 6 & 7*/ + union { + uint64_t curr_chksum; + struct { + /**< LW 6 */ + uint32_t curr_crc32; + /**< LW 7 */ + uint32_t curr_adler_32; + }; + }; +}; + +struct icp_qat_fw_comp_resp { + /**< LWs 0-1 */ + struct icp_qat_fw_comn_resp_hdr comn_resp; + /**< Common interface response format see icp_qat_fw.h */ + + /**< LWs 2-3 */ + uint64_t opaque_data; + /**< Opaque data passed from the request to the response message */ + + /**< LWs 4-7 */ + struct icp_qat_fw_resp_comp_pars comp_resp_pars; + /**< Common response params (checksums and byte counts) */ +}; + +#endif diff --git a/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h new file mode 100644 index 000000000..c33bc3fe7 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_la.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_FW_LA_H_ +#define _ICP_QAT_FW_LA_H_ +#include "icp_qat_fw.h" + +enum icp_qat_fw_la_cmd_id { + ICP_QAT_FW_LA_CMD_CIPHER = 0, + ICP_QAT_FW_LA_CMD_AUTH = 1, + ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, + ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, + ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, + ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, + ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, + ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, + ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, + ICP_QAT_FW_LA_CMD_MGF1 = 9, + ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, + ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, + ICP_QAT_FW_LA_CMD_DELIMITER = 12 +}; + +#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR +#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR + +struct icp_qat_fw_la_bulk_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +#define 
ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO 2
+#define ICP_QAT_FW_LA_CCM_PROTO 1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK 0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+        cmp_auth, ret_auth, update_state, \
+        ciph_iv, ciphcfg, partial) \
+        (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+        QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+        ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+        QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+        ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+        QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+        ((proto & QAT_LA_PROTO_MASK) << \
+        QAT_LA_PROTO_BITPOS) | \
+        ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+        QAT_LA_CMP_AUTH_RES_BITPOS) | \
+        ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+        QAT_LA_RET_AUTH_RES_BITPOS) | \
+        ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+        QAT_LA_UPDATE_STATE_BITPOS) | \
+        ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+        QAT_LA_CIPH_IV_FLD_BITPOS) | \
+        ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+        QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+        ((partial & QAT_LA_PARTIAL_MASK) << \
+        QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+        QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+        QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+        QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+        QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+        QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+        QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+        QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+        QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+        QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+        QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+        QAT_LA_CMP_AUTH_RES_MASK)
+
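The getters here, and the setters that follow, are thin wrappers over QAT_FIELD_GET/QAT_FIELD_SET from icp_qat_fw.h; ICP_QAT_FW_LA_FLAGS_BUILD simply ORs all ten fields into one word. A minimal sketch of building serv_specif_flags for an AES-GCM style request (the helper name is hypothetical and the flag choices are illustrative; every other identifier is from this header):

static inline uint16_t
example_build_gcm_la_flags(void)
{
        uint16_t flags = ICP_QAT_FW_LA_FLAGS_BUILD(
                0,                                      /* no ZUC 3G protocol */
                ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS,     /* 12-byte GCM IV */
                ICP_QAT_FW_LA_DIGEST_IN_BUFFER,         /* digest in data buffer */
                ICP_QAT_FW_LA_GCM_PROTO,                /* GCM protocol */
                ICP_QAT_FW_LA_NO_CMP_AUTH_RES,          /* firmware does not compare ICV */
                ICP_QAT_FW_LA_RET_AUTH_RES,             /* firmware returns ICV */
                ICP_QAT_FW_LA_NO_UPDATE_STATE,
                ICP_QAT_FW_CIPH_IV_16BYTE_DATA,         /* IV carried in the request */
                ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP,
                ICP_QAT_FW_LA_PARTIAL_NONE);            /* full packet, no partials */

        /* Individual fields can later be rewritten through the setters, e.g. */
        ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags,
                ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS);
        return flags;
}

+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags)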
\ + QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ + QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +struct icp_qat_fw_cipher_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } s1; + } u; +}; + +struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } sl; + } u; +}; + +struct icp_qat_fw_cipher_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id; + uint8_t cipher_padding_sz; + uint8_t resrvd1; + uint16_t resrvd2; + uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; +}; + +struct icp_qat_fw_auth_cd_ctrl_hdr { + uint32_t resrvd1; + uint8_t resrvd2; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id; + uint8_t resrvd3; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd4; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id_cipher; + uint8_t cipher_padding_sz; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + 
uint8_t next_curr_id_auth; + uint8_t resrvd1; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd2; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 +#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 +#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 +#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ + (sizeof(struct icp_qat_fw_la_cipher_req_params_t)) +#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) + +struct icp_qat_fw_la_cipher_req_params { + uint32_t cipher_offset; + uint32_t cipher_length; + union { + uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + struct { + uint64_t cipher_IV_ptr; + uint64_t resrvd1; + } s; + } u; +}; + +struct icp_qat_fw_la_auth_req_params { + uint32_t auth_off; + uint32_t auth_len; + union { + uint64_t auth_partial_st_prefix; + uint64_t aad_adr; + } u1; + uint64_t auth_res_addr; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint8_t hash_state_sz; + uint8_t auth_res_sz; +} __rte_packed; + +struct icp_qat_fw_la_auth_req_params_resrvd_flds { + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint16_t resrvd2; +}; + +struct icp_qat_fw_la_resp { + struct icp_qat_fw_comn_resp_hdr comn_resp; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ + ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#endif diff --git a/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h new file mode 100644 index 000000000..00813cffb --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h @@ -0,0 +1,1538 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel 
Corporation + */ + +/** + * @file icp_qat_fw_mmp_ids.h + * @ingroup icp_qat_fw_mmp + * @brief + * This file documents the external interfaces that the QAT FW running + * on the QAT Acceleration Engine provides to clients wanting to + * accelerate crypto asymmetric applications + */ + +#ifndef _ICP_QAT_FW_MMP_IDS_ +#define _ICP_QAT_FW_MMP_IDS_ + +#define PKE_INIT 0x09061a09 +/**< Functionality ID for Initialisation sequence + * @li 1 input parameters : @link icp_qat_fw_mmp_init_input::z z @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_init_output::zz zz @endlink + */ +#define PKE_DH_G2_768 0x1c0b1a10 +/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for + *768-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_768_input::e e + * @endlink @link icp_qat_fw_mmp_dh_g2_768_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_768_output::r r + * @endlink + */ +#define PKE_DH_768 0x210c1a1b +/**< Functionality ID for Diffie-Hellman Modular exponentiation for 768-bit + *numbers + * @li 3 input parameters : @link icp_qat_fw_mmp_dh_768_input::g g @endlink + * @link icp_qat_fw_mmp_dh_768_input::e e @endlink @link + * icp_qat_fw_mmp_dh_768_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dh_768_output::r r @endlink + */ +#define PKE_DH_G2_1024 0x220b1a27 +/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for + * 1024-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_1024_input::e e + * @endlink @link icp_qat_fw_mmp_dh_g2_1024_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_1024_output::r r + * @endlink + */ +#define PKE_DH_1024 0x290c1a32 +/**< Functionality ID for Diffie-Hellman Modular exponentiation for 1024-bit + * numbers + * @li 3 input parameters : @link icp_qat_fw_mmp_dh_1024_input::g g @endlink + * @link icp_qat_fw_mmp_dh_1024_input::e e @endlink @link + * icp_qat_fw_mmp_dh_1024_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dh_1024_output::r r @endlink + */ +#define PKE_DH_G2_1536 0x2e0b1a3e +/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for + * 1536-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_1536_input::e e + * @endlink @link icp_qat_fw_mmp_dh_g2_1536_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_1536_output::r r + * @endlink + */ +#define PKE_DH_1536 0x390c1a49 +/**< Functionality ID for Diffie-Hellman Modular exponentiation for 1536-bit + * numbers + * @li 3 input parameters : @link icp_qat_fw_mmp_dh_1536_input::g g @endlink + * @link icp_qat_fw_mmp_dh_1536_input::e e @endlink @link + * icp_qat_fw_mmp_dh_1536_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dh_1536_output::r r @endlink + */ +#define PKE_DH_G2_2048 0x3e0b1a55 +/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for + * 2048-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_2048_input::e e + * @endlink @link icp_qat_fw_mmp_dh_g2_2048_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_2048_output::r r + * @endlink + */ +#define PKE_DH_2048 0x4d0c1a60 +/**< Functionality ID for Diffie-Hellman Modular exponentiation for 2048-bit + * numbers + * @li 3 input parameters : @link icp_qat_fw_mmp_dh_2048_input::g g @endlink + * @link icp_qat_fw_mmp_dh_2048_input::e e @endlink @link + * icp_qat_fw_mmp_dh_2048_input::m m @endlink + * @li 1 output parameters : @link 
icp_qat_fw_mmp_dh_2048_output::r r @endlink
+ */
+#define PKE_DH_G2_3072 0x3a0b1a6c
+/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
+ * 3072-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_3072_input::e e
+ * @endlink @link icp_qat_fw_mmp_dh_g2_3072_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_3072_output::r r
+ * @endlink
+ */
+#define PKE_DH_3072 0x510c1a77
+/**< Functionality ID for Diffie-Hellman Modular exponentiation for 3072-bit
+ * numbers
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dh_3072_input::g g @endlink
+ * @link icp_qat_fw_mmp_dh_3072_input::e e @endlink @link
+ * icp_qat_fw_mmp_dh_3072_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_3072_output::r r @endlink
+ */
+#define PKE_DH_G2_4096 0x4a0b1a83
+/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
+ * 4096-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_4096_input::e e
+ * @endlink @link icp_qat_fw_mmp_dh_g2_4096_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_4096_output::r r
+ * @endlink
+ */
+#define PKE_DH_4096 0x690c1a8e
+/**< Functionality ID for Diffie-Hellman Modular exponentiation for 4096-bit
+ * numbers
+ * @li 3 input parameters : @link icp_qat_fw_mmp_dh_4096_input::g g @endlink
+ * @link icp_qat_fw_mmp_dh_4096_input::e e @endlink @link
+ * icp_qat_fw_mmp_dh_4096_input::m m @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_dh_4096_output::r r @endlink
+ */
+#define PKE_RSA_KP1_512 0x191d1a9a
+/**< Functionality ID for RSA 512 key generation first form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_512_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_512_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp1_512_input::e e @endlink
+ * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_512_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp1_512_output::d d @endlink
+ */
+#define PKE_RSA_KP2_512 0x19401acc
+/**< Functionality ID for RSA 512 key generation second form
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_512_input::p p
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_512_input::q q @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_512_input::e e @endlink
+ * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_512_output::n n
+ * @endlink @link icp_qat_fw_mmp_rsa_kp2_512_output::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_512_output::dp dp @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_512_output::dq dq @endlink @link
+ * icp_qat_fw_mmp_rsa_kp2_512_output::qinv qinv @endlink
+ */
+#define PKE_RSA_EP_512 0x1c161b21
+/**< Functionality ID for RSA 512 Encryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_512_input::m m
+ * @endlink @link icp_qat_fw_mmp_rsa_ep_512_input::e e @endlink @link
+ * icp_qat_fw_mmp_rsa_ep_512_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_512_output::c c
+ * @endlink
+ */
+#define PKE_RSA_DP1_512 0x1c161b3c
+/**< Functionality ID for RSA 512 Decryption
+ * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_512_input::c c
+ * @endlink @link icp_qat_fw_mmp_rsa_dp1_512_input::d d @endlink @link
+ * icp_qat_fw_mmp_rsa_dp1_512_input::n n @endlink
+ * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_512_output::m m
+ * @endlink
+ */
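Each functionality ID is tied to a fixed operand width, so a caller selects the ID from the size of its operands. A hypothetical selector for the RSA encryption IDs (the function itself is illustrative only; the PKE_RSA_EP_* constants, including the larger sizes defined further on in this file, are real):

static inline uint32_t
example_rsa_ep_func_id(unsigned int mod_bits)
{
        switch (mod_bits) {
        case 512:  return PKE_RSA_EP_512;
        case 1024: return PKE_RSA_EP_1024;
        case 1536: return PKE_RSA_EP_1536;
        case 2048: return PKE_RSA_EP_2048;
        case 3072: return PKE_RSA_EP_3072;
        case 4096: return PKE_RSA_EP_4096;
        default:   return 0; /* no ID for this operand size */
        }
}

+#define PKE_RSA_DP2_512 0x1c131b57
+/**< Functionality ID for RSA 512 Decryption with CRT
+ * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_512_input::c c
+ * @endlink @link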
icp_qat_fw_mmp_rsa_dp2_512_input::p p @endlink @link + * icp_qat_fw_mmp_rsa_dp2_512_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_dp2_512_input::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_dp2_512_input::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_dp2_512_input::qinv qinv @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_512_output::m m + * @endlink + */ +#define PKE_RSA_KP1_1024 0x36181b71 +/**< Functionality ID for RSA 1024 key generation first form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_1024_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp1_1024_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp1_1024_input::e e @endlink + * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_1024_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp1_1024_output::d d @endlink + */ +#define PKE_RSA_KP2_1024 0x40451b9e +/**< Functionality ID for RSA 1024 key generation second form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_1024_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp2_1024_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp2_1024_input::e e @endlink + * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_1024_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp2_1024_output::d d @endlink @link + * icp_qat_fw_mmp_rsa_kp2_1024_output::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_kp2_1024_output::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_kp2_1024_output::qinv qinv @endlink + */ +#define PKE_RSA_EP_1024 0x35111bf7 +/**< Functionality ID for RSA 1024 Encryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_1024_input::m m + * @endlink @link icp_qat_fw_mmp_rsa_ep_1024_input::e e @endlink @link + * icp_qat_fw_mmp_rsa_ep_1024_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_1024_output::c c + * @endlink + */ +#define PKE_RSA_DP1_1024 0x35111c12 +/**< Functionality ID for RSA 1024 Decryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_1024_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp1_1024_input::d d @endlink @link + * icp_qat_fw_mmp_rsa_dp1_1024_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_1024_output::m m + * @endlink + */ +#define PKE_RSA_DP2_1024 0x26131c2d +/**< Functionality ID for RSA 1024 Decryption with CRT + * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_1024_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp2_1024_input::p p @endlink @link + * icp_qat_fw_mmp_rsa_dp2_1024_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_dp2_1024_input::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_dp2_1024_input::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_dp2_1024_input::qinv qinv @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_1024_output::m m + * @endlink + */ +#define PKE_RSA_KP1_1536 0x531d1c46 +/**< Functionality ID for RSA 1536 key generation first form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_1536_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp1_1536_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp1_1536_input::e e @endlink + * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_1536_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp1_1536_output::d d @endlink + */ +#define PKE_RSA_KP2_1536 0x32391c78 +/**< Functionality ID for RSA 1536 key generation second form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_1536_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp2_1536_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp2_1536_input::e e @endlink + * @li 5 output parameters : 
@link icp_qat_fw_mmp_rsa_kp2_1536_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp2_1536_output::d d @endlink @link + * icp_qat_fw_mmp_rsa_kp2_1536_output::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_kp2_1536_output::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_kp2_1536_output::qinv qinv @endlink + */ +#define PKE_RSA_EP_1536 0x4d111cdc +/**< Functionality ID for RSA 1536 Encryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_1536_input::m m + * @endlink @link icp_qat_fw_mmp_rsa_ep_1536_input::e e @endlink @link + * icp_qat_fw_mmp_rsa_ep_1536_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_1536_output::c c + * @endlink + */ +#define PKE_RSA_DP1_1536 0x4d111cf7 +/**< Functionality ID for RSA 1536 Decryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_1536_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp1_1536_input::d d @endlink @link + * icp_qat_fw_mmp_rsa_dp1_1536_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_1536_output::m m + *@endlink + */ +#define PKE_RSA_DP2_1536 0x45111d12 +/**< Functionality ID for RSA 1536 Decryption with CRT + * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_1536_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp2_1536_input::p p @endlink @link + * icp_qat_fw_mmp_rsa_dp2_1536_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_dp2_1536_input::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_dp2_1536_input::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_dp2_1536_input::qinv qinv @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_1536_output::m m + * @endlink + */ +#define PKE_RSA_KP1_2048 0x72181d2e +/**< Functionality ID for RSA 2048 key generation first form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_2048_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp1_2048_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp1_2048_input::e e @endlink + * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_2048_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp1_2048_output::d d @endlink + */ +#define PKE_RSA_KP2_2048 0x42341d5b +/**< Functionality ID for RSA 2048 key generation second form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_2048_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp2_2048_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp2_2048_input::e e @endlink + * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_2048_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp2_2048_output::d d @endlink @link + * icp_qat_fw_mmp_rsa_kp2_2048_output::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_kp2_2048_output::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_kp2_2048_output::qinv qinv @endlink + */ +#define PKE_RSA_EP_2048 0x6e111dba +/**< Functionality ID for RSA 2048 Encryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_2048_input::m m + * @endlink @link icp_qat_fw_mmp_rsa_ep_2048_input::e e @endlink @link + * icp_qat_fw_mmp_rsa_ep_2048_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_2048_output::c c + * @endlink + */ +#define PKE_RSA_DP1_2048 0x6e111dda +/**< Functionality ID for RSA 2048 Decryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_2048_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp1_2048_input::d d @endlink @link + * icp_qat_fw_mmp_rsa_dp1_2048_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_2048_output::m m + * @endlink + */ +#define PKE_RSA_DP2_2048 0x59121dfa +/**< Functionality ID for RSA 2048 Decryption with CRT + * 
@li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_2048_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp2_2048_input::p p @endlink @link + * icp_qat_fw_mmp_rsa_dp2_2048_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_dp2_2048_input::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_dp2_2048_input::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_dp2_2048_input::qinv qinv @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_2048_output::m m + * @endlink + */ +#define PKE_RSA_KP1_3072 0x60191e16 +/**< Functionality ID for RSA 3072 key generation first form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_3072_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp1_3072_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp1_3072_input::e e @endlink + * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_3072_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp1_3072_output::d d @endlink + */ +#define PKE_RSA_KP2_3072 0x68331e45 +/**< Functionality ID for RSA 3072 key generation second form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_3072_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp2_3072_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp2_3072_input::e e @endlink + * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_3072_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp2_3072_output::d d @endlink @link + * icp_qat_fw_mmp_rsa_kp2_3072_output::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_kp2_3072_output::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_kp2_3072_output::qinv qinv @endlink + */ +#define PKE_RSA_EP_3072 0x7d111ea3 +/**< Functionality ID for RSA 3072 Encryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_3072_input::m m + * @endlink @link icp_qat_fw_mmp_rsa_ep_3072_input::e e @endlink @link + * icp_qat_fw_mmp_rsa_ep_3072_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_3072_output::c c + * @endlink + */ +#define PKE_RSA_DP1_3072 0x7d111ebe +/**< Functionality ID for RSA 3072 Decryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_3072_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp1_3072_input::d d @endlink @link + * icp_qat_fw_mmp_rsa_dp1_3072_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_3072_output::m m + * @endlink + */ +#define PKE_RSA_DP2_3072 0x81121ed9 +/**< Functionality ID for RSA 3072 Decryption with CRT + * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_3072_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp2_3072_input::p p @endlink @link + * icp_qat_fw_mmp_rsa_dp2_3072_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_dp2_3072_input::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_dp2_3072_input::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_dp2_3072_input::qinv qinv @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_3072_output::m m + * @endlink + */ +#define PKE_RSA_KP1_4096 0x7d1f1ef6 +/**< Functionality ID for RSA 4096 key generation first form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_4096_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp1_4096_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_kp1_4096_input::e e @endlink + * @li 2 output parameters : @link icp_qat_fw_mmp_rsa_kp1_4096_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp1_4096_output::d d @endlink + */ +#define PKE_RSA_KP2_4096 0x91251f27 +/**< Functionality ID for RSA 4096 key generation second form + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp2_4096_input::p p + * @endlink @link icp_qat_fw_mmp_rsa_kp2_4096_input::q q 
@endlink @link + * icp_qat_fw_mmp_rsa_kp2_4096_input::e e @endlink + * @li 5 output parameters : @link icp_qat_fw_mmp_rsa_kp2_4096_output::n n + * @endlink @link icp_qat_fw_mmp_rsa_kp2_4096_output::d d @endlink @link + * icp_qat_fw_mmp_rsa_kp2_4096_output::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_kp2_4096_output::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_kp2_4096_output::qinv qinv @endlink + */ +#define PKE_RSA_EP_4096 0xa5101f7e +/**< Functionality ID for RSA 4096 Encryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_4096_input::m m + * @endlink @link icp_qat_fw_mmp_rsa_ep_4096_input::e e @endlink @link + * icp_qat_fw_mmp_rsa_ep_4096_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_4096_output::c c + * @endlink + */ +#define PKE_RSA_DP1_4096 0xa5101f98 +/**< Functionality ID for RSA 4096 Decryption + * @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_4096_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp1_4096_input::d d @endlink @link + * icp_qat_fw_mmp_rsa_dp1_4096_input::n n @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_4096_output::m m + * @endlink + */ +#define PKE_RSA_DP2_4096 0xb1111fb2 +/**< Functionality ID for RSA 4096 Decryption with CRT + * @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_4096_input::c c + * @endlink @link icp_qat_fw_mmp_rsa_dp2_4096_input::p p @endlink @link + * icp_qat_fw_mmp_rsa_dp2_4096_input::q q @endlink @link + * icp_qat_fw_mmp_rsa_dp2_4096_input::dp dp @endlink @link + * icp_qat_fw_mmp_rsa_dp2_4096_input::dq dq @endlink @link + * icp_qat_fw_mmp_rsa_dp2_4096_input::qinv qinv @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_4096_output::m m + * @endlink + */ +#define PKE_GCD_PT_192 0x19201fcd +/**< Functionality ID for GCD primality test for 192-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_192_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_GCD_PT_256 0x19201ff7 +/**< Functionality ID for GCD primality test for 256-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_256_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_GCD_PT_384 0x19202021 +/**< Functionality ID for GCD primality test for 384-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_384_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_GCD_PT_512 0x1b1b204b +/**< Functionality ID for GCD primality test for 512-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_512_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_GCD_PT_768 0x170c2070 +/**< Functionality ID for GCD primality test for 768-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_768_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_GCD_PT_1024 0x130f2085 +/**< Functionality ID for GCD primality test for 1024-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_1024_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_GCD_PT_1536 0x1d0c2094 +/**< Functionality ID for GCD primality test for 1536-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_1536_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_GCD_PT_2048 0x210c20a5 +/**< Functionality ID for GCD primality test for 2048-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_2048_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_GCD_PT_3072 0x290c20b6 +/**< Functionality ID for 
GCD primality test for 3072-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_3072_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_GCD_PT_4096 0x310c20c7
+/**< Functionality ID for GCD primality test for 4096-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_4096_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_160 0x0e1120d8
+/**< Functionality ID for Fermat primality test for 160-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_160_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_512 0x121120ee
+/**< Functionality ID for Fermat primality test for 512-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_512_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_L512 0x19162104
+/**< Functionality ID for Fermat primality test for <= 512-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_l512_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_768 0x19112124
+/**< Functionality ID for Fermat primality test for 768-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_768_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_1024 0x1f11213a
+/**< Functionality ID for Fermat primality test for 1024-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_1024_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_1536 0x2b112150
+/**< Functionality ID for Fermat primality test for 1536-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_1536_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_2048 0x3b112166
+/**< Functionality ID for Fermat primality test for 2048-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_2048_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_3072 0x3a11217c
+/**< Functionality ID for Fermat primality test for 3072-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_3072_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_FERMAT_PT_4096 0x4a112192
+/**< Functionality ID for Fermat primality test for 4096-bit numbers
+ * @li 1 input parameters : @link icp_qat_fw_mmp_fermat_pt_4096_input::m m
+ * @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_160 0x0e1221a8
+/**< Functionality ID for Miller-Rabin primality test for 160-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_160_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_160_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_512 0x111221bf
+/**< Functionality ID for Miller-Rabin primality test for 512-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_512_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_512_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_768 0x1d0d21d6
+/**< Functionality ID for Miller-Rabin primality test for 768-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_768_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_768_input::m m @endlink
+ * @li no output parameters
+ */
+#define PKE_MR_PT_1024 0x250d21ed
+/**< Functionality ID for Miller-Rabin primality test for 1024-bit numbers
+ * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_1024_input::x x
+ * @endlink @link icp_qat_fw_mmp_mr_pt_1024_input::m m @endlink
+ * @li
no output parameters + */ +#define PKE_MR_PT_1536 0x350d2204 +/**< Functionality ID for Miller-Rabin primality test for 1536-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_1536_input::x x + * @endlink @link icp_qat_fw_mmp_mr_pt_1536_input::m m @endlink + * @li no output parameters + */ +#define PKE_MR_PT_2048 0x490d221b +/**< Functionality ID for Miller-Rabin primality test for 2048-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_2048_input::x x + * @endlink @link icp_qat_fw_mmp_mr_pt_2048_input::m m @endlink + * @li no output parameters + */ +#define PKE_MR_PT_3072 0x4d0d2232 +/**< Functionality ID for Miller-Rabin primality test for 3072-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_3072_input::x x + * @endlink @link icp_qat_fw_mmp_mr_pt_3072_input::m m @endlink + * @li no output parameters + */ +#define PKE_MR_PT_4096 0x650d2249 +/**< Functionality ID for Miller-Rabin primality test for 4096-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_4096_input::x x + * @endlink @link icp_qat_fw_mmp_mr_pt_4096_input::m m @endlink + * @li no output parameters + */ +#define PKE_MR_PT_L512 0x18182260 +/**< Functionality ID for Miller-Rabin primality test for 512-bit numbers + * @li 2 input parameters : @link icp_qat_fw_mmp_mr_pt_l512_input::x x + * @endlink @link icp_qat_fw_mmp_mr_pt_l512_input::m m @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_160 0x0e0c227e +/**< Functionality ID for Lucas primality test for 160-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_160_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_512 0x110c228f +/**< Functionality ID for Lucas primality test for 512-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_512_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_768 0x130c22a0 +/**< Functionality ID for Lucas primality test for 768-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_768_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_1024 0x150c22b1 +/**< Functionality ID for Lucas primality test for 1024-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_1024_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_1536 0x190c22c2 +/**< Functionality ID for Lucas primality test for 1536-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_1536_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_2048 0x1d0c22d3 +/**< Functionality ID for Lucas primality test for 2048-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_2048_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_3072 0x250c22e4 +/**< Functionality ID for Lucas primality test for 3072-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_3072_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_4096 0x661522f5 +/**< Functionality ID for Lucas primality test for 4096-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_4096_input::m m + * @endlink + * @li no output parameters + */ +#define PKE_LUCAS_PT_L512 0x1617230a +/**< Functionality ID for Lucas primality test for L512-bit numbers + * @li 1 input parameters : @link icp_qat_fw_mmp_lucas_pt_l512_input::m m + * @endlink + * @li no output parameters + */ +#define MATHS_MODEXP_L512 0x150c2327 +/**< Functionality ID for 
Modular exponentiation for numbers less than 512-bits + * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l512_input::g g + * @endlink @link icp_qat_fw_maths_modexp_l512_input::e e @endlink @link + * icp_qat_fw_maths_modexp_l512_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l512_output::r r +@endlink + */ +#define MATHS_MODEXP_L1024 0x2d0c233e +/**< Functionality ID for Modular exponentiation for numbers less than 1024-bit + * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l1024_input::g g + * @endlink @link icp_qat_fw_maths_modexp_l1024_input::e e @endlink @link + * icp_qat_fw_maths_modexp_l1024_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l1024_output::r r + * @endlink + */ +#define MATHS_MODEXP_L1536 0x410c2355 +/**< Functionality ID for Modular exponentiation for numbers less than 1536-bits + * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l1536_input::g g + * @endlink @link icp_qat_fw_maths_modexp_l1536_input::e e @endlink @link + * icp_qat_fw_maths_modexp_l1536_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l1536_output::r r + * @endlink + */ +#define MATHS_MODEXP_L2048 0x5e12236c +/**< Functionality ID for Modular exponentiation for numbers less than 2048-bit + * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l2048_input::g g + * @endlink @link icp_qat_fw_maths_modexp_l2048_input::e e @endlink @link + * icp_qat_fw_maths_modexp_l2048_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l2048_output::r r + * @endlink + */ +#define MATHS_MODEXP_L2560 0x60162388 +/**< Functionality ID for Modular exponentiation for numbers less than 2560-bits + * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l2560_input::g g + * @endlink @link icp_qat_fw_maths_modexp_l2560_input::e e @endlink @link + * icp_qat_fw_maths_modexp_l2560_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l2560_output::r r + * @endlink + */ +#define MATHS_MODEXP_L3072 0x650c23a9 +/**< Functionality ID for Modular exponentiation for numbers less than 3072-bits + * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l3072_input::g g + * @endlink @link icp_qat_fw_maths_modexp_l3072_input::e e @endlink @link + * icp_qat_fw_maths_modexp_l3072_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l3072_output::r r + * @endlink + */ +#define MATHS_MODEXP_L3584 0x801623c0 +/**< Functionality ID for Modular exponentiation for numbers less than 3584-bits + * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l3584_input::g g + * @endlink @link icp_qat_fw_maths_modexp_l3584_input::e e @endlink @link + * icp_qat_fw_maths_modexp_l3584_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l3584_output::r r + * @endlink + */ +#define MATHS_MODEXP_L4096 0x850c23e1 +/**< Functionality ID for Modular exponentiation for numbers less than 4096-bit + * @li 3 input parameters : @link icp_qat_fw_maths_modexp_l4096_input::g g + * @endlink @link icp_qat_fw_maths_modexp_l4096_input::e e @endlink @link + * icp_qat_fw_maths_modexp_l4096_input::m m @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modexp_l4096_output::r r + * @endlink + */ +#define MATHS_MODINV_ODD_L128 0x090623f8 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 128 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l128_input::a a + * @endlink @link 
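
Every MATHS_MODEXP_L* variant computes r = g^e mod m from the same three inputs (g, e, m) and one output (r); only the supported operand width differs. A hedged sketch of a size-to-ID lookup table, taking the "less than" wording above literally (real drivers may treat the bound as inclusive):

#include <stddef.h>
#include <stdint.h>

struct modexp_id_entry {
	uint32_t bound_bits;	/* IDs cover operands below this width,
				 * per the "less than" wording above */
	uint32_t func_id;
};

static const struct modexp_id_entry modexp_ids[] = {
	{ 512,  MATHS_MODEXP_L512 },
	{ 1024, MATHS_MODEXP_L1024 },
	{ 1536, MATHS_MODEXP_L1536 },
	{ 2048, MATHS_MODEXP_L2048 },
	{ 2560, MATHS_MODEXP_L2560 },
	{ 3072, MATHS_MODEXP_L3072 },
	{ 3584, MATHS_MODEXP_L3584 },
	{ 4096, MATHS_MODEXP_L4096 },
};

static uint32_t modexp_func_id(uint32_t modulus_bits)
{
	size_t i;

	for (i = 0; i < sizeof(modexp_ids) / sizeof(modexp_ids[0]); i++)
		if (modulus_bits < modexp_ids[i].bound_bits)
			return modexp_ids[i].func_id;
	return PKE_INVALID_FUNC_ID;
}
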
icp_qat_fw_maths_modinv_odd_l128_input::b b @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l128_output::c + * c @endlink + */ +#define MATHS_MODINV_ODD_L192 0x0a0623fe +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 192 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l192_input::a a + * @endlink @link icp_qat_fw_maths_modinv_odd_l192_input::b b @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l192_output::c + * c @endlink + */ +#define MATHS_MODINV_ODD_L256 0x0a062404 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 256 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l256_input::a a + * @endlink @link icp_qat_fw_maths_modinv_odd_l256_input::b b @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l256_output::c + * c @endlink + */ +#define MATHS_MODINV_ODD_L384 0x0b06240a +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 384 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l384_input::a a + * @endlink @link icp_qat_fw_maths_modinv_odd_l384_input::b b @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l384_output::c + * c @endlink + */ +#define MATHS_MODINV_ODD_L512 0x0c062410 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 512 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l512_input::a a + * @endlink @link icp_qat_fw_maths_modinv_odd_l512_input::b b @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l512_output::c + * c @endlink + */ +#define MATHS_MODINV_ODD_L768 0x0e062416 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 768 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l768_input::a a + * @endlink @link icp_qat_fw_maths_modinv_odd_l768_input::b b @endlink + * @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l768_output::c + * c @endlink + */ +#define MATHS_MODINV_ODD_L1024 0x1006241c +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 1024 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l1024_input::a + * a @endlink @link icp_qat_fw_maths_modinv_odd_l1024_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_odd_l1024_output::c c @endlink + */ +#define MATHS_MODINV_ODD_L1536 0x18062422 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 1536 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l1536_input::a + * a @endlink @link icp_qat_fw_maths_modinv_odd_l1536_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_odd_l1536_output::c c @endlink + */ +#define MATHS_MODINV_ODD_L2048 0x20062428 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 2048 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l2048_input::a + * a @endlink @link icp_qat_fw_maths_modinv_odd_l2048_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_odd_l2048_output::c c @endlink + */ +#define MATHS_MODINV_ODD_L3072 0x3006242e +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 3072 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l3072_input::a + * a @endlink @link icp_qat_fw_maths_modinv_odd_l3072_input::b b @endlink + * @li 1 output 
parameters : @link + * icp_qat_fw_maths_modinv_odd_l3072_output::c c @endlink + */ +#define MATHS_MODINV_ODD_L4096 0x40062434 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 4096 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l4096_input::a + * a @endlink @link icp_qat_fw_maths_modinv_odd_l4096_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_odd_l4096_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L128 0x0906243a +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 128 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l128_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l128_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l128_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L192 0x0a062440 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 192 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l192_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l192_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l192_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L256 0x0a062446 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 256 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l256_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l256_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l256_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L384 0x0e0b244c +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 384 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l384_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l384_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l384_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L512 0x110b2457 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 512 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l512_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l512_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l512_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L768 0x170b2462 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 768 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l768_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l768_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l768_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L1024 0x1d0b246d +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 1024 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l1024_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l1024_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l1024_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L1536 0x290b2478 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 1536 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l1536_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l1536_input::b b @endlink + * @li 1 output parameters : @link + * 
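
The modular-inverse IDs are split into _ODD_ and _EVEN_ families by the parity of the modulus b. A small illustrative chooser for the 512-bit case; it assumes operands are flat big-endian byte buffers, so the parity bit sits in the last byte:

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: pick the 512-bit modular-inverse ID from the
 * parity of the modulus b (big-endian buffer assumed by this sketch). */
static uint32_t modinv_l512_func_id(const uint8_t *b, size_t len)
{
	if (len == 0)
		return PKE_INVALID_FUNC_ID;
	return (b[len - 1] & 1) ? MATHS_MODINV_ODD_L512
				: MATHS_MODINV_EVEN_L512;
}
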
icp_qat_fw_maths_modinv_even_l1536_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L2048 0x350b2483 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 2048 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l2048_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l2048_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l2048_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L3072 0x4d0b248e +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 3072 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l3072_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l3072_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l3072_output::c c @endlink + */ +#define MATHS_MODINV_EVEN_L4096 0x650b2499 +/**< Functionality ID for Modular multiplicative inverse for numbers less than + * 4096 bits + * @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l4096_input::a + * a @endlink @link icp_qat_fw_maths_modinv_even_l4096_input::b b @endlink + * @li 1 output parameters : @link + * icp_qat_fw_maths_modinv_even_l4096_output::c c @endlink + */ +#define PKE_DSA_GEN_P_1024_160 0x381824a4 +/**< Functionality ID for DSA parameter generation P + * @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_1024_160_input::x + * x @endlink @link icp_qat_fw_mmp_dsa_gen_p_1024_160_input::q q @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_dsa_gen_p_1024_160_output::p p @endlink + */ +#define PKE_DSA_GEN_G_1024 0x261424d4 +/**< Functionality ID for DSA key generation G + * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_g_1024_input::p p + * @endlink @link icp_qat_fw_mmp_dsa_gen_g_1024_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_gen_g_1024_input::h h @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_g_1024_output::g g + * @endlink + */ +#define PKE_DSA_GEN_Y_1024 0x291224ed +/**< Functionality ID for DSA key generation Y + * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_y_1024_input::p p + * @endlink @link icp_qat_fw_mmp_dsa_gen_y_1024_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_gen_y_1024_input::x x @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_y_1024_output::y y + * @endlink + */ +#define PKE_DSA_SIGN_R_1024_160 0x2c1c2504 +/**< Functionality ID for DSA Sign R + * @li 4 input parameters : @link icp_qat_fw_mmp_dsa_sign_r_1024_160_input::k + * k @endlink @link icp_qat_fw_mmp_dsa_sign_r_1024_160_input::p p @endlink + * @link icp_qat_fw_mmp_dsa_sign_r_1024_160_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_1024_160_input::g g @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_1024_160_output::r r @endlink + */ +#define PKE_DSA_SIGN_S_160 0x12142526 +/**< Functionality ID for DSA Sign S + * @li 5 input parameters : @link icp_qat_fw_mmp_dsa_sign_s_160_input::m m + * @endlink @link icp_qat_fw_mmp_dsa_sign_s_160_input::k k @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_160_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_160_input::r r @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_160_input::x x @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_sign_s_160_output::s s + * @endlink + */ +#define PKE_DSA_SIGN_R_S_1024_160 0x301e2540 +/**< Functionality ID for DSA Sign R S + * @li 6 input parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::m m @endlink @link + * 
icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::k k @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::p p @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_input::x x @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_output::r r @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_1024_160_output::s s @endlink + */ +#define PKE_DSA_VERIFY_1024_160 0x323a2570 +/**< Functionality ID for DSA Verify + * @li 7 input parameters : @link icp_qat_fw_mmp_dsa_verify_1024_160_input::r + * r @endlink @link icp_qat_fw_mmp_dsa_verify_1024_160_input::s s @endlink + * @link icp_qat_fw_mmp_dsa_verify_1024_160_input::m m @endlink @link + * icp_qat_fw_mmp_dsa_verify_1024_160_input::p p @endlink @link + * icp_qat_fw_mmp_dsa_verify_1024_160_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_verify_1024_160_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_verify_1024_160_input::y y @endlink + * @li no output parameters + */ +#define PKE_DSA_GEN_P_2048_224 0x341d25be +/**< Functionality ID for DSA parameter generation P + * @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_2048_224_input::x + * x @endlink @link icp_qat_fw_mmp_dsa_gen_p_2048_224_input::q q @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_dsa_gen_p_2048_224_output::p p @endlink + */ +#define PKE_DSA_GEN_Y_2048 0x4d1225ea +/**< Functionality ID for DSA key generation Y + * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_y_2048_input::p p + * @endlink @link icp_qat_fw_mmp_dsa_gen_y_2048_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_gen_y_2048_input::x x @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_y_2048_output::y y + * @endlink + */ +#define PKE_DSA_SIGN_R_2048_224 0x511c2601 +/**< Functionality ID for DSA Sign R + * @li 4 input parameters : @link icp_qat_fw_mmp_dsa_sign_r_2048_224_input::k + * k @endlink @link icp_qat_fw_mmp_dsa_sign_r_2048_224_input::p p @endlink + * @link icp_qat_fw_mmp_dsa_sign_r_2048_224_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_2048_224_input::g g @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_2048_224_output::r r @endlink + */ +#define PKE_DSA_SIGN_S_224 0x15142623 +/**< Functionality ID for DSA Sign S + * @li 5 input parameters : @link icp_qat_fw_mmp_dsa_sign_s_224_input::m m + * @endlink @link icp_qat_fw_mmp_dsa_sign_s_224_input::k k @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_224_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_224_input::r r @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_224_input::x x @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_sign_s_224_output::s s + * @endlink + */ +#define PKE_DSA_SIGN_R_S_2048_224 0x571e263d +/**< Functionality ID for DSA Sign R S + * @li 6 input parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::m m @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::k k @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::p p @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_input::x x @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_output::r r @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_224_output::s s @endlink + */ +#define PKE_DSA_VERIFY_2048_224 0x6930266d +/**< Functionality ID for 
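
The DSA IDs group into the four FIPS 186-3 (L, N) parameter-size pairs: (1024, 160), (2048, 224), (2048, 256) and (3072, 256). A sketch of picking the combined sign-R-S ID for a pair; the helper is hypothetical, and the 2048/256 and 3072/256 IDs are defined a little further down this list:

#include <stdint.h>

static uint32_t dsa_sign_rs_func_id(uint32_t l_bits, uint32_t n_bits)
{
	if (l_bits == 1024 && n_bits == 160)
		return PKE_DSA_SIGN_R_S_1024_160;
	if (l_bits == 2048 && n_bits == 224)
		return PKE_DSA_SIGN_R_S_2048_224;
	if (l_bits == 2048 && n_bits == 256)
		return PKE_DSA_SIGN_R_S_2048_256;	/* defined below */
	if (l_bits == 3072 && n_bits == 256)
		return PKE_DSA_SIGN_R_S_3072_256;	/* defined below */
	return PKE_INVALID_FUNC_ID;
}
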
DSA Verify + * @li 7 input parameters : @link icp_qat_fw_mmp_dsa_verify_2048_224_input::r + * r @endlink @link icp_qat_fw_mmp_dsa_verify_2048_224_input::s s @endlink + * @link icp_qat_fw_mmp_dsa_verify_2048_224_input::m m @endlink @link + * icp_qat_fw_mmp_dsa_verify_2048_224_input::p p @endlink @link + * icp_qat_fw_mmp_dsa_verify_2048_224_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_verify_2048_224_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_verify_2048_224_input::y y @endlink + * @li no output parameters + */ +#define PKE_DSA_GEN_P_2048_256 0x431126b7 +/**< Functionality ID for DSA parameter generation P + * @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_2048_256_input::x + * x @endlink @link icp_qat_fw_mmp_dsa_gen_p_2048_256_input::q q @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_dsa_gen_p_2048_256_output::p p @endlink + */ +#define PKE_DSA_GEN_G_2048 0x4b1426ed +/**< Functionality ID for DSA key generation G + * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_g_2048_input::p p + * @endlink @link icp_qat_fw_mmp_dsa_gen_g_2048_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_gen_g_2048_input::h h @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_g_2048_output::g g + * @endlink + */ +#define PKE_DSA_SIGN_R_2048_256 0x5b182706 +/**< Functionality ID for DSA Sign R + * @li 4 input parameters : @link icp_qat_fw_mmp_dsa_sign_r_2048_256_input::k + * k @endlink @link icp_qat_fw_mmp_dsa_sign_r_2048_256_input::p p @endlink + * @link icp_qat_fw_mmp_dsa_sign_r_2048_256_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_2048_256_input::g g @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_2048_256_output::r r @endlink + */ +#define PKE_DSA_SIGN_S_256 0x15142733 +/**< Functionality ID for DSA Sign S + * @li 5 input parameters : @link icp_qat_fw_mmp_dsa_sign_s_256_input::m m + * @endlink @link icp_qat_fw_mmp_dsa_sign_s_256_input::k k @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_256_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_256_input::r r @endlink @link + * icp_qat_fw_mmp_dsa_sign_s_256_input::x x @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_sign_s_256_output::s s + * @endlink + */ +#define PKE_DSA_SIGN_R_S_2048_256 0x5a2a274d +/**< Functionality ID for DSA Sign R S + * @li 6 input parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::m m @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::k k @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::p p @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_input::x x @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_output::r r @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_2048_256_output::s s @endlink + */ +#define PKE_DSA_VERIFY_2048_256 0x723a2789 +/**< Functionality ID for DSA Verify + * @li 7 input parameters : @link icp_qat_fw_mmp_dsa_verify_2048_256_input::r + * r @endlink @link icp_qat_fw_mmp_dsa_verify_2048_256_input::s s @endlink + * @link icp_qat_fw_mmp_dsa_verify_2048_256_input::m m @endlink @link + * icp_qat_fw_mmp_dsa_verify_2048_256_input::p p @endlink @link + * icp_qat_fw_mmp_dsa_verify_2048_256_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_verify_2048_256_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_verify_2048_256_input::y y @endlink + * @li no output parameters + */ +#define PKE_DSA_GEN_P_3072_256 
0x4b1127e0 +/**< Functionality ID for DSA parameter generation P + * @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_3072_256_input::x + * x @endlink @link icp_qat_fw_mmp_dsa_gen_p_3072_256_input::q q @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_dsa_gen_p_3072_256_output::p p @endlink + */ +#define PKE_DSA_GEN_G_3072 0x4f142816 +/**< Functionality ID for DSA key generation G + * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_g_3072_input::p p + * @endlink @link icp_qat_fw_mmp_dsa_gen_g_3072_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_gen_g_3072_input::h h @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_g_3072_output::g g + * @endlink + */ +#define PKE_DSA_GEN_Y_3072 0x5112282f +/**< Functionality ID for DSA key generation Y + * @li 3 input parameters : @link icp_qat_fw_mmp_dsa_gen_y_3072_input::p p + * @endlink @link icp_qat_fw_mmp_dsa_gen_y_3072_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_gen_y_3072_input::x x @endlink + * @li 1 output parameters : @link icp_qat_fw_mmp_dsa_gen_y_3072_output::y y + * @endlink + */ +#define PKE_DSA_SIGN_R_3072_256 0x59282846 +/**< Functionality ID for DSA Sign R + * @li 4 input parameters : @link icp_qat_fw_mmp_dsa_sign_r_3072_256_input::k + * k @endlink @link icp_qat_fw_mmp_dsa_sign_r_3072_256_input::p p @endlink + * @link icp_qat_fw_mmp_dsa_sign_r_3072_256_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_3072_256_input::g g @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_3072_256_output::r r @endlink + */ +#define PKE_DSA_SIGN_R_S_3072_256 0x61292874 +/**< Functionality ID for DSA Sign R S + * @li 6 input parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::m m @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::k k @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::p p @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_input::x x @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_output::r r @endlink @link + * icp_qat_fw_mmp_dsa_sign_r_s_3072_256_output::s s @endlink + */ +#define PKE_DSA_VERIFY_3072_256 0x7f4328ae +/**< Functionality ID for DSA Verify + * @li 7 input parameters : @link icp_qat_fw_mmp_dsa_verify_3072_256_input::r + * r @endlink @link icp_qat_fw_mmp_dsa_verify_3072_256_input::s s @endlink + * @link icp_qat_fw_mmp_dsa_verify_3072_256_input::m m @endlink @link + * icp_qat_fw_mmp_dsa_verify_3072_256_input::p p @endlink @link + * icp_qat_fw_mmp_dsa_verify_3072_256_input::q q @endlink @link + * icp_qat_fw_mmp_dsa_verify_3072_256_input::g g @endlink @link + * icp_qat_fw_mmp_dsa_verify_3072_256_input::y y @endlink + * @li no output parameters + */ +#define PKE_ECDSA_SIGN_RS_GF2_L256 0x46512907 +/**< Functionality ID for ECDSA Sign RS for curves B/K-163 and B/K-233 + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l256_input::in in @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l256_output::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l256_output::s s @endlink + */ +#define PKE_ECDSA_SIGN_R_GF2_L256 0x323a298f +/**< Functionality ID for ECDSA Sign R for curves B/K-163 and B/K-233 + * @li 7 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::xg xg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::yg yg @endlink @link + * 
icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::n n @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::q q @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::a a @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::b b @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_input::k k @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l256_output::r r @endlink + */ +#define PKE_ECDSA_SIGN_S_GF2_L256 0x2b2229e6 +/**< Functionality ID for ECDSA Sign S for curves with n < 2^256 + * @li 5 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::e e @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::d d @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::k k @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_input::n n @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l256_output::s s @endlink + */ +#define PKE_ECDSA_VERIFY_GF2_L256 0x337e2a27 +/**< Functionality ID for ECDSA Verify for curves B/K-163 and B/K-233 + * @li 1 input parameters : @link + *icp_qat_fw_mmp_ecdsa_verify_gf2_l256_input::in in @endlink + * @li no output parameters + */ +#define PKE_ECDSA_SIGN_RS_GF2_L512 0x5e5f2ad7 +/**< Functionality ID for ECDSA Sign RS + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l512_input::in in @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l512_output::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_l512_output::s s @endlink + */ +#define PKE_ECDSA_SIGN_R_GF2_L512 0x84312b6a +/**< Functionality ID for ECDSA GF2 Sign R + * @li 7 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::xg xg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::yg yg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::n n @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::q q @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::a a @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::b b @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_input::k k @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_l512_output::r r @endlink + */ +#define PKE_ECDSA_SIGN_S_GF2_L512 0x26182bbe +/**< Functionality ID for ECDSA GF2 Sign S + * @li 5 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::e e @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::d d @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::k k @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_input::n n @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_l512_output::s s @endlink + */ +#define PKE_ECDSA_VERIFY_GF2_L512 0x58892bea +/**< Functionality ID for ECDSA GF2 Verify + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_verify_gf2_l512_input::in in @endlink + * @li no output parameters + */ +#define PKE_ECDSA_SIGN_RS_GF2_571 0x554a2c93 +/**< Functionality ID for ECDSA GF2 Sign RS for curves B-571/K-571 + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_571_input::in in @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_571_output::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gf2_571_output::s s @endlink + */ +#define PKE_ECDSA_SIGN_S_GF2_571 0x52332d09 +/**< Functionality ID for ECDSA GF2 
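
Two calling shapes recur in these ECDSA IDs: the *_SIGN_RS_* and *_VERIFY_* operations take a single concatenated input block in, while *_SIGN_R_* and *_SIGN_S_* take discrete operands. A small table sketch recording the parameter counts, transcribed from the @li annotations above (the struct is illustrative only):

#include <stdint.h>

struct pke_op_shape {
	uint32_t func_id;
	uint8_t in_count;	/* input large integers */
	uint8_t out_count;	/* output large integers */
};

/* Counts transcribed from the functionality-ID comments above. */
static const struct pke_op_shape ecdsa_gf2_l256_shapes[] = {
	{ PKE_ECDSA_SIGN_RS_GF2_L256, 1, 2 },	/* in            -> r, s */
	{ PKE_ECDSA_SIGN_R_GF2_L256,  7, 1 },	/* xg..b, k      -> r    */
	{ PKE_ECDSA_SIGN_S_GF2_L256,  5, 1 },	/* e, d, r, k, n -> s    */
	{ PKE_ECDSA_VERIFY_GF2_L256,  1, 0 },	/* in            -> none */
};
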
Sign S for curves with deg(q) < 576 + * @li 5 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::e e @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::d d @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::k k @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_input::n n @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gf2_571_output::s s @endlink + */ +#define PKE_ECDSA_SIGN_R_GF2_571 0x731a2d51 +/**< Functionality ID for ECDSA GF2 Sign R for degree 571 + * @li 7 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::xg xg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::yg yg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::n n @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::q q @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::a a @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::b b @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_input::k k @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gf2_571_output::r r @endlink + */ +#define PKE_ECDSA_VERIFY_GF2_571 0x4f6c2d91 +/**< Functionality ID for ECDSA GF2 Verify for degree 571 + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_verify_gf2_571_input::in in @endlink + * @li no output parameters + */ +#define MATHS_POINT_MULTIPLICATION_GF2_L256 0x3b242e38 +/**< Functionality ID for MATHS GF2 Point Multiplication + * @li 7 input parameters : @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_input::k k @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_input::xg xg @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_input::yg yg @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_input::a a @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_input::b b @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_input::q q @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_input::h h @endlink + * @li 2 output parameters : @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_output::xk xk @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l256_output::yk yk @endlink + */ +#define MATHS_POINT_VERIFY_GF2_L256 0x231a2e7c +/**< Functionality ID for MATHS GF2 Point Verification + * @li 5 input parameters : @link + * icp_qat_fw_maths_point_verify_gf2_l256_input::xq xq @endlink @link + * icp_qat_fw_maths_point_verify_gf2_l256_input::yq yq @endlink @link + * icp_qat_fw_maths_point_verify_gf2_l256_input::q q @endlink @link + * icp_qat_fw_maths_point_verify_gf2_l256_input::a a @endlink @link + * icp_qat_fw_maths_point_verify_gf2_l256_input::b b @endlink + * @li no output parameters + */ +#define MATHS_POINT_MULTIPLICATION_GF2_L512 0x722c2e96 +/**< Functionality ID for MATHS GF2 Point Multiplication + * @li 7 input parameters : @link + * icp_qat_fw_maths_point_multiplication_gf2_l512_input::k k @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l512_input::xg xg @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l512_input::yg yg @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l512_input::a a @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l512_input::b b @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l512_input::q q @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l512_input::h h @endlink + * @li 2 output parameters : @link + 
* icp_qat_fw_maths_point_multiplication_gf2_l512_output::xk xk @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_l512_output::yk yk @endlink + */ +#define MATHS_POINT_VERIFY_GF2_L512 0x25132ee2 +/**< Functionality ID for MATHS GF2 Point Verification + * @li 5 input parameters : @link + * icp_qat_fw_maths_point_verify_gf2_l512_input::xq xq @endlink @link + * icp_qat_fw_maths_point_verify_gf2_l512_input::yq yq @endlink @link + * icp_qat_fw_maths_point_verify_gf2_l512_input::q q @endlink @link + * icp_qat_fw_maths_point_verify_gf2_l512_input::a a @endlink @link + * icp_qat_fw_maths_point_verify_gf2_l512_input::b b @endlink + * @li no output parameters + */ +#define MATHS_POINT_MULTIPLICATION_GF2_571 0x44152ef5 +/**< Functionality ID for ECC GF2 Point Multiplication for curves B-571/K-571 + * @li 7 input parameters : @link + * icp_qat_fw_maths_point_multiplication_gf2_571_input::k k @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_571_input::xg xg @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_571_input::yg yg @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_571_input::a a @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_571_input::b b @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_571_input::q q @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_571_input::h h @endlink + * @li 2 output parameters : @link + * icp_qat_fw_maths_point_multiplication_gf2_571_output::xk xk @endlink @link + * icp_qat_fw_maths_point_multiplication_gf2_571_output::yk yk @endlink + */ +#define MATHS_POINT_VERIFY_GF2_571 0x12072f1b +/**< Functionality ID for ECC GF2 Point Verification for degree 571 + * @li 5 input parameters : @link + * icp_qat_fw_maths_point_verify_gf2_571_input::xq xq @endlink @link + * icp_qat_fw_maths_point_verify_gf2_571_input::yq yq @endlink @link + * icp_qat_fw_maths_point_verify_gf2_571_input::q q @endlink @link + * icp_qat_fw_maths_point_verify_gf2_571_input::a a @endlink @link + * icp_qat_fw_maths_point_verify_gf2_571_input::b b @endlink + * @li no output parameters + */ +#define PKE_KPT_ECDSA_SIGN_RS_GF2_L256 0x515217d9 +/**< Functionality ID for KPT ECDSA Sign RS for curves B/K-163 and B/K-233 + * @li 3 input parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_input::in in @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_input::d d @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_input::c c @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_output::r r @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l256_output::s s @endlink + */ +#define PKE_KPT_ECDSA_SIGN_RS_GF2_L512 0x4d811987 +/**< Functionality ID for KPT ECDSA Sign RS + * @li 3 input parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_input::in in @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_input::d d @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_input::c c @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_output::r r @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_l512_output::s s @endlink + */ +#define PKE_KPT_ECDSA_SIGN_RS_GF2_571 0x45731898 +/**< Functionality ID for KPT ECDSA GF2 Sign RS for curves B-571/K-571 + * @li 3 input parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_input::in in @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_input::d d @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_input::c c @endlink + * @li 2 output 
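
All of the point-multiplication IDs share one operand order, namely scalar k, base point (xg, yg), curve coefficients a and b, field modulus or polynomial q, and cofactor h, returning (xk, yk). A sketch pinning that order down as named slots (the enums are ours, not part of the header):

/* Operand slots shared by the MATHS_POINT_MULTIPLICATION_* IDs; the
 * order is transcribed from the @li lists above. */
enum point_mul_in_slot {
	PM_IN_K, PM_IN_XG, PM_IN_YG, PM_IN_A, PM_IN_B, PM_IN_Q, PM_IN_H,
	PM_IN_COUNT	/* 7 input parameters */
};

enum point_mul_out_slot {
	PM_OUT_XK, PM_OUT_YK,
	PM_OUT_COUNT	/* 2 output parameters */
};
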
parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_output::r r @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gf2_571_output::s s @endlink + */ +#define PKE_ECDSA_SIGN_R_GFP_L256 0x431b2f22 +/**< Functionality ID for ECDSA GFP Sign R + * @li 7 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::xg xg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::yg yg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::n n @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::q q @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::a a @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::b b @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_input::k k @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l256_output::r r @endlink + */ +#define PKE_ECDSA_SIGN_S_GFP_L256 0x2b252f6d +/**< Functionality ID for ECDSA GFP Sign S + * @li 5 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::e e @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::d d @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::k k @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_input::n n @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l256_output::s s @endlink + */ +#define PKE_ECDSA_SIGN_RS_GFP_L256 0x6a3c2fa6 +/**< Functionality ID for ECDSA GFP Sign RS + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l256_input::in in @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l256_output::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l256_output::s s @endlink + */ +#define PKE_ECDSA_VERIFY_GFP_L256 0x325b3023 +/**< Functionality ID for ECDSA GFP Verify + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_verify_gfp_l256_input::in in @endlink + * @li no output parameters + */ +#define PKE_ECDSA_SIGN_R_GFP_L512 0x4e2530b3 +/**< Functionality ID for ECDSA GFP Sign R + * @li 7 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::xg xg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::yg yg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::n n @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::q q @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::a a @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::b b @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_input::k k @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_l512_output::r r @endlink + */ +#define PKE_ECDSA_SIGN_S_GFP_L512 0x251830fa +/**< Functionality ID for ECDSA GFP Sign S + * @li 5 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::e e @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::d d @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::k k @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_input::n n @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_l512_output::s s @endlink + */ +#define PKE_ECDSA_SIGN_RS_GFP_L512 0x5a2b3127 +/**< Functionality ID for ECDSA GFP Sign RS + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l512_input::in in @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l512_output::r r @endlink @link + * 
icp_qat_fw_mmp_ecdsa_sign_rs_gfp_l512_output::s s @endlink + */ +#define PKE_ECDSA_VERIFY_GFP_L512 0x3553318a +/**< Functionality ID for ECDSA GFP Verify + * @li 1 input parameters : @link +icp_qat_fw_mmp_ecdsa_verify_gfp_l512_input::in in @endlink + * @li no output parameters + */ +#define PKE_ECDSA_SIGN_R_GFP_521 0x772c31fe +/**< Functionality ID for ECDSA GFP Sign R + * @li 7 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::xg xg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::yg yg @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::n n @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::q q @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::a a @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::b b @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_input::k k @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_r_gfp_521_output::r r @endlink + */ +#define PKE_ECDSA_SIGN_S_GFP_521 0x52343251 +/**< Functionality ID for ECDSA GFP Sign S + * @li 5 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::e e @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::d d @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::k k @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_input::n n @endlink + * @li 1 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_s_gfp_521_output::s s @endlink + */ +#define PKE_ECDSA_SIGN_RS_GFP_521 0x494a329b +/**< Functionality ID for ECDSA GFP Sign RS + * @li 1 input parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_521_input::in in @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_521_output::r r @endlink @link + * icp_qat_fw_mmp_ecdsa_sign_rs_gfp_521_output::s s @endlink + */ +#define PKE_ECDSA_VERIFY_GFP_521 0x554c331f +/**< Functionality ID for ECDSA GFP Verify + * @li 1 input parameters : @link +icp_qat_fw_mmp_ecdsa_verify_gfp_521_input::in in @endlink + * @li no output parameters + */ +#define MATHS_POINT_MULTIPLICATION_GFP_L256 0x432033a6 +/**< Functionality ID for ECC GFP Point Multiplication + * @li 7 input parameters : @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_input::k k @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_input::xg xg @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_input::yg yg @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_input::a a @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_input::b b @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_input::q q @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_input::h h @endlink + * @li 2 output parameters : @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_output::xk xk @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l256_output::yk yk @endlink + */ +#define MATHS_POINT_VERIFY_GFP_L256 0x1f0c33fc +/**< Functionality ID for ECC GFP Partial Point Verification + * @li 5 input parameters : @link + * icp_qat_fw_maths_point_verify_gfp_l256_input::xq xq @endlink @link + * icp_qat_fw_maths_point_verify_gfp_l256_input::yq yq @endlink @link + * icp_qat_fw_maths_point_verify_gfp_l256_input::q q @endlink @link + * icp_qat_fw_maths_point_verify_gfp_l256_input::a a @endlink @link + * icp_qat_fw_maths_point_verify_gfp_l256_input::b b @endlink + * @li no output parameters + */ +#define MATHS_POINT_MULTIPLICATION_GFP_L512 0x41253419 
+/**< Functionality ID for ECC GFP Point Multiplication + * @li 7 input parameters : @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_input::k k @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_input::xg xg @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_input::yg yg @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_input::a a @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_input::b b @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_input::q q @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_input::h h @endlink + * @li 2 output parameters : @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_output::xk xk @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_l512_output::yk yk @endlink + */ +#define MATHS_POINT_VERIFY_GFP_L512 0x2612345c +/**< Functionality ID for ECC GFP Partial Point + * @li 5 input parameters : @link + * icp_qat_fw_maths_point_verify_gfp_l512_input::xq xq @endlink @link + * icp_qat_fw_maths_point_verify_gfp_l512_input::yq yq @endlink @link + * icp_qat_fw_maths_point_verify_gfp_l512_input::q q @endlink @link + * icp_qat_fw_maths_point_verify_gfp_l512_input::a a @endlink @link + * icp_qat_fw_maths_point_verify_gfp_l512_input::b b @endlink + * @li no output parameters + */ +#define MATHS_POINT_MULTIPLICATION_GFP_521 0x5511346e +/**< Functionality ID for ECC GFP Point Multiplication + * @li 7 input parameters : @link + * icp_qat_fw_maths_point_multiplication_gfp_521_input::k k @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_521_input::xg xg @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_521_input::yg yg @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_521_input::a a @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_521_input::b b @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_521_input::q q @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_521_input::h h @endlink + * @li 2 output parameters : @link + * icp_qat_fw_maths_point_multiplication_gfp_521_output::xk xk @endlink @link + * icp_qat_fw_maths_point_multiplication_gfp_521_output::yk yk @endlink + */ +#define MATHS_POINT_VERIFY_GFP_521 0x0e0734be +/**< Functionality ID for ECC GFP Partial Point Verification + * @li 5 input parameters : @link + * icp_qat_fw_maths_point_verify_gfp_521_input::xq xq @endlink @link + * icp_qat_fw_maths_point_verify_gfp_521_input::yq yq @endlink @link + * icp_qat_fw_maths_point_verify_gfp_521_input::q q @endlink @link + * icp_qat_fw_maths_point_verify_gfp_521_input::a a @endlink @link + * icp_qat_fw_maths_point_verify_gfp_521_input::b b @endlink + * @li no output parameters + */ +#define PKE_KPT_ECDSA_SIGN_RS_GFP_L256 0x1b6b182c +/**< Functionality ID for KPT ECDSA GFP Sign RS + * @li 3 input parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_input::in in @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_input::d d @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_input::c c @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_output::r r @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l256_output::s s @endlink + */ +#define PKE_KPT_ECDSA_SIGN_RS_GFP_L512 0x7439179f +/**< Functionality ID for KPT ECDSA GFP Sign RS + * @li 3 input parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_input::in in @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_input::d d @endlink @link + * 
icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_input::c c @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_output::r r @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_l512_output::s s @endlink + */ +#define PKE_KPT_ECDSA_SIGN_RS_GFP_521 0x3b7a190c +/**< Functionality ID for KPT ECDSA GFP Sign RS + * @li 3 input parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_input::in in @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_input::d d @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_input::c c @endlink + * @li 2 output parameters : @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_output::r r @endlink @link + * icp_qat_fw_mmp_kpt_ecdsa_sign_rs_gfp_521_output::s s @endlink + */ + +#define PKE_LIVENESS 0x00000001 +/**< Functionality ID for PKE_LIVENESS + * @li 0 input parameter(s) + * @li 1 output parameter(s) (8 qwords) + */ +#define PKE_INTERFACE_SIGNATURE 0x972ded54 +/**< Encoded signature of the interface specifications + */ + +#define PKE_INVALID_FUNC_ID 0xffffffff + +#endif /* __ICP_QAT_FW_MMP_IDS__ */ diff --git a/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_pke.h b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_pke.h new file mode 100644 index 000000000..b2cdf0a0a --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_fw_pke.h @@ -0,0 +1,426 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +/** + * @file icp_qat_fw_pke.h + * @defgroup icp_qat_fw_pke ICP QAT FW PKE Processing Definitions + * @ingroup icp_qat_fw + * Revision: 0.1 + * @brief + * This file documents the external interfaces that the QAT FW running + * on the QAT Acceleration Engine provides to clients wanting to + * accelerate crypto asymmetric applications + */ + +#ifndef _ICP_QAT_FW_PKE_H_ +#define _ICP_QAT_FW_PKE_H_ + +/* + * Keep all dpdk-specific changes in this section + */ + +#include <stdint.h> + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +/* End of DPDK-specific section + * Don't modify below this. + */ + +/* + **************************************************************************** + * Include local header files + **************************************************************************** + */ +#include "icp_qat_fw.h" + +/** + ***************************************************************************** + * + * @ingroup icp_qat_fw_pke + * + * @brief + * PKE response status field structure contained + * within LW1, comprising the common error codes and + * the response flags. + * + *****************************************************************************/ +struct icp_qat_fw_pke_resp_status { + u8 comn_err_code; + /**< 8 bit common error code */ + + u8 pke_resp_flags; + /**< 8-bit PKE response flags */ +}; + +/** + ***************************************************************************** + * @ingroup icp_qat_fw_pke + * Definition of the QAT FW PKE request header pars field. 
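
This is where the two headers meet: an MMP functionality ID from the previous header is carried in the func_id word of the cd_pars block documented here (the struct itself follows just below). A minimal, hypothetical fill helper, assuming both headers are included:

#include <stdint.h>
#include <string.h>

/* Hypothetical: zero the content-descriptor words (the request comment
 * below requires the CD header/param size to be zero for PKE) and set
 * the MMP functionality ID. */
static void pke_set_func_id(struct icp_qat_fw_req_hdr_pke_cd_pars *pars,
			    uint32_t func_id)
{
	memset(pars, 0, sizeof(*pars));
	pars->func_id = func_id;	/* e.g. MATHS_MODEXP_L512 */
}
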
+ * + * @description + * PKE request message header pars structure + * + *****************************************************************************/ +struct icp_qat_fw_req_hdr_pke_cd_pars { + /**< LWs 2-3 */ + u64 content_desc_addr; + /**< Content descriptor pointer */ + + /**< LW 4 */ + u32 content_desc_resrvd; + /**< Content descriptor reserved field */ + + /**< LW 5 */ + u32 func_id; + /**< MMP functionality Id */ +}; + +/** + ***************************************************************************** + * @ingroup icp_qat_fw_pke + * Definition of the QAT FW PKE request header mid section. + * + * @description + * PKE request message header middle structure + * + *****************************************************************************/ +struct icp_qat_fw_req_pke_mid { + /**< LWs 6-11 */ + u64 opaque; + /**< Opaque data passed unmodified from the request to response messages + * by firmware (fw) + */ + + u64 src_data_addr; + /**< Generic definition of the source data supplied to the QAT AE. The + * common flags are used to further describe the attributes of this + * field + */ + + u64 dest_data_addr; + /**< Generic definition of the destination data supplied to the QAT AE. + * The common flags are used to further describe the attributes of this + * field + */ +}; + +/** + ***************************************************************************** + * @ingroup icp_qat_fw_pke + * Definition of the QAT FW PKE request header. + * + * @description + * PKE request message header structure + * + *****************************************************************************/ +struct icp_qat_fw_req_pke_hdr { + /**< LW0 */ + u8 resrvd1; + /**< reserved field */ + + u8 resrvd2; + /**< reserved field */ + + u8 service_type; + /**< Service type */ + + u8 hdr_flags; + /**< This represents a flags field for the Service Request. + * The most significant bit is the 'valid' flag and the only + * one used. All remaining bit positions are unused and + * are therefore reserved and need to be set to 0. + */ + + /**< LW1 */ + u16 comn_req_flags; + /**< Common Request flags must indicate flat buffer + * Common Request flags - PKE slice flags no longer used - slice + * allocated to a threadstrand. + */ + + u8 kpt_mask; + /** < KPT input parameters array mask, indicate which node in array is + *encrypted + */ + + u8 kpt_rn_mask; + /**< KPT random node(RN) mask - indicate which node is RN that QAT + * should generate itself. 
+ */ + + /**< LWs 2-5 */ + struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars; + /**< PKE request message header pars structure */ +}; + +/** + *************************************************************************** + * + * @ingroup icp_qat_fw_pke + * + * @brief + * PKE request message structure (64 bytes) + * + *****************************************************************************/ +struct icp_qat_fw_pke_request { + /**< LWs 0-5 */ + struct icp_qat_fw_req_pke_hdr pke_hdr; + /**< Request header for PKE - CD Header/Param size must be zero */ + + /**< LWs 6-11 */ + struct icp_qat_fw_req_pke_mid pke_mid; + /**< Request middle section for PKE */ + + /**< LW 12 */ + u8 output_param_count; + /**< Number of output large integers for request */ + + u8 input_param_count; + /**< Number of input large integers for request */ + + u16 resrvd1; + /** Reserved **/ + + /**< LW 13 */ + u32 resrvd2; + /**< Reserved */ + + /**< LWs 14-15 */ + u64 next_req_adr; + /** < PKE - next request address */ +}; + +/** + ***************************************************************************** + * + * @ingroup icp_qat_fw_pke + * + * @brief + * PKE response message header structure + * + *****************************************************************************/ +struct icp_qat_fw_resp_pke_hdr { + /**< LW0 */ + u8 resrvd1; + /**< Reserved */ + + u8 resrvd2; + /**< Reserved */ + + u8 response_type; + /**< Response type - copied from the request to the response message */ + + u8 hdr_flags; + /**< This represents a flags field for the Response. + * The most significant bit is the 'valid' flag and the only + * one used. All remaining bit positions are unused and + * are therefore reserved + */ + + /**< LW1 */ + struct icp_qat_fw_pke_resp_status resp_status; + + u16 resrvd4; + /**< Set to zero. */ +}; + +/** + ***************************************************************************** + * + * @ingroup icp_qat_fw_pke + * + * @brief + * PKE response message structure (32 bytes) + * + *****************************************************************************/ +struct icp_qat_fw_pke_resp { + /**< LWs 0-1 */ + struct icp_qat_fw_resp_pke_hdr pke_resp_hdr; + /**< Response header for PKE */ + + /**< LWs 2-3 */ + u64 opaque; + /**< Opaque data passed from the request to the response message */ + + /**< LWs 4-5 */ + u64 src_data_addr; + /**< Generic definition of the source data supplied to the QAT AE. The + * common flags are used to further describe the attributes of this + * field + */ + + /**< LWs 6-7 */ + u64 dest_data_addr; + /**< Generic definition of the destination data supplied to the QAT AE. + * The common flags are used to further describe the attributes of this + * field + */ +}; + +/* ========================================================================= */ +/* MACRO DEFINITIONS */ +/* ========================================================================= */ + +/**< @ingroup icp_qat_fw_pke + * Macro defining the bit position and mask of the 'valid' flag, within the + * hdr_flags field of LW0 (service request and response) of the PKE request + */ +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7 +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1 + +/**< @ingroup icp_qat_fw_pke + * Macro defining the bit position and mask of the PKE status flag, within the + * status field LW1 of a PKE response message + */ +#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 +/**< @ingroup icp_qat_fw_pke + * Starting bit position indicating the PKE status flag within the PKE response + * pke_resp_flags byte. 
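
With the request and response layouts in place, here is a hedged end-to-end sketch of staging the 64-byte request for a 3-input/1-output operation such as MATHS_MODEXP_L512 from the functionality-ID header. The flat operand arrays behind the two IOVAs are assumptions of this sketch; service_type and comn_req_flags take values from icp_qat_fw.h and are left unset here, and the valid-flag setter used is defined further down this header:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: stage a PKE request for r = g^e mod m.
 * in_iova/out_iova point at the operand arrays (g, e, m) and (r);
 * cookie is echoed back unmodified in the response's opaque field. */
static void pke_build_modexp_req(struct icp_qat_fw_pke_request *req,
				 uint64_t in_iova, uint64_t out_iova,
				 uint64_t cookie)
{
	memset(req, 0, sizeof(*req));

	/* A real driver also fills pke_hdr.service_type and
	 * pke_hdr.comn_req_flags from icp_qat_fw.h; omitted here. */
	ICP_QAT_FW_PKE_RQ_VALID_FLAG_SET(req->pke_hdr, 1);
	req->pke_hdr.cd_pars.func_id = MATHS_MODEXP_L512;

	req->pke_mid.opaque = cookie;
	req->pke_mid.src_data_addr = in_iova;
	req->pke_mid.dest_data_addr = out_iova;

	req->input_param_count = 3;	/* g, e, m */
	req->output_param_count = 1;	/* r */
}
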
+ */ + +#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 +/**< @ingroup icp_qat_fw_pke + * One-bit mask used to extract the PKE status flag + */ + +/* + * < @ingroup icp_qat_fw_pke + * *** PKE Response Status Field Definition *** + * The PKE response follows the CPM 1.5 message format. The status field is + * 16 bits wide, where the status flags are contained within the most + * significant byte of the icp_qat_fw_pke_resp_status structure. + * The lower 8 bits of this word now contain the common error codes, + * which are defined in the common header file(*). + */ +/* +=====+-----+----+-----+-----+-----+-----+-----+-----+---------------------+ + * | Bit | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | [7....0] | + * +=====+-----+----+-----+-----+-----+-----+-----+-----+---------------------+ + * |Flags|Rsrvd|Pke |Rsrvd|Rsrvd|Rsrvd|Rsrvd|Rsrvd|Rsrvd|Common error codes(*)| + * +=====+-----+----+-----+-----+-----+-----+-----+-----+---------------------+ + */ + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_pke + * + * @description + * Macro for extraction of the PKE bit from the 16-bit status field + * particular to a PKE response. The status flags are contained within + * the most significant byte of the word. The lower 8 bits of this status + * word now contain the common error codes, which are defined in the common + * header file. The appropriate macro definition to extract the PKE status + * flag from the PKE response assumes that a single byte i.e. pke_resp_flags + * is passed to the macro. + * + * @param flags + * Response flags byte from which the PKE status bit is extracted + * + *****************************************************************************/ +#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(flags) \ + QAT_FIELD_GET((flags), QAT_COMN_RESP_PKE_STATUS_BITPOS, \ + QAT_COMN_RESP_PKE_STATUS_MASK) + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_pke + * + * @description + * Extract the valid flag from the PKE Request's header flags. Note that + * this invokes the common macro which may be used by either the request + * or the response. + * + * @param icp_qat_fw_req_pke_hdr Structure passed to extract the valid bit + * from the 'hdr_flags' field. + * + *****************************************************************************/ +#define ICP_QAT_FW_PKE_RQ_VALID_FLAG_GET(icp_qat_fw_req_pke_hdr) \ + ICP_QAT_FW_PKE_HDR_VALID_FLAG_GET(icp_qat_fw_req_pke_hdr) + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_pke + * + * @description + * Set the valid bit in the PKE Request's header flags. Note that + * this invokes the common macro which may be used by either the request + * or the response. + * + * @param icp_qat_fw_req_pke_hdr Structure passed to set the valid bit. + * @param val Value of the valid bit flag. + * + *****************************************************************************/ +#define ICP_QAT_FW_PKE_RQ_VALID_FLAG_SET(icp_qat_fw_req_pke_hdr, val) \ + ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(icp_qat_fw_req_pke_hdr, val) + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_pke + * + * @description + * Extract the valid flag from the PKE Response's header flags. Note that + * this invokes the common macro which may be used by either the request + * or the response. + * + * @param icp_qat_fw_resp_pke_hdr Structure to extract the valid bit + * from the 'hdr_flags' field.
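
A short usage sketch for the extraction macro just defined: split LW1 into its two bytes per the bit table above, where bit 14 of the word (bit 6 of the flags byte) is the PKE status and the low byte is the common error code. The helper name is ours:

#include <stdint.h>

/* Illustrative: decode LW1 of a PKE response. *pke_bit receives the raw
 * PKE status bit; *err receives the common error code from the low byte.
 * Polarity of the status bit is left to the caller to interpret. */
static void pke_decode_status(const struct icp_qat_fw_pke_resp_status *st,
			      int *pke_bit, uint8_t *err)
{
	*pke_bit = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(st->pke_resp_flags);
	*err = st->comn_err_code;
}
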
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Extract the valid flag from the PKE Response's header flags. Note that
+ * this invokes the common macro which may be used by either the request
+ * or the response.
+ *
+ * @param icp_qat_fw_resp_pke_hdr Structure to extract the valid bit
+ * from the 'hdr_flags' field.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_RESP_VALID_FLAG_GET(icp_qat_fw_resp_pke_hdr) \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_GET(icp_qat_fw_resp_pke_hdr)
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Set the valid bit in the PKE Response's header flags. Note that
+ * this invokes the common macro which may be used by either the
+ * request or the response.
+ *
+ * @param icp_qat_fw_resp_pke_hdr Structure to set the valid bit.
+ * @param val Value of the valid bit flag.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_RESP_VALID_FLAG_SET(icp_qat_fw_resp_pke_hdr, val) \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(icp_qat_fw_resp_pke_hdr, val)
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Common macro to extract the valid flag from the header flags field
+ * within the header structure (request or response).
+ *
+ * @param hdr Structure (request or response) to extract the
+ * valid bit from the 'hdr_flags' field.
+ *
+ *****************************************************************************/
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_GET(hdr) \
+ QAT_FIELD_GET(hdr.hdr_flags, ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_pke
+ *
+ * @description
+ * Common macro to set the valid bit in the header flags field within
+ * the header structure (request or response).
+ *
+ * @param hdr Structure (request or response) containing the header
+ * flags field, to allow the valid bit to be set.
+ * @param val Value of the valid bit flag.
+ * + *****************************************************************************/ +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr, val) \ + QAT_FIELD_SET((hdr.hdr_flags), (val), \ + ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK) + +#endif /* _ICP_QAT_FW_PKE_H_ */ diff --git a/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h new file mode 100644 index 000000000..e7961dba2 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_adf/icp_qat_hw.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_HW_H_ +#define _ICP_QAT_HW_H_ + +enum icp_qat_hw_ae_id { + ICP_QAT_HW_AE_0 = 0, + ICP_QAT_HW_AE_1 = 1, + ICP_QAT_HW_AE_2 = 2, + ICP_QAT_HW_AE_3 = 3, + ICP_QAT_HW_AE_4 = 4, + ICP_QAT_HW_AE_5 = 5, + ICP_QAT_HW_AE_6 = 6, + ICP_QAT_HW_AE_7 = 7, + ICP_QAT_HW_AE_8 = 8, + ICP_QAT_HW_AE_9 = 9, + ICP_QAT_HW_AE_10 = 10, + ICP_QAT_HW_AE_11 = 11, + ICP_QAT_HW_AE_DELIMITER = 12 +}; + +enum icp_qat_hw_qat_id { + ICP_QAT_HW_QAT_0 = 0, + ICP_QAT_HW_QAT_1 = 1, + ICP_QAT_HW_QAT_2 = 2, + ICP_QAT_HW_QAT_3 = 3, + ICP_QAT_HW_QAT_4 = 4, + ICP_QAT_HW_QAT_5 = 5, + ICP_QAT_HW_QAT_DELIMITER = 6 +}; + +enum icp_qat_hw_auth_algo { + ICP_QAT_HW_AUTH_ALGO_NULL = 0, + ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, + ICP_QAT_HW_AUTH_ALGO_MD5 = 2, + ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, + ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, + ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, + ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, + ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, + ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, + ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, + ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, + ICP_QAT_HW_AUTH_RESERVED_1 = 15, + ICP_QAT_HW_AUTH_RESERVED_2 = 16, + ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, + ICP_QAT_HW_AUTH_RESERVED_3 = 18, + ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, + ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 +}; + +enum icp_qat_hw_auth_mode { + ICP_QAT_HW_AUTH_MODE0 = 0, + ICP_QAT_HW_AUTH_MODE1 = 1, + ICP_QAT_HW_AUTH_MODE2 = 2, + ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 +}; + +struct icp_qat_hw_auth_config { + uint32_t config; + uint32_t reserved; +}; + +#define QAT_AUTH_MODE_BITPOS 4 +#define QAT_AUTH_MODE_MASK 0xF +#define QAT_AUTH_ALGO_BITPOS 0 +#define QAT_AUTH_ALGO_MASK 0xF +#define QAT_AUTH_CMP_BITPOS 8 +#define QAT_AUTH_CMP_MASK 0x7F +#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16 +#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1 +#define QAT_AUTH_ALGO_SHA3_BITPOS 22 +#define QAT_AUTH_ALGO_SHA3_MASK 0x3 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24 +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF +#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0 +#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1 +#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0 +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0 + +#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ + ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ + (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ + (((algo 
>> 4) & QAT_AUTH_ALGO_SHA3_MASK) \ + << QAT_AUTH_ALGO_SHA3_BITPOS) | \ + (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \ + QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \ + << QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \ + (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \ + QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \ + << QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \ + (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) + +#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \ + ((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \ + QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \ + << QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \ + (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \ + QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \ + << QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS)) + +struct icp_qat_hw_auth_counter { + uint32_t counter; + uint32_t reserved; +}; + +#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF +#define QAT_AUTH_COUNT_BITPOS 0 +#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ + (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) + +struct icp_qat_hw_auth_setup { + struct icp_qat_hw_auth_config auth_config; + struct icp_qat_hw_auth_counter auth_counter; +}; + +#define QAT_HW_DEFAULT_ALIGNMENT 8 +#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1))) +#define ICP_QAT_HW_NULL_STATE1_SZ 32 +#define ICP_QAT_HW_MD5_STATE1_SZ 16 +#define ICP_QAT_HW_SHA1_STATE1_SZ 20 +#define ICP_QAT_HW_SHA224_STATE1_SZ 32 +#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 +#define ICP_QAT_HW_SHA256_STATE1_SZ 32 +#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32 +#define ICP_QAT_HW_SHA384_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 +#define ICP_QAT_HW_SHA512_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 +#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 +#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 +#define ICP_QAT_HW_AES_F9_STATE1_SZ 32 +#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 +#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 +#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 +#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 + +#define ICP_QAT_HW_NULL_STATE2_SZ 32 +#define ICP_QAT_HW_MD5_STATE2_SZ 16 +#define ICP_QAT_HW_SHA1_STATE2_SZ 20 +#define ICP_QAT_HW_SHA224_STATE2_SZ 32 +#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0 +#define ICP_QAT_HW_SHA256_STATE2_SZ 32 +#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 +#define ICP_QAT_HW_SHA384_STATE2_SZ 64 +#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0 +#define ICP_QAT_HW_SHA512_STATE2_SZ 64 +#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 +#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48 +#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 +#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 +#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16 +#define ICP_QAT_HW_F9_IK_SZ 16 +#define ICP_QAT_HW_F9_FK_SZ 16 +#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \ + ICP_QAT_HW_F9_FK_SZ) +#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ +#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24 +#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32 +#define ICP_QAT_HW_GALOIS_H_SZ 16 +#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8 +#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16 + +struct icp_qat_hw_auth_sha512 { + struct icp_qat_hw_auth_setup inner_setup; + uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; + struct icp_qat_hw_auth_setup outer_setup; + uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; +}; + +struct icp_qat_hw_auth_sha3_512 { + struct icp_qat_hw_auth_setup inner_setup; + uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ]; + struct icp_qat_hw_auth_setup outer_setup; +}; + +struct icp_qat_hw_auth_algo_blk { + struct icp_qat_hw_auth_sha512 sha; +}; + +#define 
ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0 +#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF + +enum icp_qat_hw_cipher_algo { + ICP_QAT_HW_CIPHER_ALGO_NULL = 0, + ICP_QAT_HW_CIPHER_ALGO_DES = 1, + ICP_QAT_HW_CIPHER_ALGO_3DES = 2, + ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, + ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, + ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, + ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, + ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, + ICP_QAT_HW_CIPHER_DELIMITER = 10 +}; + +enum icp_qat_hw_cipher_mode { + ICP_QAT_HW_CIPHER_ECB_MODE = 0, + ICP_QAT_HW_CIPHER_CBC_MODE = 1, + ICP_QAT_HW_CIPHER_CTR_MODE = 2, + ICP_QAT_HW_CIPHER_F8_MODE = 3, + ICP_QAT_HW_CIPHER_XTS_MODE = 6, + ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 +}; + +struct icp_qat_hw_cipher_config { + uint32_t val; + uint32_t reserved; +}; + +enum icp_qat_hw_cipher_dir { + ICP_QAT_HW_CIPHER_ENCRYPT = 0, + ICP_QAT_HW_CIPHER_DECRYPT = 1, +}; + +enum icp_qat_hw_auth_op { + ICP_QAT_HW_AUTH_VERIFY = 0, + ICP_QAT_HW_AUTH_GENERATE = 1, +}; + +enum icp_qat_hw_cipher_convert { + ICP_QAT_HW_CIPHER_NO_CONVERT = 0, + ICP_QAT_HW_CIPHER_KEY_CONVERT = 1, +}; + +#define QAT_CIPHER_MODE_BITPOS 4 +#define QAT_CIPHER_MODE_MASK 0xF +#define QAT_CIPHER_ALGO_BITPOS 0 +#define QAT_CIPHER_ALGO_MASK 0xF +#define QAT_CIPHER_CONVERT_BITPOS 9 +#define QAT_CIPHER_CONVERT_MASK 0x1 +#define QAT_CIPHER_DIR_BITPOS 8 +#define QAT_CIPHER_DIR_MASK 0x1 +#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2 +#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2 +#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ + (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \ + ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \ + ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \ + ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS)) +#define ICP_QAT_HW_DES_BLK_SZ 8 +#define ICP_QAT_HW_3DES_BLK_SZ 8 +#define ICP_QAT_HW_NULL_BLK_SZ 8 +#define ICP_QAT_HW_AES_BLK_SZ 16 +#define ICP_QAT_HW_KASUMI_BLK_SZ 8 +#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8 +#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8 +#define ICP_QAT_HW_NULL_KEY_SZ 256 +#define ICP_QAT_HW_DES_KEY_SZ 8 +#define ICP_QAT_HW_3DES_KEY_SZ 24 +#define ICP_QAT_HW_AES_128_KEY_SZ 16 +#define ICP_QAT_HW_AES_192_KEY_SZ 24 +#define ICP_QAT_HW_AES_256_KEY_SZ 32 +#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_KASUMI_KEY_SZ 16 +#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_ARC4_KEY_SZ 256 +#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16 +#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16 +#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16 +#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16 +#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2 + +#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ + +/* These defines describe position of the 
bit-fields + * in the flags byte in B0 + */ +#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6 +#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3 + +#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \ + ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \ + | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \ + | ((q) - 1)) + +#define ICP_QAT_HW_CCM_NQ_CONST 15 +#define ICP_QAT_HW_CCM_AAD_B0_LEN 16 +#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2 +#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \ + ICP_QAT_HW_CCM_AAD_LEN_INFO) +#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16 +#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4 +#define ICP_QAT_HW_CCM_NONCE_OFFSET 1 + +struct icp_qat_hw_cipher_algo_blk { + struct icp_qat_hw_cipher_config cipher_config; + uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ]; +} __rte_cache_aligned; + +/* ========================================================================= */ +/* COMPRESSION SLICE */ +/* ========================================================================= */ + +enum icp_qat_hw_compression_direction { + ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0, + ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1, + ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_delayed_match { + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_algo { + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0, + ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1, + ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2 +}; + + +enum icp_qat_hw_compression_depth { + ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0, + ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1, + ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2, + ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3, + ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4 +}; + +enum icp_qat_hw_compression_file_type { + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5 +}; + +struct icp_qat_hw_compression_config { + uint32_t val; + uint32_t reserved; +}; + +#define QAT_COMPRESSION_DIR_BITPOS 4 +#define QAT_COMPRESSION_DIR_MASK 0x7 +#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16 +#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1 +#define QAT_COMPRESSION_ALGO_BITPOS 31 +#define QAT_COMPRESSION_ALGO_MASK 0x1 +#define QAT_COMPRESSION_DEPTH_BITPOS 28 +#define QAT_COMPRESSION_DEPTH_MASK 0x7 +#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24 +#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF + +#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \ + dir, delayed, algo, depth, filetype) \ + ((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \ + (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \ + << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \ + (((algo) & QAT_COMPRESSION_ALGO_MASK) \ + << QAT_COMPRESSION_ALGO_BITPOS) | \ + (((depth) & QAT_COMPRESSION_DEPTH_MASK) \ + << QAT_COMPRESSION_DEPTH_BITPOS) | \ + (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \ + << QAT_COMPRESSION_FILE_TYPE_BITPOS)) + +#endif diff --git a/src/seastar/dpdk/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h b/src/seastar/dpdk/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h new file mode 100644 index 000000000..8adf20959 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 
Intel Corporation + */ + +#ifndef _QAT_PKE_FUNCTIONALITY_ARRAYS_H_ +#define _QAT_PKE_FUNCTIONALITY_ARRAYS_H_ + +#include "icp_qat_fw_mmp_ids.h" + +/* + * Modular exponentiation functionality IDs + */ +static const uint32_t MOD_EXP_SIZE[][2] = { + { 512, MATHS_MODEXP_L512 }, + { 1024, MATHS_MODEXP_L1024 }, + { 1536, MATHS_MODEXP_L1536 }, + { 2048, MATHS_MODEXP_L2048 }, + { 2560, MATHS_MODEXP_L2560 }, + { 3072, MATHS_MODEXP_L3072 }, + { 3584, MATHS_MODEXP_L3584 }, + { 4096, MATHS_MODEXP_L4096 } +}; + +static const uint32_t MOD_INV_IDS_ODD[][2] = { + { 128, MATHS_MODINV_ODD_L128 }, + { 192, MATHS_MODINV_ODD_L192 }, + { 256, MATHS_MODINV_ODD_L256 }, + { 384, MATHS_MODINV_ODD_L384 }, + { 512, MATHS_MODINV_ODD_L512 }, + { 768, MATHS_MODINV_ODD_L768 }, + { 1024, MATHS_MODINV_ODD_L1024 }, + { 1536, MATHS_MODINV_ODD_L1536 }, + { 2048, MATHS_MODINV_ODD_L2048 }, + { 3072, MATHS_MODINV_ODD_L3072 }, + { 4096, MATHS_MODINV_ODD_L4096 }, +}; + +static const uint32_t MOD_INV_IDS_EVEN[][2] = { + { 128, MATHS_MODINV_EVEN_L128 }, + { 192, MATHS_MODINV_EVEN_L192 }, + { 256, MATHS_MODINV_EVEN_L256 }, + { 384, MATHS_MODINV_EVEN_L384 }, + { 512, MATHS_MODINV_EVEN_L512 }, + { 768, MATHS_MODINV_EVEN_L768 }, + { 1024, MATHS_MODINV_EVEN_L1024 }, + { 1536, MATHS_MODINV_EVEN_L1536 }, + { 2048, MATHS_MODINV_EVEN_L2048 }, + { 3072, MATHS_MODINV_EVEN_L3072 }, + { 4096, MATHS_MODINV_EVEN_L4096 }, +}; + +#endif diff --git a/src/seastar/dpdk/drivers/common/qat/qat_common.c b/src/seastar/dpdk/drivers/common/qat/qat_common.c new file mode 100644 index 000000000..475386697 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_common.c @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include "qat_common.h" +#include "qat_device.h" +#include "qat_logs.h" + +int +qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset, + void *list_in, uint32_t data_len, + const uint16_t max_segs) +{ + int res = -EINVAL; + uint32_t buf_len, nr; + struct qat_sgl *list = (struct qat_sgl *)list_in; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + uint8_t *virt_addr[max_segs]; +#endif + + for (nr = buf_len = 0; buf && nr < max_segs; buf = buf->next) { + if (offset >= rte_pktmbuf_data_len(buf)) { + offset -= rte_pktmbuf_data_len(buf); + continue; + } + + list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset; + list->buffers[nr].resrvd = 0; + list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + virt_addr[nr] = rte_pktmbuf_mtod_offset(buf, uint8_t*, offset); +#endif + offset = 0; + buf_len += list->buffers[nr].len; + + if (buf_len >= data_len) { + list->buffers[nr].len -= buf_len - data_len; + res = 0; + break; + } + ++nr; + } + + if (unlikely(res != 0)) { + if (nr == max_segs) { + QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)", + max_segs); + } else { + QAT_DP_LOG(ERR, "Mbuf chain is too short"); + } + } else { + + list->num_bufs = ++nr; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs); + for (nr = 0; nr < list->num_bufs; nr++) { + QAT_DP_LOG(INFO, + "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64, + nr, list->buffers[nr].len, + list->buffers[nr].addr); + QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL", + virt_addr[nr], + list->buffers[nr].len); + } +#endif + } + + return res; +} + +void qat_stats_get(struct qat_pci_device *dev, + struct qat_common_stats *stats, + enum qat_service_type service) +{ + int i; + struct qat_qp **qp; + + if (stats == NULL || dev == NULL || service >= 
QAT_SERVICE_INVALID) { + QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d", + stats, dev, service); + return; + } + + qp = dev->qps_in_use[service]; + for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) { + if (qp[i] == NULL) { + QAT_LOG(DEBUG, "Service %d Uninitialised qp %d", + service, i); + continue; + } + + stats->enqueued_count += qp[i]->stats.enqueued_count; + stats->dequeued_count += qp[i]->stats.dequeued_count; + stats->enqueue_err_count += qp[i]->stats.enqueue_err_count; + stats->dequeue_err_count += qp[i]->stats.dequeue_err_count; + } +} + +void qat_stats_reset(struct qat_pci_device *dev, + enum qat_service_type service) +{ + int i; + struct qat_qp **qp; + + if (dev == NULL || service >= QAT_SERVICE_INVALID) { + QAT_LOG(ERR, "invalid param: dev %p, service %d", + dev, service); + return; + } + + qp = dev->qps_in_use[service]; + for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) { + if (qp[i] == NULL) { + QAT_LOG(DEBUG, "Service %d Uninitialised qp %d", + service, i); + continue; + } + memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats)); + } + + QAT_LOG(DEBUG, "QAT: %d stats cleared", service); +} diff --git a/src/seastar/dpdk/drivers/common/qat/qat_common.h b/src/seastar/dpdk/drivers/common/qat/qat_common.h new file mode 100644 index 000000000..de9a3ba55 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_common.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#ifndef _QAT_COMMON_H_ +#define _QAT_COMMON_H_ + +#include <stdint.h> + +#include <rte_mbuf.h> + +/**< Intel(R) QAT device name for PCI registration */ +#define QAT_PCI_NAME qat +#define QAT_64_BTYE_ALIGN_MASK (~0x3f) + +/* Intel(R) QuickAssist Technology device generation is enumerated + * from one according to the generation of the device + */ +enum qat_device_gen { + QAT_GEN1 = 1, + QAT_GEN2, + QAT_GEN3 +}; + +enum qat_service_type { + QAT_SERVICE_ASYMMETRIC = 0, + QAT_SERVICE_SYMMETRIC, + QAT_SERVICE_COMPRESSION, + QAT_SERVICE_INVALID +}; + +#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID) + +/**< Common struct for scatter-gather list operations */ +struct qat_flat_buf { + uint32_t len; + uint32_t resrvd; + uint64_t addr; +} __rte_packed; + +#define qat_sgl_hdr struct { \ + uint64_t resrvd; \ + uint32_t num_bufs; \ + uint32_t num_mapped_bufs; \ +} + +__extension__ +struct qat_sgl { + qat_sgl_hdr; + /* flexible array of flat buffers*/ + struct qat_flat_buf buffers[0]; +} __rte_packed __rte_cache_aligned; + +/** Common, i.e. 
not service-specific, statistics */ +struct qat_common_stats { + uint64_t enqueued_count; + /**< Count of all operations enqueued */ + uint64_t dequeued_count; + /**< Count of all operations dequeued */ + + uint64_t enqueue_err_count; + /**< Total error count on operations enqueued */ + uint64_t dequeue_err_count; + /**< Total error count on operations dequeued */ +}; + +struct qat_pci_device; + +int +qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset, + void *list_in, uint32_t data_len, + const uint16_t max_segs); +void +qat_stats_get(struct qat_pci_device *dev, + struct qat_common_stats *stats, + enum qat_service_type service); +void +qat_stats_reset(struct qat_pci_device *dev, + enum qat_service_type service); + +#endif /* _QAT_COMMON_H_ */ diff --git a/src/seastar/dpdk/drivers/common/qat/qat_device.c b/src/seastar/dpdk/drivers/common/qat/qat_device.c new file mode 100644 index 000000000..2a1cf3e17 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_device.c @@ -0,0 +1,303 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include <rte_string_fns.h> + +#include "qat_device.h" +#include "adf_transport_access_macros.h" +#include "qat_sym_pmd.h" +#include "qat_comp_pmd.h" + +/* Hardware device information per generation */ +__extension__ +struct qat_gen_hw_data qat_gen_config[] = { + [QAT_GEN1] = { + .dev_gen = QAT_GEN1, + .qp_hw_data = qat_gen1_qps, + .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN1 + }, + [QAT_GEN2] = { + .dev_gen = QAT_GEN2, + .qp_hw_data = qat_gen1_qps, + /* gen2 has same ring layout as gen1 */ + .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN2 + }, + [QAT_GEN3] = { + .dev_gen = QAT_GEN3, + .qp_hw_data = qat_gen3_qps, + .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN3 + }, +}; + + +static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES]; +static int qat_nb_pci_devices; + +/* + * The set of PCI devices this driver supports + */ + +static const struct rte_pci_id pci_id_qat_map[] = { + { + RTE_PCI_DEVICE(0x8086, 0x0443), + }, + { + RTE_PCI_DEVICE(0x8086, 0x37c9), + }, + { + RTE_PCI_DEVICE(0x8086, 0x19e3), + }, + { + RTE_PCI_DEVICE(0x8086, 0x6f55), + }, + { + RTE_PCI_DEVICE(0x8086, 0x18a1), + }, + {.device_id = 0}, +}; + +static struct qat_pci_device * +qat_pci_get_dev(uint8_t dev_id) +{ + return &qat_pci_devices[dev_id]; +} + +static struct qat_pci_device * +qat_pci_get_named_dev(const char *name) +{ + struct qat_pci_device *dev; + unsigned int i; + + if (name == NULL) + return NULL; + + for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) { + dev = &qat_pci_devices[i]; + + if ((dev->attached == QAT_ATTACHED) && + (strcmp(dev->name, name) == 0)) + return dev; + } + + return NULL; +} + +static uint8_t +qat_pci_find_free_device_index(void) +{ + uint8_t dev_id; + + for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) { + if (qat_pci_devices[dev_id].attached == QAT_DETACHED) + break; + } + return dev_id; +} + +struct qat_pci_device * +qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev) +{ + char name[QAT_DEV_NAME_MAX_LEN]; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + + return qat_pci_get_named_dev(name); +} + +struct qat_pci_device * +qat_pci_device_allocate(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_dev; + uint8_t qat_dev_id; + char name[QAT_DEV_NAME_MAX_LEN]; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat"); + if (qat_pci_get_named_dev(name) != 
NULL) { + QAT_LOG(ERR, "QAT device with name %s already allocated!", + name); + return NULL; + } + + qat_dev_id = qat_pci_find_free_device_index(); + if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) { + QAT_LOG(ERR, "Reached maximum number of QAT devices"); + return NULL; + } + + qat_dev = qat_pci_get_dev(qat_dev_id); + memset(qat_dev, 0, sizeof(*qat_dev)); + strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN); + qat_dev->qat_dev_id = qat_dev_id; + qat_dev->pci_dev = pci_dev; + switch (qat_dev->pci_dev->id.device_id) { + case 0x0443: + qat_dev->qat_dev_gen = QAT_GEN1; + break; + case 0x37c9: + case 0x19e3: + case 0x6f55: + qat_dev->qat_dev_gen = QAT_GEN2; + break; + case 0x18a1: + qat_dev->qat_dev_gen = QAT_GEN3; + break; + default: + QAT_LOG(ERR, "Invalid dev_id, can't determine generation"); + return NULL; + } + + rte_spinlock_init(&qat_dev->arb_csr_lock); + + qat_dev->attached = QAT_ATTACHED; + + qat_nb_pci_devices++; + + QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d", + qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices); + + return qat_dev; +} + +int +qat_pci_device_release(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_dev; + char name[QAT_DEV_NAME_MAX_LEN]; + + if (pci_dev == NULL) + return -EINVAL; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat"); + qat_dev = qat_pci_get_named_dev(name); + if (qat_dev != NULL) { + + /* Check that there are no service devs still on pci device */ + if (qat_dev->sym_dev != NULL) + return -EBUSY; + + qat_dev->attached = QAT_DETACHED; + qat_nb_pci_devices--; + } + QAT_LOG(DEBUG, "QAT device %s released, total QATs %d", + name, qat_nb_pci_devices); + return 0; +} + +static int +qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev, + struct rte_pci_device *pci_dev) +{ + qat_sym_dev_destroy(qat_pci_dev); + qat_comp_dev_destroy(qat_pci_dev); + qat_asym_dev_destroy(qat_pci_dev); + return qat_pci_device_release(pci_dev); +} + +static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + int ret = 0; + int num_pmds_created = 0; + struct qat_pci_device *qat_pci_dev; + + QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x", + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + + qat_pci_dev = qat_pci_device_allocate(pci_dev); + if (qat_pci_dev == NULL) + return -ENODEV; + + ret = qat_sym_dev_create(qat_pci_dev); + if (ret == 0) + num_pmds_created++; + else + QAT_LOG(WARNING, + "Failed to create QAT SYM PMD on device %s", + qat_pci_dev->name); + + ret = qat_comp_dev_create(qat_pci_dev); + if (ret == 0) + num_pmds_created++; + else + QAT_LOG(WARNING, + "Failed to create QAT COMP PMD on device %s", + qat_pci_dev->name); + + ret = qat_asym_dev_create(qat_pci_dev); + if (ret == 0) + num_pmds_created++; + else + QAT_LOG(WARNING, + "Failed to create QAT ASYM PMD on device %s", + qat_pci_dev->name); + + if (num_pmds_created == 0) + qat_pci_dev_destroy(qat_pci_dev, pci_dev); + + return 0; +} + +static int qat_pci_remove(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_pci_dev; + + if (pci_dev == NULL) + return -EINVAL; + + qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev); + if (qat_pci_dev == NULL) + return 0; + + return qat_pci_dev_destroy(qat_pci_dev, pci_dev); +} + +static struct rte_pci_driver rte_qat_pmd = { + .id_table = pci_id_qat_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = qat_pci_probe, + .remove = qat_pci_remove +}; + +__rte_weak int 
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__rte_weak int +qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__rte_weak int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__rte_weak int +qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__rte_weak int +qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__rte_weak int +qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd); +RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map); diff --git a/src/seastar/dpdk/drivers/common/qat/qat_device.h b/src/seastar/dpdk/drivers/common/qat/qat_device.h new file mode 100644 index 000000000..131375e83 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_device.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#ifndef _QAT_DEVICE_H_ +#define _QAT_DEVICE_H_ + +#include <rte_bus_pci.h> + +#include "qat_common.h" +#include "qat_logs.h" +#include "adf_transport_access_macros.h" +#include "qat_qp.h" + +#define QAT_DETACHED (0) +#define QAT_ATTACHED (1) + +#define QAT_DEV_NAME_MAX_LEN 64 + +enum qat_comp_num_im_buffers { + QAT_NUM_INTERM_BUFS_GEN1 = 12, + QAT_NUM_INTERM_BUFS_GEN2 = 20, + QAT_NUM_INTERM_BUFS_GEN3 = 20 +}; + +/* + * This struct holds all the data about a QAT pci device + * including data about all services it supports. + * It contains + * - hw_data + * - config data + * - runtime data + */ +struct qat_sym_dev_private; +struct qat_asym_dev_private; +struct qat_comp_dev_private; + +struct qat_pci_device { + + /* Data used by all services */ + char name[QAT_DEV_NAME_MAX_LEN]; + /**< Name of qat pci device */ + uint8_t qat_dev_id; + /**< Device instance for this qat pci device */ + struct rte_pci_device *pci_dev; + /**< PCI information. */ + enum qat_device_gen qat_dev_gen; + /**< QAT device generation */ + rte_spinlock_t arb_csr_lock; + /**< lock to protect accesses to the arbiter CSR */ + __extension__ + uint8_t attached : 1; + /**< Flag indicating the device is attached */ + + struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE]; + /**< links to qps set up for each service, index same as on API */ + + /* Data relating to symmetric crypto service */ + struct qat_sym_dev_private *sym_dev; + /**< link back to cryptodev private data */ + struct rte_device sym_rte_dev; + /**< This represents the crypto sym subset of this pci device. + * Register with this rather than with the one in + * pci_dev so that its driver can have a crypto-specific name + */ + + /* Data relating to asymmetric crypto service */ + struct qat_asym_dev_private *asym_dev; + /**< link back to cryptodev private data */ + struct rte_device asym_rte_dev; + /**< This represents the crypto asym subset of this pci device. + * Register with this rather than with the one in + * pci_dev so that its driver can have a crypto-specific name + */ + + /* Data relating to compression service */ + struct qat_comp_dev_private *comp_dev; + /**< link back to compressdev private data */ + struct rte_device comp_rte_dev; + /**< This represents the compression subset of this pci device. 
+ * Register with this rather than with the one in + * pci_dev so that its driver can have a compression-specific name + */ + + /* Data relating to asymmetric crypto service */ + +}; + +struct qat_gen_hw_data { + enum qat_device_gen dev_gen; + const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE]; + enum qat_comp_num_im_buffers comp_num_im_bufs_required; +}; + +extern struct qat_gen_hw_data qat_gen_config[]; + +struct qat_pci_device * +qat_pci_device_allocate(struct rte_pci_device *pci_dev); + +int +qat_pci_device_release(struct rte_pci_device *pci_dev); + +struct qat_pci_device * +qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev); + +/* declaration needed for weak functions */ +int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +#endif /* _QAT_DEVICE_H_ */ diff --git a/src/seastar/dpdk/drivers/common/qat/qat_logs.c b/src/seastar/dpdk/drivers/common/qat/qat_logs.c new file mode 100644 index 000000000..7a8617096 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_logs.c @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include <rte_log.h> +#include <rte_hexdump.h> + +#include "qat_logs.h" + +int qat_gen_logtype; +int qat_dp_logtype; + +int +qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title, + const void *buf, unsigned int len) +{ + if (level > rte_log_get_global_level()) + return 0; + if (level > (uint32_t)(rte_log_get_level(logtype))) + return 0; + + rte_hexdump(rte_logs.file == NULL ? stderr : rte_logs.file, + title, buf, len); + return 0; +} + +RTE_INIT(qat_pci_init_log) +{ + /* Non-data-path logging for pci device and all services */ + qat_gen_logtype = rte_log_register("pmd.qat_general"); + if (qat_gen_logtype >= 0) + rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE); + + /* data-path logging for all services */ + qat_dp_logtype = rte_log_register("pmd.qat_dp"); + if (qat_dp_logtype >= 0) + rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE); +} diff --git a/src/seastar/dpdk/drivers/common/qat/qat_logs.h b/src/seastar/dpdk/drivers/common/qat/qat_logs.h new file mode 100644 index 000000000..4baea12c3 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_logs.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#ifndef _QAT_LOGS_H_ +#define _QAT_LOGS_H_ + +extern int qat_gen_logtype; +extern int qat_dp_logtype; + +#define QAT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, qat_gen_logtype, \ + "%s(): " fmt "\n", __func__, ## args) + +#define QAT_DP_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, qat_dp_logtype, \ + "%s(): " fmt "\n", __func__, ## args) + +#define QAT_DP_HEXDUMP_LOG(level, title, buf, len) \ + qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len) + +/** + * qat_hexdump_log - Dump out memory in a special hex dump format. + * + * Dump out the message buffer in a special hex dump output format with + * characters printed for each line of 16 hex values. 
The message will be sent + * to the stream defined by rte_logs.file or to stderr in case of rte_logs.file + * is undefined. + */ +int +qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title, + const void *buf, unsigned int len); + +#endif /* _QAT_LOGS_H_ */ diff --git a/src/seastar/dpdk/drivers/common/qat/qat_qp.c b/src/seastar/dpdk/drivers/common/qat/qat_qp.c new file mode 100644 index 000000000..131215296 --- /dev/null +++ b/src/seastar/dpdk/drivers/common/qat/qat_qp.c @@ -0,0 +1,692 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#include <rte_common.h> +#include <rte_dev.h> +#include <rte_malloc.h> +#include <rte_memzone.h> +#include <rte_pci.h> +#include <rte_bus_pci.h> +#include <rte_atomic.h> +#include <rte_prefetch.h> + +#include "qat_logs.h" +#include "qat_device.h" +#include "qat_qp.h" +#include "qat_sym.h" +#include "qat_asym.h" +#include "qat_comp.h" +#include "adf_transport_access_macros.h" + + +#define ADF_MAX_DESC 4096 +#define ADF_MIN_DESC 128 + +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * index), value) + +__extension__ +const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES] + [ADF_MAX_QPS_ON_ANY_SERVICE] = { + /* queue pairs which provide an asymmetric crypto service */ + [QAT_SERVICE_ASYMMETRIC] = { + { + .service_type = QAT_SERVICE_ASYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 0, + .rx_ring_num = 8, + .tx_msg_size = 64, + .rx_msg_size = 32, + + }, { + .service_type = QAT_SERVICE_ASYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 1, + .rx_ring_num = 9, + .tx_msg_size = 64, + .rx_msg_size = 32, + } + }, + /* queue pairs which provide a symmetric crypto service */ + [QAT_SERVICE_SYMMETRIC] = { + { + .service_type = QAT_SERVICE_SYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 2, + .rx_ring_num = 10, + .tx_msg_size = 128, + .rx_msg_size = 32, + }, + { + .service_type = QAT_SERVICE_SYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 3, + .rx_ring_num = 11, + .tx_msg_size = 128, + .rx_msg_size = 32, + } + }, + /* queue pairs which provide a compression service */ + [QAT_SERVICE_COMPRESSION] = { + { + .service_type = QAT_SERVICE_COMPRESSION, + .hw_bundle_num = 0, + .tx_ring_num = 6, + .rx_ring_num = 14, + .tx_msg_size = 128, + .rx_msg_size = 32, + }, { + .service_type = QAT_SERVICE_COMPRESSION, + .hw_bundle_num = 0, + .tx_ring_num = 7, + .rx_ring_num = 15, + .tx_msg_size = 128, + .rx_msg_size = 32, + } + } +}; + +__extension__ +const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES] + [ADF_MAX_QPS_ON_ANY_SERVICE] = { + /* queue pairs which provide an asymmetric crypto service */ + [QAT_SERVICE_ASYMMETRIC] = { + { + .service_type = QAT_SERVICE_ASYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 0, + .rx_ring_num = 4, + .tx_msg_size = 64, + .rx_msg_size = 32, + } + }, + /* queue pairs which provide a symmetric crypto service */ + [QAT_SERVICE_SYMMETRIC] = { + { + .service_type = QAT_SERVICE_SYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 1, + .rx_ring_num = 5, + .tx_msg_size = 128, + .rx_msg_size = 32, + } + }, + /* queue pairs which provide a compression service */ + [QAT_SERVICE_COMPRESSION] = { + { + .service_type = QAT_SERVICE_COMPRESSION, + .hw_bundle_num = 0, + .tx_ring_num = 3, + .rx_ring_num = 7, + .tx_msg_size = 128, + .rx_msg_size = 32, + } + } +}; + +static int qat_qp_check_queue_alignment(uint64_t phys_addr, + uint32_t 
queue_size_bytes); +static void qat_queue_delete(struct qat_queue *queue); +static int qat_queue_create(struct qat_pci_device *qat_dev, + struct qat_queue *queue, struct qat_qp_config *, uint8_t dir); +static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, + uint32_t *queue_size_for_csr); +static void adf_configure_queues(struct qat_qp *queue); +static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock); +static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock); + + +int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data, + enum qat_service_type service) +{ + int i, count; + + for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) + if (qp_hw_data[i].service_type == service) + count++; + return count; +} + +static const struct rte_memzone * +queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size, + int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(queue_name); + if (mz != 0) { + if (((size_t)queue_size <= mz->len) && + ((socket_id == SOCKET_ID_ANY) || + (socket_id == mz->socket_id))) { + QAT_LOG(DEBUG, "re-use memzone already " + "allocated for %s", queue_name); + return mz; + } + + QAT_LOG(ERR, "Incompatible memzone already " + "allocated %s, size %u, socket %d. " + "Requested size %u, socket %u", + queue_name, (uint32_t)mz->len, + mz->socket_id, queue_size, socket_id); + return NULL; + } + + QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u", + queue_name, queue_size, socket_id); + return rte_memzone_reserve_aligned(queue_name, queue_size, + socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size); +} + +int qat_qp_setup(struct qat_pci_device *qat_dev, + struct qat_qp **qp_addr, + uint16_t queue_pair_id, + struct qat_qp_config *qat_qp_conf) + +{ + struct qat_qp *qp; + struct rte_pci_device *pci_dev = qat_dev->pci_dev; + char op_cookie_pool_name[RTE_RING_NAMESIZE]; + uint32_t i; + + QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d", + queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen); + + if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) || + (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) { + QAT_LOG(ERR, "Can't create qp for %u descriptors", + qat_qp_conf->nb_descriptors); + return -EINVAL; + } + + if (pci_dev->mem_resource[0].addr == NULL) { + QAT_LOG(ERR, "Could not find VF config space " + "(UIO driver attached?)."); + return -EINVAL; + } + + /* Allocate the queue pair data structure. 
*/ + qp = rte_zmalloc_socket("qat PMD qp metadata", + sizeof(*qp), RTE_CACHE_LINE_SIZE, + qat_qp_conf->socket_id); + if (qp == NULL) { + QAT_LOG(ERR, "Failed to alloc mem for qp struct"); + return -ENOMEM; + } + qp->nb_descriptors = qat_qp_conf->nb_descriptors; + qp->op_cookies = rte_zmalloc_socket("qat PMD op cookie pointer", + qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies), + RTE_CACHE_LINE_SIZE, qat_qp_conf->socket_id); + if (qp->op_cookies == NULL) { + QAT_LOG(ERR, "Failed to alloc mem for cookie"); + rte_free(qp); + return -ENOMEM; + } + + qp->mmap_bar_addr = pci_dev->mem_resource[0].addr; + qp->inflights16 = 0; + + if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf, + ADF_RING_DIR_TX) != 0) { + QAT_LOG(ERR, "Tx queue create failed " + "queue_pair_id=%u", queue_pair_id); + goto create_err; + } + + if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf, + ADF_RING_DIR_RX) != 0) { + QAT_LOG(ERR, "Rx queue create failed " + "queue_pair_id=%hu", queue_pair_id); + qat_queue_delete(&(qp->tx_q)); + goto create_err; + } + + adf_configure_queues(qp); + adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr, + &qat_dev->arb_csr_lock); + + snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, + "%s%d_cookies_%s_qp%hu", + pci_dev->driver->driver.name, qat_dev->qat_dev_id, + qat_qp_conf->service_str, queue_pair_id); + + QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name); + qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name); + if (qp->op_cookie_pool == NULL) + qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name, + qp->nb_descriptors, + qat_qp_conf->cookie_size, 64, 0, + NULL, NULL, NULL, NULL, + qat_dev->pci_dev->device.numa_node, + 0); + if (!qp->op_cookie_pool) { + QAT_LOG(ERR, "QAT PMD Cannot create" + " op mempool"); + goto create_err; + } + + for (i = 0; i < qp->nb_descriptors; i++) { + if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) { + QAT_LOG(ERR, "QAT PMD Cannot get op_cookie"); + goto create_err; + } + memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size); + } + + qp->qat_dev_gen = qat_dev->qat_dev_gen; + qp->build_request = qat_qp_conf->build_request; + qp->service_type = qat_qp_conf->hw->service_type; + qp->qat_dev = qat_dev; + + QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s", + queue_pair_id, op_cookie_pool_name); + + *qp_addr = qp; + return 0; + +create_err: + if (qp->op_cookie_pool) + rte_mempool_free(qp->op_cookie_pool); + rte_free(qp->op_cookies); + rte_free(qp); + return -EFAULT; +} + +int qat_qp_release(struct qat_qp **qp_addr) +{ + struct qat_qp *qp = *qp_addr; + uint32_t i; + + if (qp == NULL) { + QAT_LOG(DEBUG, "qp already freed"); + return 0; + } + + QAT_LOG(DEBUG, "Free qp on qat_pci device %d", + qp->qat_dev->qat_dev_id); + + /* Don't free memory if there are still responses to be processed */ + if (qp->inflights16 == 0) { + qat_queue_delete(&(qp->tx_q)); + qat_queue_delete(&(qp->rx_q)); + } else { + return -EAGAIN; + } + + adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr, + &qp->qat_dev->arb_csr_lock); + + for (i = 0; i < qp->nb_descriptors; i++) + rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]); + + if (qp->op_cookie_pool) + rte_mempool_free(qp->op_cookie_pool); + + rte_free(qp->op_cookies); + rte_free(qp); + *qp_addr = NULL; + return 0; +} + + +static void qat_queue_delete(struct qat_queue *queue) +{ + const struct rte_memzone *mz; + int status = 0; + + if (queue == NULL) { + QAT_LOG(DEBUG, "Invalid queue"); + return; + } + QAT_LOG(DEBUG, "Free ring %d, memzone: %s", + queue->hw_queue_number, queue->memz_name); + 
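
The lookup that follows resolves the same unique memzone name that
qat_queue_create(), further down, builds when it reserves the ring memory, so
delete frees exactly the zone that create reserved. A sketch of that naming
scheme with hypothetical values for the components (driver name, device id,
service string, tag, bundle number, ring number):

    #include <stdio.h>

    int main(void)
    {
            char memz_name[64];

            snprintf(memz_name, sizeof(memz_name), "%s_%d_%s_%s_%d_%d",
                     "qat", 0, "sym", "qp_mem", 0, 2);
            puts(memz_name);   /* prints: qat_0_sym_qp_mem_0_2 */
            return 0;
    }
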
+ mz = rte_memzone_lookup(queue->memz_name); + if (mz != NULL) { + /* Write an unused pattern to the queue memory. */ + memset(queue->base_addr, 0x7F, queue->queue_size); + status = rte_memzone_free(mz); + if (status != 0) + QAT_LOG(ERR, "Error %d on freeing queue %s", + status, queue->memz_name); + } else { + QAT_LOG(DEBUG, "queue %s doesn't exist", + queue->memz_name); + } +} + +static int +qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue, + struct qat_qp_config *qp_conf, uint8_t dir) +{ + uint64_t queue_base; + void *io_addr; + const struct rte_memzone *qp_mz; + struct rte_pci_device *pci_dev = qat_dev->pci_dev; + int ret = 0; + uint16_t desc_size = (dir == ADF_RING_DIR_TX ? + qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size); + uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size); + + queue->hw_bundle_number = qp_conf->hw->hw_bundle_num; + queue->hw_queue_number = (dir == ADF_RING_DIR_TX ? + qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num); + + if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { + QAT_LOG(ERR, "Invalid descriptor size %d", desc_size); + return -EINVAL; + } + + /* + * Allocate a memzone for the queue - create a unique name. + */ + snprintf(queue->memz_name, sizeof(queue->memz_name), + "%s_%d_%s_%s_%d_%d", + pci_dev->driver->driver.name, qat_dev->qat_dev_id, + qp_conf->service_str, "qp_mem", + queue->hw_bundle_number, queue->hw_queue_number); + qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes, + qat_dev->pci_dev->device.numa_node); + if (qp_mz == NULL) { + QAT_LOG(ERR, "Failed to allocate ring memzone"); + return -ENOMEM; + } + + queue->base_addr = (char *)qp_mz->addr; + queue->base_phys_addr = qp_mz->iova; + if (qat_qp_check_queue_alignment(queue->base_phys_addr, + queue_size_bytes)) { + QAT_LOG(ERR, "Invalid alignment on queue create " + " 0x%"PRIx64"\n", + queue->base_phys_addr); + ret = -EFAULT; + goto queue_create_err; + } + + if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors, + &(queue->queue_size)) != 0) { + QAT_LOG(ERR, "Invalid num inflights"); + ret = -EINVAL; + goto queue_create_err; + } + + queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size, + ADF_BYTES_TO_MSG_SIZE(desc_size)); + queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1; + + if (queue->max_inflights < 2) { + QAT_LOG(ERR, "Invalid num inflights"); + ret = -EINVAL; + goto queue_create_err; + } + queue->head = 0; + queue->tail = 0; + queue->msg_size = desc_size; + + /* + * Write an unused pattern to the queue memory. 
+ */ + memset(queue->base_addr, 0x7F, queue_size_bytes); + + queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr, + queue->queue_size); + + io_addr = pci_dev->mem_resource[0].addr; + + WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_base); + + QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u," + " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u", + queue->memz_name, + queue->queue_size, queue_size_bytes, + qp_conf->nb_descriptors, desc_size, + queue->max_inflights, queue->modulo_mask); + + return 0; + +queue_create_err: + rte_memzone_free(qp_mz); + return ret; +} + +static int qat_qp_check_queue_alignment(uint64_t phys_addr, + uint32_t queue_size_bytes) +{ + if (((queue_size_bytes - 1) & phys_addr) != 0) + return -EINVAL; + return 0; +} + +static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, + uint32_t *p_queue_size_for_csr) +{ + uint8_t i = ADF_MIN_RING_SIZE; + + for (; i <= ADF_MAX_RING_SIZE; i++) + if ((msg_size * msg_num) == + (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) { + *p_queue_size_for_csr = i; + return 0; + } + QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num); + return -EINVAL; +} + +static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock) +{ + uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + + (ADF_ARB_REG_SLOT * + txq->hw_bundle_number); + uint32_t value; + + rte_spinlock_lock(lock); + value = ADF_CSR_RD(base_addr, arb_csr_offset); + value |= (0x01 << txq->hw_queue_number); + ADF_CSR_WR(base_addr, arb_csr_offset, value); + rte_spinlock_unlock(lock); +} + +static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock) +{ + uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + + (ADF_ARB_REG_SLOT * + txq->hw_bundle_number); + uint32_t value; + + rte_spinlock_lock(lock); + value = ADF_CSR_RD(base_addr, arb_csr_offset); + value &= ~(0x01 << txq->hw_queue_number); + ADF_CSR_WR(base_addr, arb_csr_offset, value); + rte_spinlock_unlock(lock); +} + +static void adf_configure_queues(struct qat_qp *qp) +{ + uint32_t queue_config; + struct qat_queue *queue = &qp->tx_q; + + queue_config = BUILD_RING_CONFIG(queue->queue_size); + + WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_config); + + queue = &qp->rx_q; + queue_config = + BUILD_RESP_RING_CONFIG(queue->queue_size, + ADF_RING_NEAR_WATERMARK_512, + ADF_RING_NEAR_WATERMARK_0); + + WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_config); +} + +static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask) +{ + return data & modulo_mask; +} + +static inline void +txq_write_tail(struct qat_qp *qp, struct qat_queue *q) { + WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number, + q->hw_queue_number, q->tail); + q->nb_pending_requests = 0; + q->csr_tail = q->tail; +} + +static inline +void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q) +{ + uint32_t old_head, new_head; + uint32_t max_head; + + old_head = q->csr_head; + new_head = q->head; + max_head = qp->nb_descriptors * q->msg_size; + + /* write out free descriptors */ + void *cur_desc = (uint8_t *)q->base_addr + old_head; + + if (new_head < old_head) { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head); + memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head); + } else { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head); + } + q->nb_processed_responses = 0; + q->csr_head = 
new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, new_head);
+}
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int ret;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+ int overflow;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ tmp_qp->inflights16 += nb_ops;
+ overflow = tmp_qp->inflights16 - queue->max_inflights;
+ if (overflow > 0) {
+ tmp_qp->inflights16 -= overflow;
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = tmp_qp->build_request(*ops, base_addr + tail,
+ tmp_qp->op_cookies[tail / queue->msg_size],
+ tmp_qp->qat_dev_gen);
+ if (ret != 0) {
+ tmp_qp->stats.enqueue_err_count++;
+ /*
+ * This message cannot be enqueued; subtract the
+ * ops that were not sent from the in-flight count
+ */
+ tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ queue->nb_pending_requests += nb_ops_sent;
+ if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+ queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+ txq_write_tail(tmp_qp, queue);
+ }
+ return nb_ops_sent;
+}
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ struct qat_queue *rx_queue, *tx_queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ uint32_t head;
+ uint32_t resp_counter = 0;
+ uint8_t *resp_msg;
+
+ rx_queue = &(tmp_qp->rx_q);
+ tx_queue = &(tmp_qp->tx_q);
+ head = rx_queue->head;
+ resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+
+ while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+ resp_counter != nb_ops) {
+
+ if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
+ qat_sym_process_response(ops, resp_msg);
+ else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
+ qat_comp_process_response(ops, resp_msg,
+ &tmp_qp->stats.dequeue_err_count);
+ else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
+#ifdef BUILD_QAT_ASYM
+ qat_asym_process_response(ops, resp_msg,
+ tmp_qp->op_cookies[head / rx_queue->msg_size]);
+#endif
+ }
+
+ head = adf_modulo(head + rx_queue->msg_size,
+ rx_queue->modulo_mask);
+
+ resp_msg = (uint8_t *)rx_queue->base_addr + head;
+ ops++;
+ resp_counter++;
+ }
+ if (resp_counter > 0) {
+ rx_queue->head = head;
+ tmp_qp->stats.dequeued_count += resp_counter;
+ rx_queue->nb_processed_responses += resp_counter;
+ tmp_qp->inflights16 -= resp_counter;
+
+ if (rx_queue->nb_processed_responses >
+ QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(tmp_qp, rx_queue);
+ }
+ /* also check if tail needs to be advanced */
+ if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+ tx_queue->tail != tx_queue->csr_tail) {
+ txq_write_tail(tmp_qp, tx_queue);
+ }
+ return resp_counter;
+}
+
+__rte_weak int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+ uint64_t *dequeue_err_count __rte_unused)
+{
+ return 0;
+}
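
The admission check at the top of qat_enqueue_op_burst() is easy to get wrong,
so here is an isolated, self-contained restatement of it (names are
illustrative): the in-flight counter is bumped by the whole burst first, then
trimmed back by whatever would overflow the ring's in-flight limit.

    #include <stdint.h>

    /* Returns how many of nb_ops may be enqueued, updating *inflights
     * the same way qat_enqueue_op_burst() updates inflights16. */
    static uint16_t qat_admit_burst(uint16_t *inflights,
                                    uint16_t max_inflights, uint16_t nb_ops)
    {
            int overflow;

            *inflights += nb_ops;
            overflow = *inflights - max_inflights;
            if (overflow > 0) {
                    *inflights -= overflow;
                    return nb_ops - overflow;
            }
            return nb_ops;
    }

Ring offsets then advance with adf_modulo(), which works because the queue size
in bytes is a power of two, so "tail + msg_size" can be wrapped with a single
bitwise AND against modulo_mask.
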
diff --git a/src/seastar/dpdk/drivers/common/qat/qat_qp.h b/src/seastar/dpdk/drivers/common/qat/qat_qp.h
new file mode 100644
index 000000000..9833bcbd8
--- /dev/null
+++ b/src/seastar/dpdk/drivers/common/qat/qat_qp.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_QP_H_
+#define _QAT_QP_H_
+
+#include "qat_common.h"
+#include "adf_transport_access_macros.h"
+
+struct qat_pci_device;
+
+#define QAT_CSR_HEAD_WRITE_THRESH 32U
+/* number of requests to accumulate before writing head CSR */
+#define QAT_CSR_TAIL_WRITE_THRESH 32U
+/* number of requests to accumulate before writing tail CSR */
+#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
+/* number of inflights below which no tail write coalescing should occur */
+
+typedef int (*build_request_t)(void *op,
+ uint8_t *req, void *op_cookie,
+ enum qat_device_gen qat_dev_gen);
+/**< Build a request from an op. */
+
+/**
+ * Structure with the hardware ring data needed for creation of a queue pair.
+ */
+struct qat_qp_hw_data {
+ enum qat_service_type service_type;
+ uint8_t hw_bundle_num;
+ uint8_t tx_ring_num;
+ uint8_t rx_ring_num;
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+/**
+ * Structure with the configuration data needed for creation of a queue pair.
+ */
+struct qat_qp_config {
+ const struct qat_qp_hw_data *hw;
+ uint32_t nb_descriptors;
+ uint32_t cookie_size;
+ int socket_id;
+ build_request_t build_request;
+ const char *service_str;
+};
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ void *base_addr; /* Base address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
+ uint32_t head; /* Shadow copy of the head */
+ uint32_t tail; /* Shadow copy of the tail */
+ uint32_t modulo_mask;
+ uint32_t msg_size;
+ uint16_t max_inflights;
+ uint32_t queue_size;
+ uint8_t hw_bundle_number;
+ uint8_t hw_queue_number;
+ /* HW queue aka ring offset on bundle */
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+ uint16_t nb_processed_responses;
+ /* number of responses processed since last CSR head write */
+ uint16_t nb_pending_requests;
+ /* number of requests pending since last CSR tail write */
+};
+
+struct qat_qp {
+ void *mmap_bar_addr;
+ uint16_t inflights16;
+ struct qat_queue tx_q;
+ struct qat_queue rx_q;
+ struct qat_common_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint32_t nb_descriptors;
+ enum qat_device_gen qat_dev_gen;
+ build_request_t build_request;
+ enum qat_service_type service_type;
+ struct qat_pci_device *qat_dev;
+ /**< qat device this qp is on */
+} __rte_cache_aligned;
+
+extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+extern const struct qat_qp_hw_data qat_gen3_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+int
+qat_qp_release(struct qat_qp **qp_addr);
+
+int
+qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr, uint16_t queue_pair_id,
+ struct qat_qp_config *qat_qp_conf);
+
+int
+qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service);
+
+/* Needed for weak function */
+int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+ uint64_t *dequeue_err_count);
+
+#endif /* _QAT_QP_H_ */
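
The three thresholds above implement CSR write coalescing: head and tail
doorbell writes cross the PCIe bus, so the data path batches them. A condensed,
self-contained restatement of the predicate qat_enqueue_op_burst() evaluates
before calling txq_write_tail() (names are illustrative):

    #include <stdint.h>

    #define TAIL_WRITE_THRESH 32U         /* QAT_CSR_TAIL_WRITE_THRESH */
    #define TAIL_FORCE_WRITE_THRESH 256U  /* QAT_CSR_TAIL_FORCE_WRITE_THRESH */

    /* Ring the TX doorbell when the queue is lightly loaded (a deferred
     * write would otherwise stall completions) or when enough requests
     * have accumulated since the last CSR write. */
    static int should_write_tail_csr(uint16_t inflights16,
                                     uint16_t nb_pending_requests)
    {
            return inflights16 < TAIL_FORCE_WRITE_THRESH ||
                   nb_pending_requests > TAIL_WRITE_THRESH;
    }

The dequeue side applies the same idea to the RX head CSR: rxq_free_desc() is
only called once more than QAT_CSR_HEAD_WRITE_THRESH responses have been
processed since the last head write.
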