path: root/src/spdk/dpdk/lib/librte_ipsec
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit     19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree       42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/spdk/dpdk/lib/librte_ipsec
parent     Initial commit. (diff)
Adding upstream version 16.2.11+ds.  (upstream/16.2.11+ds, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/lib/librte_ipsec')
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/Makefile                  29
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/crypto.h                 182
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/esp_inb.c                782
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/esp_outb.c               700
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/iph.h                    279
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/ipsec_sad.c              559
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/ipsec_sqn.h              309
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/meson.build               10
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/misc.h                   180
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/pad.h                     45
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/rte_ipsec.h              171
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_group.h        153
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_sa.h           182
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_sad.h          178
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_version.map     21
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/sa.c                     764
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/sa.h                     199
-rw-r--r--  src/spdk/dpdk/lib/librte_ipsec/ses.c                     53
18 files changed, 4796 insertions, 0 deletions
diff --git a/src/spdk/dpdk/lib/librte_ipsec/Makefile b/src/spdk/dpdk/lib/librte_ipsec/Makefile
new file mode 100644
index 000000000..e4c69646b
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_ipsec.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_net
+LDLIBS += -lrte_cryptodev -lrte_security -lrte_hash
+
+EXPORT_MAP := rte_ipsec_version.map
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += esp_inb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += esp_outb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += sa.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += ses.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += ipsec_sad.c
+
+# install header files
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec_group.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec_sa.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec_sad.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/lib/librte_ipsec/crypto.h b/src/spdk/dpdk/lib/librte_ipsec/crypto.h
new file mode 100644
index 000000000..3d0303459
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/crypto.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _CRYPTO_H_
+#define _CRYPTO_H_
+
+/**
+ * @file crypto.h
+ * Contains crypto specific functions/structures/macros used internally
+ * by ipsec library.
+ */
+
+/*
+ * AES-CTR counter block format.
+ */
+
+struct aesctr_cnt_blk {
+ uint32_t nonce;
+ uint64_t iv;
+ uint32_t cnt;
+} __rte_packed;
+
+/*
+ * AES-GCM devices have some specific requirements for IV and AAD formats.
+ * Ideally that would be done by the driver itself.
+ */
+
+struct aead_gcm_iv {
+ uint32_t salt;
+ uint64_t iv;
+ uint32_t cnt;
+} __rte_packed;
+
+struct aead_gcm_aad {
+ uint32_t spi;
+ /*
+ * RFC 4106, section 5:
+ * Two formats of the AAD are defined:
+ * one for 32-bit sequence numbers, and one for 64-bit ESN.
+ */
+ union {
+ uint32_t u32[2];
+ uint64_t u64;
+ } sqn;
+ uint32_t align0; /* align to 16B boundary */
+} __rte_packed;
+
+struct gcm_esph_iv {
+ struct rte_esp_hdr esph;
+ uint64_t iv;
+} __rte_packed;
+
+static inline void
+aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)
+{
+ ctr->nonce = nonce;
+ ctr->iv = iv;
+ ctr->cnt = rte_cpu_to_be_32(1);
+}
+
+static inline void
+aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
+{
+ gcm->salt = salt;
+ gcm->iv = iv;
+ gcm->cnt = rte_cpu_to_be_32(1);
+}
+
+/*
+ * RFC 4106, 5 AAD Construction
+ * spi and sqn should already be converted into network byte order.
+ * Make sure that unused bytes are zeroed.
+ */
+static inline void
+aead_gcm_aad_fill(struct aead_gcm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
+ int esn)
+{
+ aad->spi = spi;
+ if (esn)
+ aad->sqn.u64 = sqn;
+ else {
+ aad->sqn.u32[0] = sqn_low32(sqn);
+ aad->sqn.u32[1] = 0;
+ }
+ aad->align0 = 0;
+}
+
+static inline void
+gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
+{
+ iv[0] = sqn;
+ iv[1] = 0;
+}
+
+/*
+ * Helper routine to copy IV
+ * Right now we support only algorithms with IV length equal to 0/8/16 bytes.
+ */
+static inline void
+copy_iv(uint64_t dst[IPSEC_MAX_IV_QWORD],
+ const uint64_t src[IPSEC_MAX_IV_QWORD], uint32_t len)
+{
+ RTE_BUILD_BUG_ON(IPSEC_MAX_IV_SIZE != 2 * sizeof(uint64_t));
+
+ switch (len) {
+ case IPSEC_MAX_IV_SIZE:
+ dst[1] = src[1];
+ /* fallthrough */
+ case sizeof(uint64_t):
+ dst[0] = src[0];
+ /* fallthrough */
+ case 0:
+ break;
+ default:
+ /* should never happen */
+ RTE_ASSERT(NULL);
+ }
+}
+
+/*
+ * from RFC 4303 3.3.2.1.4:
+ * If the ESN option is enabled for the SA, the high-order 32
+ * bits of the sequence number are appended after the Next Header field
+ * for purposes of this computation, but are not transmitted.
+ */
+
+/*
+ * Helper function that moves the ICV down by 4B and inserts SQN.hibits.
+ * icv parameter points to the new start of ICV.
+ */
+static inline void
+insert_sqh(uint32_t sqh, void *picv, uint32_t icv_len)
+{
+ uint32_t *icv;
+ int32_t i;
+
+ RTE_ASSERT(icv_len % sizeof(uint32_t) == 0);
+
+ icv = picv;
+ icv_len = icv_len / sizeof(uint32_t);
+ for (i = icv_len; i-- != 0; icv[i] = icv[i - 1])
+ ;
+
+ icv[i] = sqh;
+}
+
+/*
+ * Helper function that moves the ICV up by 4B and removes SQN.hibits.
+ * icv parameter points to the new start of ICV.
+ */
+static inline void
+remove_sqh(void *picv, uint32_t icv_len)
+{
+ uint32_t i, *icv;
+
+ RTE_ASSERT(icv_len % sizeof(uint32_t) == 0);
+
+ icv = picv;
+ icv_len = icv_len / sizeof(uint32_t);
+ for (i = 0; i != icv_len; i++)
+ icv[i] = icv[i + 1];
+}
+
+/*
+ * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
+ */
+static inline void
+lksd_none_cop_prepare(struct rte_crypto_op *cop,
+ struct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)
+{
+ struct rte_crypto_sym_op *sop;
+
+ sop = cop->sym;
+ cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ cop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ sop->m_src = mb;
+ __rte_crypto_sym_op_attach_sym_session(sop, cs);
+}
+
+#endif /* _CRYPTO_H_ */
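
The AAD that aead_gcm_aad_fill() builds is always a fixed 16 bytes: the SPI, then either the 32-bit SQN followed by a zero word or the full 64-bit ESN, then a zero pad word. A minimal standalone sketch of that layout, using a local mirror of the structure (illustrative names only, no DPDK headers required):

#include <stdint.h>
#include <stdio.h>

/* local mirror of struct aead_gcm_aad above, for illustration only */
struct demo_gcm_aad {
        uint32_t spi;
        union {
                uint32_t u32[2];        /* 32-bit SQN + zeroed word */
                uint64_t u64;           /* 64-bit ESN */
        } sqn;
        uint32_t align0;                /* pads the AAD to 16 bytes */
} __attribute__((packed));

int
main(void)
{
        /*
         * RFC 4106 defines an 8-byte AAD (SPI + 32-bit SQN) and a
         * 12-byte AAD (SPI + 64-bit ESN); the trailing pad word keeps
         * the buffer passed to the device at a fixed 16 bytes either way.
         */
        printf("AAD buffer size: %zu bytes\n", sizeof(struct demo_gcm_aad));
        return 0;
}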
diff --git a/src/spdk/dpdk/lib/librte_ipsec/esp_inb.c b/src/spdk/dpdk/lib/librte_ipsec/esp_inb.c
new file mode 100644
index 000000000..96eec0131
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/esp_inb.c
@@ -0,0 +1,782 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2020 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
+ struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
+ uint8_t sqh_len);
+
+/*
+ * helper function to fill crypto_sym op for cipher+auth algorithms.
+ * used by inb_cop_prepare(), see below.
+ */
+static inline void
+sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->cipher.data.length = plen - sa->ctp.cipher.length;
+ sop->auth.data.offset = pofs + sa->ctp.auth.offset;
+ sop->auth.data.length = plen - sa->ctp.auth.length;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+}
+
+/*
+ * helper function to fill crypto_sym op for aead algorithms
+ * used by inb_cop_prepare(), see below.
+ */
+static inline void
+sop_aead_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->aead.data.length = plen - sa->ctp.cipher.length;
+ sop->aead.digest.data = icv->va;
+ sop->aead.digest.phys_addr = icv->pa;
+ sop->aead.aad.data = icv->va + sa->icv_len;
+ sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
+}
+
+/*
+ * setup crypto op and crypto sym op for ESP inbound packet.
+ */
+static inline void
+inb_cop_prepare(struct rte_crypto_op *cop,
+ const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
+{
+ struct rte_crypto_sym_op *sop;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint64_t *ivc, *ivp;
+ uint32_t algo;
+
+ algo = sa->algo_type;
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ pofs + sizeof(struct rte_esp_hdr));
+
+ /* fill sym op fields */
+ sop = cop->sym;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_GCM:
+ sop_aead_prepare(sop, sa, icv, pofs, plen);
+
+ /* fill AAD IV (located inside crypto op) */
+ gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
+ sa->iv_ofs);
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
+
+ /* copy iv from the input packet to the cop */
+ ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
+ copy_iv(ivc, ivp, sa->iv_len);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
+
+ /* fill CTR block (located inside crypto op) */
+ ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
+ sa->iv_ofs);
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_NULL:
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
+ break;
+ }
+}
+
+static inline uint32_t
+inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ uint32_t *pofs, uint32_t plen, void *iv)
+{
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint64_t *ivp;
+ uint32_t clen;
+
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ *pofs + sizeof(struct rte_esp_hdr));
+ clen = 0;
+
+ switch (sa->algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = (struct aead_gcm_iv *)iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ copy_iv(iv, ivp, sa->iv_len);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ ctr = (struct aesctr_cnt_blk *)iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ }
+
+ *pofs += sa->ctp.auth.offset;
+ clen = plen - sa->ctp.auth.length;
+ return clen;
+}
+
+/*
+ * Helper function for prepare() to deal with the situation when the
+ * ICV is spread across two segments. Tries to move the ICV completely into the
+ * last segment.
+ */
+static struct rte_mbuf *
+move_icv(struct rte_mbuf *ml, uint32_t ofs)
+{
+ uint32_t n;
+ struct rte_mbuf *ms;
+ const void *prev;
+ void *new;
+
+ ms = ml->next;
+ n = ml->data_len - ofs;
+
+ prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
+ new = rte_pktmbuf_prepend(ms, n);
+ if (new == NULL)
+ return NULL;
+
+ /* move n ICV bytes from ml into ms */
+ rte_memcpy(new, prev, n);
+ ml->data_len -= n;
+
+ return ms;
+}
+
+/*
+ * for pure cryptodev (lookaside none) depending on SA settings,
+ * we might have to write some extra data to the packet.
+ */
+static inline void
+inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const union sym_op_data *icv)
+{
+ struct aead_gcm_aad *aad;
+
+ /* insert SQN.hi between ESP trailer and ICV */
+ if (sa->sqh_len != 0)
+ insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
+
+ /*
+ * fill AAD fields, if any (aad fields are placed after icv),
+ * right now we support only one AEAD algorithm: AES-GCM.
+ */
+ if (sa->aad_len != 0) {
+ aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+ aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
+ }
+}
+
+static inline int
+inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
+ struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
+{
+ int32_t rc;
+ uint64_t sqn;
+ struct rte_esp_hdr *esph;
+
+ esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
+
+ /*
+ * retrieve and reconstruct SQN, then check it, then
+ * convert it back into network byte order.
+ */
+ sqn = rte_be_to_cpu_32(esph->seq);
+ if (IS_ESN(sa))
+ sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
+ *sqc = rte_cpu_to_be_64(sqn);
+
+ /* check IPsec window */
+ rc = esn_inb_check_sqn(rsn, sa, sqn);
+
+ return rc;
+}
+
+/* prepare packet for upcoming processing */
+static inline int32_t
+inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ uint32_t hlen, union sym_op_data *icv)
+{
+ uint32_t clen, icv_len, icv_ofs, plen;
+ struct rte_mbuf *ml;
+
+ /* start packet manipulation */
+ plen = mb->pkt_len;
+ plen = plen - hlen;
+
+ /* check that packet has a valid length */
+ clen = plen - sa->ctp.cipher.length;
+ if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
+ return -EBADMSG;
+
+ /* find ICV location */
+ icv_len = sa->icv_len;
+ icv_ofs = mb->pkt_len - icv_len;
+
+ ml = mbuf_get_seg_ofs(mb, &icv_ofs);
+
+ /*
+ * if the ICV is spread across two segments, then try to
+ * move ICV completely into the last segment.
+ */
+ if (ml->data_len < icv_ofs + icv_len) {
+
+ ml = move_icv(ml, icv_ofs);
+ if (ml == NULL)
+ return -ENOSPC;
+
+ /* new ICV location */
+ icv_ofs = 0;
+ }
+
+ icv_ofs += sa->sqh_len;
+
+ /*
+ * we have to allocate space for AAD somewhere,
+ * right now - just use free trailing space at the last segment.
+ * It would probably be more convenient to reserve space for the AAD
+ * inside rte_crypto_op itself (as is already done for the IV, whose
+ * space is reserved inside the cop).
+ */
+ if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
+
+ /*
+ * if ESN is used then the high-order 32 bits are also used in the ICV
+ * calculation but are not transmitted. Update the packet length to be
+ * consistent with the auth data length and offset; this will be
+ * subtracted from the packet length in post-crypto processing.
+ */
+ mb->pkt_len += sa->sqh_len;
+ ml->data_len += sa->sqh_len;
+
+ return plen;
+}
+
+static inline int32_t
+inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
+ struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
+{
+ int rc;
+ rte_be64_t sqn;
+
+ rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
+ if (rc != 0)
+ return rc;
+
+ rc = inb_prepare(sa, mb, hlen, icv);
+ if (rc < 0)
+ return rc;
+
+ inb_pkt_xprepare(sa, sqn, icv);
+ return rc;
+}
+
+/*
+ * setup/update packets and crypto ops for ESP inbound case.
+ */
+uint16_t
+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, hl;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ struct replay_sqn *rsn;
+ union sym_op_data icv;
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+ rsn = rsn_acquire(sa);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+
+ hl = mb[i]->l2_len + mb[i]->l3_len;
+ rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
+ if (rc >= 0) {
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
+ k++;
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ rsn_release(sa, rsn);
+
+ /* move mbufs that were not prepared beyond the good ones */
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+
+ return k;
+}
+
+/*
+ * Start processing the inbound packet.
+ * This is the common part for both tunnel and transport modes.
+ * Extract information that will be needed later from mbuf metadata and
+ * actual packet data:
+ * - mbuf for packet's last segment
+ * - length of the L2/L3 headers
+ * - esp tail structure
+ */
+static inline void
+process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
+ struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
+{
+ const struct rte_esp_tail *pt;
+ uint32_t ofs;
+
+ ofs = mb->pkt_len - tlen;
+ hlen[0] = mb->l2_len + mb->l3_len;
+ ml[0] = mbuf_get_seg_ofs(mb, &ofs);
+ pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs);
+ tofs[0] = ofs;
+ espt[0] = pt[0];
+}
+
+/*
+ * Helper function to check pad bytes values.
+ * Note that pad bytes can be spread across multiple segments.
+ */
+static inline int
+check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
+{
+ const uint8_t *pd;
+ uint32_t k, n;
+
+ for (n = 0; n != len; n += k, mb = mb->next) {
+ k = mb->data_len - ofs;
+ k = RTE_MIN(k, len - n);
+ pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
+ if (memcmp(pd, esp_pad_bytes + n, k) != 0)
+ break;
+ ofs = 0;
+ }
+
+ return len - n;
+}
+
+/*
+ * packet checks for transport mode:
+ * - no reported IPsec related failures in ol_flags
+ * - tail and header lengths are valid
+ * - padding bytes are valid
+ * Apart from the checks, the function also updates the tail offset
+ * (and segment), taking the pad length into account.
+ */
+static inline int32_t
+trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
+ uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
+{
+ if ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
+ tlen + hlen > mb->pkt_len)
+ return -EBADMSG;
+
+ /* padding bytes are spread over multiple segments */
+ if (tofs[0] < espt.pad_len) {
+ tofs[0] = mb->pkt_len - tlen;
+ ml[0] = mbuf_get_seg_ofs(mb, tofs);
+ } else
+ tofs[0] -= espt.pad_len;
+
+ return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
+}
+
+/*
+ * packet checks for tunnel mode:
+ * - same as for transport mode
+ * - esp tail next_proto contains the value expected for that SA
+ */
+static inline int32_t
+tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
+ uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen,
+ uint8_t proto)
+{
+ return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
+ espt.next_proto != proto);
+}
+
+/*
+ * step two for tunnel mode:
+ * - read SQN value (for future use)
+ * - cut off ICV, ESP tail and padding bytes
+ * - cut off ESP header and IV, and if needed the L2/L3 headers
+ * (controlled by *adj* value)
+ */
+static inline void *
+tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
+ uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
+{
+ const struct rte_esp_hdr *ph;
+
+ /* read SQN value */
+ ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
+ sqn[0] = ph->seq;
+
+ /* cut off ICV, ESP tail and padding bytes */
+ mbuf_cut_seg_ofs(mb, ml, tofs, tlen);
+
+ /* cut off L2/L3 headers, ESP header and IV */
+ return rte_pktmbuf_adj(mb, adj);
+}
+
+/*
+ * step two for transport mode:
+ * - read SQN value (for future use)
+ * - cut off ICV, ESP tail and padding bytes
+ * - cut off ESP header and IV
+ * - move L2/L3 header to fill the gap after ESP header removal
+ */
+static inline void *
+trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
+ uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
+{
+ char *np, *op;
+
+ /* get start of the packet before modifications */
+ op = rte_pktmbuf_mtod(mb, char *);
+
+ /* cut off ESP header and IV */
+ np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);
+
+ /* move header bytes to fill the gap after ESP header removal */
+ remove_esph(np, op, hlen);
+ return np;
+}
+
+/*
+ * step three for transport mode:
+ * update mbuf metadata:
+ * - packet_type
+ * - ol_flags
+ */
+static inline void
+trs_process_step3(struct rte_mbuf *mb)
+{
+ /* reset mbuf packet type */
+ mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+
+ /* clear the PKT_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
+}
+
+/*
+ * step three for tunnel mode:
+ * update mbuf metadata:
+ * - packet_type
+ * - ol_flags
+ * - tx_offload
+ */
+static inline void
+tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
+{
+ /* reset mbuf metadata: L2/L3 len, packet type */
+ mb->packet_type = RTE_PTYPE_UNKNOWN;
+ mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
+
+ /* clear the PKT_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
+}
+
+/*
+ * *process* function for tunnel packets
+ */
+static inline uint16_t
+tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
+{
+ uint32_t adj, i, k, tl;
+ uint32_t hl[num], to[num];
+ struct rte_esp_tail espt[num];
+ struct rte_mbuf *ml[num];
+ const void *outh;
+ void *inh;
+
+ /*
+ * remove icv, esp trailer and high-order
+ * 32 bits of esn from packet length
+ */
+ const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
+ const uint32_t cofs = sa->ctp.cipher.offset;
+
+ /*
+ * to minimize stalls due to load latency,
+ * read mbufs metadata and esp tail first.
+ */
+ for (i = 0; i != num; i++)
+ process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+
+ adj = hl[i] + cofs;
+ tl = tlen + espt[i].pad_len;
+
+ /* check that packet is valid */
+ if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
+ sa->proto) == 0) {
+
+ outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *,
+ mb[i]->l2_len);
+
+ /* modify packet's layout */
+ inh = tun_process_step2(mb[i], ml[i], hl[i], adj,
+ to[i], tl, sqn + k);
+
+ /* update inner ip header */
+ update_tun_inb_l3hdr(sa, outh, inh);
+
+ /* update mbuf's metadata */
+ tun_process_step3(mb[i], sa->tx_offload.msk,
+ sa->tx_offload.val);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ return k;
+}
+
+/*
+ * *process* function for transport packets
+ */
+static inline uint16_t
+trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
+{
+ char *np;
+ uint32_t i, k, l2, tl;
+ uint32_t hl[num], to[num];
+ struct rte_esp_tail espt[num];
+ struct rte_mbuf *ml[num];
+
+ /*
+ * remove icv, esp trailer and high-order
+ * 32 bits of esn from packet length
+ */
+ const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
+ const uint32_t cofs = sa->ctp.cipher.offset;
+
+ /*
+ * to minimize stalls due to load latency,
+ * read mbufs metadata and esp tail first.
+ */
+ for (i = 0; i != num; i++)
+ process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+
+ tl = tlen + espt[i].pad_len;
+ l2 = mb[i]->l2_len;
+
+ /* check that packet is valid */
+ if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
+ hl[i] + cofs, tl) == 0) {
+
+ /* modify packet's layout */
+ np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
+ to[i], tl, sqn + k);
+ update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
+ l2, hl[i] - l2, espt[i].next_proto);
+
+ /* update mbuf's metadata */
+ trs_process_step3(mb[i]);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ return k;
+}
+
+/*
+ * for group of ESP inbound packets perform SQN check and update.
+ */
+static inline uint16_t
+esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
+ uint32_t dr[], uint16_t num)
+{
+ uint32_t i, k;
+ struct replay_sqn *rsn;
+
+ /* replay not enabled */
+ if (sa->replay.win_sz == 0)
+ return num;
+
+ rsn = rsn_update_start(sa);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+ if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
+ k++;
+ else
+ dr[i - k] = i;
+ }
+
+ rsn_update_finish(sa, rsn);
+ return k;
+}
+
+/*
+ * process group of ESP inbound packets.
+ */
+static inline uint16_t
+esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
+{
+ uint32_t k, n;
+ uint32_t sqn[num];
+ uint32_t dr[num];
+
+ /* process packets, extract seq numbers */
+ k = process(sa, mb, sqn, dr, num, sqh_len);
+
+ /* handle unprocessed mbufs */
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+
+ /* update SQN and replay window */
+ n = esp_inb_rsn_update(sa, sqn, dr, k);
+
+ /* handle mbufs with wrong SQN */
+ if (n != k && n != 0)
+ move_bad_mbufs(mb, dr, k, k - n);
+
+ if (n != num)
+ rte_errno = EBADMSG;
+
+ return n;
+}
+
+/*
+ * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
+ * (synchronous mode).
+ */
+uint16_t
+cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k;
+ struct rte_ipsec_sa *sa;
+ struct replay_sqn *rsn;
+ union sym_op_data icv;
+ void *iv[num];
+ void *aad[num];
+ void *dgst[num];
+ uint32_t dr[num];
+ uint32_t l4ofs[num];
+ uint32_t clen[num];
+ uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
+
+ sa = ss->sa;
+
+ /* grab rsn lock */
+ rsn = rsn_acquire(sa);
+
+ /* do preparation for all packets */
+ for (i = 0, k = 0; i != num; i++) {
+
+ /* calculate ESP header offset */
+ l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;
+
+ /* prepare ESP packet for processing */
+ rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
+ if (rc >= 0) {
+ /* get encrypted data offset and length */
+ clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
+ l4ofs + k, rc, ivbuf[k]);
+
+ /* fill iv, digest and aad */
+ iv[k] = ivbuf[k];
+ aad[k] = icv.va + sa->icv_len;
+ dgst[k++] = icv.va;
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* release rsn lock */
+ rsn_release(sa, rsn);
+
+ /* move mbufs that were not prepared beyond the good ones */
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+
+ /* convert mbufs to iovecs and do actual crypto/auth processing */
+ if (k != 0)
+ cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
+ l4ofs, clen, k);
+ return k;
+}
+
+/*
+ * process group of ESP inbound tunnel packets.
+ */
+uint16_t
+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ struct rte_ipsec_sa *sa = ss->sa;
+
+ return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
+}
+
+uint16_t
+inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
+}
+
+/*
+ * process group of ESP inbound transport packets.
+ */
+uint16_t
+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ struct rte_ipsec_sa *sa = ss->sa;
+
+ return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
+}
+
+uint16_t
+inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);
+}
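
The prepare routines above are normally reached through the public rte_ipsec API. A rough sketch of the inbound lookaside-none flow, assuming an already configured rte_ipsec_session (ss), a crypto device/queue pair (dev_id/qp) and a caller-allocated crypto-op array; a real application would poll the dequeue until all ops complete and group packets per session before processing:

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>

static uint16_t
inb_lksd_none_burst(const struct rte_ipsec_session *ss, uint8_t dev_id,
        uint16_t qp, struct rte_mbuf *mb[], struct rte_crypto_op *cop[],
        uint16_t num)
{
        uint16_t k, n;

        /* fill crypto ops; packets that fail preparation are moved
         * past the good ones and rte_errno is set */
        k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);

        /* hand the ops to the crypto PMD and collect completed ones */
        n = rte_cryptodev_enqueue_burst(dev_id, qp, cop, k);
        n = rte_cryptodev_dequeue_burst(dev_id, qp, cop, n);

        /* final ESP decapsulation, SQN check and replay-window update */
        return rte_ipsec_pkt_process(ss, mb, n);
}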
diff --git a/src/spdk/dpdk/lib/librte_ipsec/esp_outb.c b/src/spdk/dpdk/lib/librte_ipsec/esp_outb.c
new file mode 100644
index 000000000..fb9d5864c
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/esp_outb.c
@@ -0,0 +1,700 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2020 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+ union sym_op_data *icv, uint8_t sqh_len);
+
+/*
+ * helper function to fill crypto_sym op for cipher+auth algorithms.
+ * used by outb_cop_prepare(), see below.
+ */
+static inline void
+sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
+ sop->cipher.data.length = sa->ctp.cipher.length + plen;
+ sop->auth.data.offset = sa->ctp.auth.offset + pofs;
+ sop->auth.data.length = sa->ctp.auth.length + plen;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+}
+
+/*
+ * helper function to fill crypto_sym op for aead algorithms.
+ * used by outb_cop_prepare(), see below.
+ */
+static inline void
+sop_aead_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
+ sop->aead.data.length = sa->ctp.cipher.length + plen;
+ sop->aead.digest.data = icv->va;
+ sop->aead.digest.phys_addr = icv->pa;
+ sop->aead.aad.data = icv->va + sa->icv_len;
+ sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
+}
+
+/*
+ * setup crypto op and crypto sym op for ESP outbound packet.
+ */
+static inline void
+outb_cop_prepare(struct rte_crypto_op *cop,
+ const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
+ const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
+{
+ struct rte_crypto_sym_op *sop;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint32_t algo;
+
+ algo = sa->algo_type;
+
+ /* fill sym op fields */
+ sop = cop->sym;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_CBC:
+ /* Cipher-Auth (AES-CBC *) case */
+ case ALGO_TYPE_3DES_CBC:
+ /* Cipher-Auth (3DES-CBC *) case */
+ case ALGO_TYPE_NULL:
+ /* NULL case */
+ sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
+ break;
+ case ALGO_TYPE_AES_GCM:
+ /* AEAD (AES_GCM) case */
+ sop_aead_prepare(sop, sa, icv, hlen, plen);
+
+ /* fill AAD IV (located inside crypto op) */
+ gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
+ sa->iv_ofs);
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ /* Cipher-Auth (AES-CTR *) case */
+ sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
+
+ /* fill CTR block (located inside crypto op) */
+ ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
+ sa->iv_ofs);
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ }
+}
+
+/*
+ * setup/update packet data and metadata for ESP outbound tunnel case.
+ */
+static inline int32_t
+outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+ union sym_op_data *icv, uint8_t sqh_len)
+{
+ uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
+ struct rte_mbuf *ml;
+ struct rte_esp_hdr *esph;
+ struct rte_esp_tail *espt;
+ char *ph, *pt;
+ uint64_t *iv;
+
+ /* calculate extra header space required */
+ hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
+
+ /* size of ipsec protected data */
+ l2len = mb->l2_len;
+ plen = mb->pkt_len - l2len;
+
+ /* number of bytes to encrypt */
+ clen = plen + sizeof(*espt);
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+ /* pad length + esp tail */
+ pdlen = clen - plen;
+ tlen = pdlen + sa->icv_len + sqh_len;
+
+ /* do append and prepend */
+ ml = rte_pktmbuf_lastseg(mb);
+ if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ /* prepend header */
+ ph = rte_pktmbuf_prepend(mb, hlen - l2len);
+ if (ph == NULL)
+ return -ENOSPC;
+
+ /* append tail */
+ pdofs = ml->data_len;
+ ml->data_len += tlen;
+ mb->pkt_len += tlen;
+ pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
+
+ /* update pkt l2/l3 len */
+ mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
+ sa->tx_offload.val;
+
+ /* copy tunnel pkt header */
+ rte_memcpy(ph, sa->hdr, sa->hdr_len);
+
+ /* update original and new ip header fields */
+ update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
+ mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));
+
+ /* update spi, seqn and iv */
+ esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
+ iv = (uint64_t *)(esph + 1);
+ copy_iv(iv, ivp, sa->iv_len);
+
+ esph->spi = sa->spi;
+ esph->seq = sqn_low32(sqc);
+
+ /* offset for ICV */
+ pdofs += pdlen + sa->sqh_len;
+
+ /* pad length */
+ pdlen -= sizeof(*espt);
+
+ /* copy padding data */
+ rte_memcpy(pt, esp_pad_bytes, pdlen);
+
+ /* update esp trailer */
+ espt = (struct rte_esp_tail *)(pt + pdlen);
+ espt->pad_len = pdlen;
+ espt->next_proto = sa->proto;
+
+ /* set icv va/pa value(s) */
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
+
+ return clen;
+}
+
+/*
+ * for pure cryptodev (lookaside none) depending on SA settings,
+ * we might have to write some extra data to the packet.
+ */
+static inline void
+outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const union sym_op_data *icv)
+{
+ uint32_t *psqh;
+ struct aead_gcm_aad *aad;
+
+ /* insert SQN.hi between ESP trailer and ICV */
+ if (sa->sqh_len != 0) {
+ psqh = (uint32_t *)(icv->va - sa->sqh_len);
+ psqh[0] = sqn_hi32(sqc);
+ }
+
+ /*
+ * fill IV and AAD fields, if any (aad fields are placed after icv),
+ * right now we support only one AEAD algorithm: AES-GCM.
+ */
+ if (sa->aad_len != 0) {
+ aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+ aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
+ }
+}
+
+/*
+ * setup/update packets and crypto ops for ESP outbound tunnel case.
+ */
+uint16_t
+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
+ sa->sqh_len);
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* move mbufs that were not prepared beyond the good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ return k;
+}
+
+/*
+ * setup/update packet data and metadata for ESP outbound transport case.
+ */
+static inline int32_t
+outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+ union sym_op_data *icv, uint8_t sqh_len)
+{
+ uint8_t np;
+ uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
+ struct rte_mbuf *ml;
+ struct rte_esp_hdr *esph;
+ struct rte_esp_tail *espt;
+ char *ph, *pt;
+ uint64_t *iv;
+ uint32_t l2len, l3len;
+
+ l2len = mb->l2_len;
+ l3len = mb->l3_len;
+
+ uhlen = l2len + l3len;
+ plen = mb->pkt_len - uhlen;
+
+ /* calculate extra header space required */
+ hlen = sa->iv_len + sizeof(*esph);
+
+ /* number of bytes to encrypt */
+ clen = plen + sizeof(*espt);
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+ /* pad length + esp tail */
+ pdlen = clen - plen;
+ tlen = pdlen + sa->icv_len + sqh_len;
+
+ /* do append and insert */
+ ml = rte_pktmbuf_lastseg(mb);
+ if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ /* prepend space for ESP header */
+ ph = rte_pktmbuf_prepend(mb, hlen);
+ if (ph == NULL)
+ return -ENOSPC;
+
+ /* append tail */
+ pdofs = ml->data_len;
+ ml->data_len += tlen;
+ mb->pkt_len += tlen;
+ pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
+
+ /* shift L2/L3 headers */
+ insert_esph(ph, ph + hlen, uhlen);
+
+ /* update ip header fields */
+ np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
+ l3len, IPPROTO_ESP);
+
+ /* update spi, seqn and iv */
+ esph = (struct rte_esp_hdr *)(ph + uhlen);
+ iv = (uint64_t *)(esph + 1);
+ copy_iv(iv, ivp, sa->iv_len);
+
+ esph->spi = sa->spi;
+ esph->seq = sqn_low32(sqc);
+
+ /* offset for ICV */
+ pdofs += pdlen + sa->sqh_len;
+
+ /* pad length */
+ pdlen -= sizeof(*espt);
+
+ /* copy padding data */
+ rte_memcpy(pt, esp_pad_bytes, pdlen);
+
+ /* update esp trailer */
+ espt = (struct rte_esp_tail *)(pt + pdlen);
+ espt->pad_len = pdlen;
+ espt->next_proto = np;
+
+ /* set icv va/pa value(s) */
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
+
+ return clen;
+}
+
+/*
+ * setup/update packets and crypto ops for ESP outbound transport case.
+ */
+uint16_t
+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n, l2, l3;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
+ sa->sqh_len);
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* move mbufs that were not prepared beyond the good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ return k;
+}
+
+
+static inline uint32_t
+outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
+ uint32_t plen, void *iv)
+{
+ uint64_t *ivp = iv;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint32_t clen;
+
+ switch (sa->algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ ctr = iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ }
+
+ *pofs += sa->ctp.auth.offset;
+ clen = plen + sa->ctp.auth.length;
+ return clen;
+}
+
+static uint16_t
+cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num,
+ esp_outb_prepare_t prepare, uint32_t cofs_mask)
+{
+ int32_t rc;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ uint32_t i, k, n;
+ uint32_t l2, l3;
+ union sym_op_data icv;
+ void *iv[num];
+ void *aad[num];
+ void *dgst[num];
+ uint32_t dr[num];
+ uint32_t l4ofs[num];
+ uint32_t clen[num];
+ uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
+
+ sa = ss->sa;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ for (i = 0, k = 0; i != n; i++) {
+
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ /* calculate ESP header offset */
+ l4ofs[k] = (l2 + l3) & cofs_mask;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(ivbuf[k], sqc);
+
+ /* try to update the packet itself */
+ rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
+
+ /* success, proceed with preparations */
+ if (rc >= 0) {
+
+ outb_pkt_xprepare(sa, sqc, &icv);
+
+ /* get encrypted data offset and length */
+ clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
+ ivbuf[k]);
+
+ /* fill iv, digest and aad */
+ iv[k] = ivbuf[k];
+ aad[k] = icv.va + sa->icv_len;
+ dgst[k++] = icv.va;
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* move mbufs that were not prepared beyond the good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ /* convert mbufs to iovecs and do actual crypto/auth processing */
+ if (k != 0)
+ cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
+ l4ofs, clen, k);
+ return k;
+}
+
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
+}
+
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
+ UINT32_MAX);
+}
+
+/*
+ * process outbound packets for SA with ESN support,
+ * for algorithms that require SQN.hibits to be implicitly included
+ * into digest computation.
+ * In that case we have to move ICV bytes back to their proper place.
+ */
+uint16_t
+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num)
+{
+ uint32_t i, k, icv_len, *icv;
+ struct rte_mbuf *ml;
+ struct rte_ipsec_sa *sa;
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ k = 0;
+ icv_len = sa->icv_len;
+
+ for (i = 0; i != num; i++) {
+ if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
+ ml = rte_pktmbuf_lastseg(mb[i]);
+ /* remove high-order 32 bits of esn from packet len */
+ mb[i]->pkt_len -= sa->sqh_len;
+ ml->data_len -= sa->sqh_len;
+ icv = rte_pktmbuf_mtod_offset(ml, void *,
+ ml->data_len - icv_len);
+ remove_sqh(icv, icv_len);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ /* handle unprocessed mbufs */
+ if (k != num) {
+ rte_errno = EBADMSG;
+ if (k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+ }
+
+ return k;
+}
+
+/*
+ * prepare packets for inline ipsec processing:
+ * set ol_flags and attach metadata.
+ */
+static inline void
+inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ uint32_t i, ol_flags;
+
+ ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
+ for (i = 0; i != num; i++) {
+
+ mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ if (ol_flags != 0)
+ rte_security_set_pkt_metadata(ss->security.ctx,
+ ss->security.ses, mb[i], NULL);
+ }
+}
+
+/*
+ * process group of ESP outbound tunnel packets destined for
+ * INLINE_CRYPTO type of device.
+ */
+uint16_t
+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+
+ k += (rc >= 0);
+
+ /* failure, put packet into the death-row */
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* move mbufs that were not processed beyond the good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ inline_outb_mbuf_prepare(ss, mb, k);
+ return k;
+}
+
+/*
+ * process group of ESP outbound transport packets destined for
+ * INLINE_CRYPTO type of device.
+ */
+uint16_t
+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+
+ k += (rc >= 0);
+
+ /* failure, put packet into the death-row */
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* move mbufs that were not processed beyond the good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ inline_outb_mbuf_prepare(ss, mb, k);
+ return k;
+}
+
+/*
+ * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ * actual processing is done by HW/PMD, just set flags and metadata.
+ */
+uint16_t
+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ inline_outb_mbuf_prepare(ss, mb, num);
+ return num;
+}
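
A standalone arithmetic sketch of how outb_tun_pkt_prepare()/outb_trs_pkt_prepare() above size the data appended to the packet (example values only; pad_align is assumed to be a power of two, as RTE_ALIGN_CEIL requires):

#include <stdint.h>
#include <stdio.h>

/* same rounding as RTE_ALIGN_CEIL for power-of-two alignments */
#define ALIGN_CEIL(v, a)        (((v) + (a) - 1) & ~((uint32_t)(a) - 1))

int
main(void)
{
        uint32_t plen = 61;             /* bytes to be protected by ESP */
        uint32_t pad_align = 16;        /* e.g. AES-CBC block size */
        uint32_t esp_tail = 2;          /* pad_len + next_proto fields */
        uint32_t icv_len = 16;          /* example ICV length */
        uint32_t sqh_len = 4;           /* ESN high word, 0 when ESN is off */

        /* bytes to encrypt: payload + ESP tail, rounded up to pad_align */
        uint32_t clen = ALIGN_CEIL(plen + esp_tail, pad_align);
        /* padding bytes + ESP tail */
        uint32_t pdlen = clen - plen;
        /* total bytes appended to the last segment */
        uint32_t tlen = pdlen + icv_len + sqh_len;

        printf("clen=%u pdlen=%u tlen=%u\n", clen, pdlen, tlen);
        return 0;
}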
diff --git a/src/spdk/dpdk/lib/librte_ipsec/iph.h b/src/spdk/dpdk/lib/librte_ipsec/iph.h
new file mode 100644
index 000000000..861f16905
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/iph.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IPH_H_
+#define _IPH_H_
+
+#include <rte_ip.h>
+
+/**
+ * @file iph.h
+ * Contains functions/structures/macros to manipulate IPv4/IPv6 headers
+ * used internally by ipsec library.
+ */
+
+/*
+ * Move preceding (L3) headers down to remove ESP header and IV.
+ */
+static inline void
+remove_esph(char *np, char *op, uint32_t hlen)
+{
+ uint32_t i;
+
+ for (i = hlen; i-- != 0; np[i] = op[i])
+ ;
+}
+
+/*
+ * Move preceding (L3) headers up to free space for ESP header and IV.
+ */
+static inline void
+insert_esph(char *np, char *op, uint32_t hlen)
+{
+ uint32_t i;
+
+ for (i = 0; i != hlen; i++)
+ np[i] = op[i];
+}
+
+/* update original ip header fields for transport case */
+static inline int
+update_trs_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,
+ uint32_t l2len, uint32_t l3len, uint8_t proto)
+{
+ int32_t rc;
+
+ /* IPv4 */
+ if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
+ struct rte_ipv4_hdr *v4h;
+
+ v4h = p;
+ rc = v4h->next_proto_id;
+ v4h->next_proto_id = proto;
+ v4h->total_length = rte_cpu_to_be_16(plen - l2len);
+ /* IPv6 */
+ } else {
+ struct rte_ipv6_hdr *v6h;
+ uint8_t *p_nh;
+
+ v6h = p;
+
+ /* basic IPv6 header with no extensions */
+ if (l3len == sizeof(struct rte_ipv6_hdr))
+ p_nh = &v6h->proto;
+
+ /* IPv6 with extensions */
+ else {
+ size_t ext_len;
+ int nh;
+ uint8_t *pd, *plimit;
+
+ /* locate last extension within l3len bytes */
+ pd = (uint8_t *)p;
+ plimit = pd + l3len;
+ ext_len = sizeof(struct rte_ipv6_hdr);
+ nh = v6h->proto;
+ while (pd + ext_len < plimit) {
+ pd += ext_len;
+ nh = rte_ipv6_get_next_ext(pd, nh, &ext_len);
+ if (unlikely(nh < 0))
+ return -EINVAL;
+ }
+
+ /* invalid l3len - extension exceeds header length */
+ if (unlikely(pd + ext_len != plimit))
+ return -EINVAL;
+
+ /* save last extension offset */
+ p_nh = pd;
+ }
+
+ /* update header type; return original value */
+ rc = *p_nh;
+ *p_nh = proto;
+
+ /* fix packet length */
+ v6h->payload_len = rte_cpu_to_be_16(plen - l2len -
+ sizeof(*v6h));
+ }
+
+ return rc;
+}
+
+/*
+ * Inline functions to get and set ipv6 packet header traffic class (TC) field.
+ */
+static inline uint8_t
+get_ipv6_tc(rte_be32_t vtc_flow)
+{
+ uint32_t v;
+
+ v = rte_be_to_cpu_32(vtc_flow);
+ return v >> RTE_IPV6_HDR_TC_SHIFT;
+}
+
+static inline rte_be32_t
+set_ipv6_tc(rte_be32_t vtc_flow, uint32_t tos)
+{
+ uint32_t v;
+
+ v = rte_cpu_to_be_32(tos << RTE_IPV6_HDR_TC_SHIFT);
+ vtc_flow &= ~rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK);
+
+ return (v | vtc_flow);
+}
+
+/**
+ * Update type-of-service/traffic-class field of outbound tunnel packet.
+ *
+ * @param ref_h: reference header, for outbound it is inner header, otherwise
+ * outer header.
+ * @param update_h: header to be updated tos/tc field, for outbound it is outer
+ * header, otherwise inner header.
+ * @param tos_mask: type-of-service mask stored in sa.
+ * @param is_outh_ipv4: 1 if outer header is ipv4, 0 if it is ipv6.
+ * @param is_inner_ipv4: 1 if inner header is ipv4, 0 if it is ipv6.
+ */
+static inline void
+update_outb_tun_tos(const void *ref_h, void *update_h, uint32_t tos_mask,
+ uint8_t is_outh_ipv4, uint8_t is_inh_ipv4)
+{
+ uint8_t idx = ((is_outh_ipv4 << 1) | is_inh_ipv4);
+ struct rte_ipv4_hdr *v4out_h;
+ struct rte_ipv6_hdr *v6out_h;
+ uint32_t itp, otp;
+
+ switch (idx) {
+ case 0: /*outh ipv6, inh ipv6 */
+ v6out_h = update_h;
+ otp = get_ipv6_tc(v6out_h->vtc_flow) & ~tos_mask;
+ itp = get_ipv6_tc(((const struct rte_ipv6_hdr *)ref_h)->
+ vtc_flow) & tos_mask;
+ v6out_h->vtc_flow = set_ipv6_tc(v6out_h->vtc_flow, otp | itp);
+ break;
+ case 1: /*outh ipv6, inh ipv4 */
+ v6out_h = update_h;
+ otp = get_ipv6_tc(v6out_h->vtc_flow) & ~tos_mask;
+ itp = ((const struct rte_ipv4_hdr *)ref_h)->type_of_service &
+ tos_mask;
+ v6out_h->vtc_flow = set_ipv6_tc(v6out_h->vtc_flow, otp | itp);
+ break;
+ case 2: /*outh ipv4, inh ipv6 */
+ v4out_h = update_h;
+ otp = v4out_h->type_of_service & ~tos_mask;
+ itp = get_ipv6_tc(((const struct rte_ipv6_hdr *)ref_h)->
+ vtc_flow) & tos_mask;
+ v4out_h->type_of_service = (otp | itp);
+ break;
+ case 3: /* outh ipv4, inh ipv4 */
+ v4out_h = update_h;
+ otp = v4out_h->type_of_service & ~tos_mask;
+ itp = ((const struct rte_ipv4_hdr *)ref_h)->type_of_service &
+ tos_mask;
+ v4out_h->type_of_service = (otp | itp);
+ break;
+ }
+}
+
+/**
+ * Update type-of-service/traffic-class field of inbound tunnel packet.
+ *
+ * @param ref_h: reference header, for outbound it is inner header, otherwise
+ * outer header.
+ * @param update_h: header to be updated tos/tc field, for outbound it is outer
+ * header, otherwise inner header.
+ * @param is_outh_ipv4: 1 if outer header is ipv4, 0 if it is ipv6.
+ * @param is_inner_ipv4: 1 if inner header is ipv4, 0 if it is ipv6.
+ */
+static inline void
+update_inb_tun_tos(const void *ref_h, void *update_h,
+ uint8_t is_outh_ipv4, uint8_t is_inh_ipv4)
+{
+ uint8_t idx = ((is_outh_ipv4 << 1) | is_inh_ipv4);
+ struct rte_ipv4_hdr *v4in_h;
+ struct rte_ipv6_hdr *v6in_h;
+ uint8_t ecn_v4out, ecn_v4in;
+ uint32_t ecn_v6out, ecn_v6in;
+
+ switch (idx) {
+ case 0: /* outh ipv6, inh ipv6 */
+ v6in_h = update_h;
+ ecn_v6out = ((const struct rte_ipv6_hdr *)ref_h)->vtc_flow &
+ rte_cpu_to_be_32(RTE_IPV6_HDR_ECN_MASK);
+ ecn_v6in = v6in_h->vtc_flow &
+ rte_cpu_to_be_32(RTE_IPV6_HDR_ECN_MASK);
+ if ((ecn_v6out == rte_cpu_to_be_32(RTE_IPV6_HDR_ECN_CE)) &&
+ (ecn_v6in != 0))
+ v6in_h->vtc_flow |=
+ rte_cpu_to_be_32(RTE_IPV6_HDR_ECN_CE);
+ break;
+ case 1: /* outh ipv6, inh ipv4 */
+ v4in_h = update_h;
+ ecn_v6out = ((const struct rte_ipv6_hdr *)ref_h)->vtc_flow &
+ rte_cpu_to_be_32(RTE_IPV6_HDR_ECN_MASK);
+ ecn_v4in = v4in_h->type_of_service & RTE_IPV4_HDR_ECN_MASK;
+ if ((ecn_v6out == rte_cpu_to_be_32(RTE_IPV6_HDR_ECN_CE)) &&
+ (ecn_v4in != 0))
+ v4in_h->type_of_service |= RTE_IPV4_HDR_ECN_CE;
+ break;
+ case 2: /* outh ipv4, inh ipv6 */
+ v6in_h = update_h;
+ ecn_v4out = ((const struct rte_ipv4_hdr *)ref_h)->
+ type_of_service & RTE_IPV4_HDR_ECN_MASK;
+ ecn_v6in = v6in_h->vtc_flow &
+ rte_cpu_to_be_32(RTE_IPV6_HDR_ECN_MASK);
+ if (ecn_v4out == RTE_IPV4_HDR_ECN_CE && ecn_v6in != 0)
+ v6in_h->vtc_flow |=
+ rte_cpu_to_be_32(RTE_IPV6_HDR_ECN_CE);
+ break;
+ case 3: /* outh ipv4, inh ipv4 */
+ v4in_h = update_h;
+ ecn_v4out = ((const struct rte_ipv4_hdr *)ref_h)->
+ type_of_service & RTE_IPV4_HDR_ECN_MASK;
+ ecn_v4in = v4in_h->type_of_service & RTE_IPV4_HDR_ECN_MASK;
+ if (ecn_v4out == RTE_IPV4_HDR_ECN_CE && ecn_v4in != 0)
+ v4in_h->type_of_service |= RTE_IPV4_HDR_ECN_CE;
+ break;
+ }
+}
+
+/* update original and new ip header fields for tunnel case */
+static inline void
+update_tun_outb_l3hdr(const struct rte_ipsec_sa *sa, void *outh,
+ const void *inh, uint32_t plen, uint32_t l2len, rte_be16_t pid)
+{
+ struct rte_ipv4_hdr *v4h;
+ struct rte_ipv6_hdr *v6h;
+ uint8_t is_outh_ipv4;
+
+ if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) {
+ is_outh_ipv4 = 1;
+ v4h = outh;
+ v4h->packet_id = pid;
+ v4h->total_length = rte_cpu_to_be_16(plen - l2len);
+ } else {
+ is_outh_ipv4 = 0;
+ v6h = outh;
+ v6h->payload_len = rte_cpu_to_be_16(plen - l2len -
+ sizeof(*v6h));
+ }
+
+ if (sa->type & TUN_HDR_MSK)
+ update_outb_tun_tos(inh, outh, sa->tos_mask, is_outh_ipv4,
+ ((sa->type & RTE_IPSEC_SATP_IPV_MASK) ==
+ RTE_IPSEC_SATP_IPV4));
+}
+
+static inline void
+update_tun_inb_l3hdr(const struct rte_ipsec_sa *sa, const void *outh,
+ void *inh)
+{
+ if (sa->type & TUN_HDR_MSK)
+ update_inb_tun_tos(outh, inh,
+ ((sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) != 0),
+ ((sa->type & RTE_IPSEC_SATP_IPV_MASK) ==
+ RTE_IPSEC_SATP_IPV4));
+}
+
+#endif /* _IPH_H_ */
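
A standalone sketch of the traffic-class bit manipulation performed by get_ipv6_tc()/set_ipv6_tc() above, assuming the usual vtc_flow layout (4-bit version, 8-bit traffic class, 20-bit flow label) and working on a host-order copy of the field to keep byte-order conversions out of the example:

#include <stdint.h>
#include <stdio.h>

#define TC_SHIFT        20              /* plays the role of RTE_IPV6_HDR_TC_SHIFT */
#define TC_MASK         0x0ff00000u     /* plays the role of RTE_IPV6_HDR_TC_MASK */

int
main(void)
{
        /* version = 6, traffic class = 0x2e (DSCP EF), flow label = 0x12345 */
        uint32_t vtc_flow = (6u << 28) | (0x2eu << TC_SHIFT) | 0x12345u;

        /* extract the traffic class */
        uint32_t tc = (vtc_flow & TC_MASK) >> TC_SHIFT;

        /* replace the traffic class, keeping version and flow label intact */
        uint32_t new_tc = 0x00;
        vtc_flow = (vtc_flow & ~TC_MASK) | (new_tc << TC_SHIFT);

        printf("old tc=0x%02x new vtc_flow=0x%08x\n", tc, vtc_flow);
        return 0;
}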
diff --git a/src/spdk/dpdk/lib/librte_ipsec/ipsec_sad.c b/src/spdk/dpdk/lib/librte_ipsec/ipsec_sad.c
new file mode 100644
index 000000000..3f9533c80
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/ipsec_sad.c
@@ -0,0 +1,559 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_rwlock.h>
+#include <rte_tailq.h>
+
+#include "rte_ipsec_sad.h"
+
+/*
+ * Rules are stored in three hash tables depending on key_type.
+ * Each rule will also be stored in SPI_ONLY table.
+ * For each data entry within this table the last two bits are reserved to
+ * indicate the presence of entries with the same SPI in the DIP and DIP+SIP tables.
+ */
+
+#define SAD_PREFIX "SAD_"
+/* "SAD_<name>" */
+#define SAD_FORMAT SAD_PREFIX "%s"
+
+#define DEFAULT_HASH_FUNC rte_hash_crc
+#define MIN_HASH_ENTRIES 8U /* From rte_cuckoo_hash.h */
+
+struct hash_cnt {
+ uint32_t cnt_dip;
+ uint32_t cnt_dip_sip;
+};
+
+struct rte_ipsec_sad {
+ char name[RTE_IPSEC_SAD_NAMESIZE];
+ struct rte_hash *hash[RTE_IPSEC_SAD_KEY_TYPE_MASK];
+ uint32_t keysize[RTE_IPSEC_SAD_KEY_TYPE_MASK];
+ uint32_t init_val;
+ /* Array to track number of more specific rules
+ * (spi_dip or spi_dip_sip). Used only in add/delete
+ * as a helper struct.
+ */
+ __extension__ struct hash_cnt cnt_arr[];
+};
+
+TAILQ_HEAD(rte_ipsec_sad_list, rte_tailq_entry);
+static struct rte_tailq_elem rte_ipsec_sad_tailq = {
+ .name = "RTE_IPSEC_SAD",
+};
+EAL_REGISTER_TAILQ(rte_ipsec_sad_tailq)
+
+#define SET_BIT(ptr, bit) (void *)((uintptr_t)(ptr) | (uintptr_t)(bit))
+#define CLEAR_BIT(ptr, bit) (void *)((uintptr_t)(ptr) & ~(uintptr_t)(bit))
+#define GET_BIT(ptr, bit) (void *)((uintptr_t)(ptr) & (uintptr_t)(bit))
+
+/*
+ * @internal helper function
+ * Add a rule of type SPI_DIP or SPI_DIP_SIP.
+ * Inserts a rule into an appropriate hash table,
+ * updates the value for a given SPI in SPI_ONLY hash table
+ * reflecting presence of more specific rule type in two LSBs.
+ * Updates a counter that reflects the number of rules with the same SPI.
+ */
+static inline int
+add_specific(struct rte_ipsec_sad *sad, const void *key,
+ int key_type, void *sa)
+{
+ void *tmp_val;
+ int ret, notexist;
+
+ /* Check if the key is present in the table.
+ * Needed for further accounting in cnt_arr
+ */
+ ret = rte_hash_lookup_with_hash(sad->hash[key_type], key,
+ rte_hash_crc(key, sad->keysize[key_type], sad->init_val));
+ notexist = (ret == -ENOENT);
+
+ /* Add an SA to the corresponding table.*/
+ ret = rte_hash_add_key_with_hash_data(sad->hash[key_type], key,
+ rte_hash_crc(key, sad->keysize[key_type], sad->init_val), sa);
+ if (ret != 0)
+ return ret;
+
+ /* Check if there is an entry in SPI only table with the same SPI */
+ ret = rte_hash_lookup_with_hash_data(sad->hash[RTE_IPSEC_SAD_SPI_ONLY],
+ key, rte_hash_crc(key, sad->keysize[RTE_IPSEC_SAD_SPI_ONLY],
+ sad->init_val), &tmp_val);
+ if (ret < 0)
+ tmp_val = NULL;
+ tmp_val = SET_BIT(tmp_val, key_type);
+
+ /* Add an entry into SPI only table */
+ ret = rte_hash_add_key_with_hash_data(
+ sad->hash[RTE_IPSEC_SAD_SPI_ONLY], key,
+ rte_hash_crc(key, sad->keysize[RTE_IPSEC_SAD_SPI_ONLY],
+ sad->init_val), tmp_val);
+ if (ret != 0)
+ return ret;
+
+ /* Update a counter for a given SPI */
+ ret = rte_hash_lookup_with_hash(sad->hash[RTE_IPSEC_SAD_SPI_ONLY], key,
+ rte_hash_crc(key, sad->keysize[RTE_IPSEC_SAD_SPI_ONLY],
+ sad->init_val));
+ if (ret < 0)
+ return ret;
+ if (key_type == RTE_IPSEC_SAD_SPI_DIP)
+ sad->cnt_arr[ret].cnt_dip += notexist;
+ else
+ sad->cnt_arr[ret].cnt_dip_sip += notexist;
+
+ return 0;
+}
+
+int
+rte_ipsec_sad_add(struct rte_ipsec_sad *sad,
+ const union rte_ipsec_sad_key *key,
+ int key_type, void *sa)
+{
+ void *tmp_val;
+ int ret;
+
+ if ((sad == NULL) || (key == NULL) || (sa == NULL) ||
+ /* sa must be 4 byte aligned */
+ (GET_BIT(sa, RTE_IPSEC_SAD_KEY_TYPE_MASK) != 0))
+ return -EINVAL;
+
+ /*
+ * Rules are stored in three hash tables depending on key_type.
+ * All rules will also have an entry in SPI_ONLY table, with entry
+ * value's two LSB's also indicating presence of rule with this SPI
+ * in other tables.
+ */
+ switch (key_type) {
+ case(RTE_IPSEC_SAD_SPI_ONLY):
+ ret = rte_hash_lookup_with_hash_data(sad->hash[key_type],
+ key, rte_hash_crc(key, sad->keysize[key_type],
+ sad->init_val), &tmp_val);
+ if (ret >= 0)
+ tmp_val = SET_BIT(sa, GET_BIT(tmp_val,
+ RTE_IPSEC_SAD_KEY_TYPE_MASK));
+ else
+ tmp_val = sa;
+ ret = rte_hash_add_key_with_hash_data(sad->hash[key_type],
+ key, rte_hash_crc(key, sad->keysize[key_type],
+ sad->init_val), tmp_val);
+ return ret;
+ case(RTE_IPSEC_SAD_SPI_DIP):
+ case(RTE_IPSEC_SAD_SPI_DIP_SIP):
+ return add_specific(sad, key, key_type, sa);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * @internal helper function
+ * Delete a rule of type SPI_DIP or SPI_DIP_SIP.
+ * Deletes an entry from an appropriate hash table and decrements
+ * an entry counter for given SPI.
+ * If the entry to remove is the last one with the given SPI within the table,
+ * then it will also update the related entry in the SPI_ONLY table.
+ * Removes an entry from the SPI_ONLY hash table if there is no rule left
+ * for this SPI in any table.
+ */
+static inline int
+del_specific(struct rte_ipsec_sad *sad, const void *key, int key_type)
+{
+ void *tmp_val;
+ int ret;
+ uint32_t *cnt;
+
+ /* Remove an SA from the corresponding table.*/
+ ret = rte_hash_del_key_with_hash(sad->hash[key_type], key,
+ rte_hash_crc(key, sad->keysize[key_type], sad->init_val));
+ if (ret < 0)
+ return ret;
+
+ /* Get an index of cnt_arr entry for a given SPI */
+ ret = rte_hash_lookup_with_hash_data(sad->hash[RTE_IPSEC_SAD_SPI_ONLY],
+ key, rte_hash_crc(key, sad->keysize[RTE_IPSEC_SAD_SPI_ONLY],
+ sad->init_val), &tmp_val);
+ if (ret < 0)
+ return ret;
+ cnt = (key_type == RTE_IPSEC_SAD_SPI_DIP) ?
+ &sad->cnt_arr[ret].cnt_dip :
+ &sad->cnt_arr[ret].cnt_dip_sip;
+ if (--(*cnt) != 0)
+ return 0;
+
+ /* corresponding counter is 0, clear the bit indicating
+ * the presence of more specific rule for a given SPI.
+ */
+ tmp_val = CLEAR_BIT(tmp_val, key_type);
+
+ /* if there are no rules left with same SPI,
+ * remove an entry from SPI_only table
+ */
+ if (tmp_val == NULL)
+ ret = rte_hash_del_key_with_hash(
+ sad->hash[RTE_IPSEC_SAD_SPI_ONLY], key,
+ rte_hash_crc(key, sad->keysize[RTE_IPSEC_SAD_SPI_ONLY],
+ sad->init_val));
+ else
+ ret = rte_hash_add_key_with_hash_data(
+ sad->hash[RTE_IPSEC_SAD_SPI_ONLY], key,
+ rte_hash_crc(key, sad->keysize[RTE_IPSEC_SAD_SPI_ONLY],
+ sad->init_val), tmp_val);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+int
+rte_ipsec_sad_del(struct rte_ipsec_sad *sad,
+ const union rte_ipsec_sad_key *key,
+ int key_type)
+{
+ void *tmp_val;
+ int ret;
+
+ if ((sad == NULL) || (key == NULL))
+ return -EINVAL;
+ switch (key_type) {
+ case(RTE_IPSEC_SAD_SPI_ONLY):
+ ret = rte_hash_lookup_with_hash_data(sad->hash[key_type],
+ key, rte_hash_crc(key, sad->keysize[key_type],
+ sad->init_val), &tmp_val);
+ if (ret < 0)
+ return ret;
+ if (GET_BIT(tmp_val, RTE_IPSEC_SAD_KEY_TYPE_MASK) == 0) {
+ ret = rte_hash_del_key_with_hash(sad->hash[key_type],
+ key, rte_hash_crc(key, sad->keysize[key_type],
+ sad->init_val));
+ ret = ret < 0 ? ret : 0;
+ } else {
+ tmp_val = GET_BIT(tmp_val,
+ RTE_IPSEC_SAD_KEY_TYPE_MASK);
+ ret = rte_hash_add_key_with_hash_data(
+ sad->hash[key_type], key,
+ rte_hash_crc(key, sad->keysize[key_type],
+ sad->init_val), tmp_val);
+ }
+ return ret;
+ case(RTE_IPSEC_SAD_SPI_DIP):
+ case(RTE_IPSEC_SAD_SPI_DIP_SIP):
+ return del_specific(sad, key, key_type);
+ default:
+ return -EINVAL;
+ }
+}
+
+struct rte_ipsec_sad *
+rte_ipsec_sad_create(const char *name, const struct rte_ipsec_sad_conf *conf)
+{
+ char hash_name[RTE_HASH_NAMESIZE];
+ char sad_name[RTE_IPSEC_SAD_NAMESIZE];
+ struct rte_tailq_entry *te;
+ struct rte_ipsec_sad_list *sad_list;
+ struct rte_ipsec_sad *sad, *tmp_sad = NULL;
+ struct rte_hash_parameters hash_params = {0};
+ int ret;
+ uint32_t sa_sum;
+
+ RTE_BUILD_BUG_ON(RTE_IPSEC_SAD_KEY_TYPE_MASK != 3);
+
+ if ((name == NULL) || (conf == NULL) ||
+ ((conf->max_sa[RTE_IPSEC_SAD_SPI_ONLY] == 0) &&
+ (conf->max_sa[RTE_IPSEC_SAD_SPI_DIP] == 0) &&
+ (conf->max_sa[RTE_IPSEC_SAD_SPI_DIP_SIP] == 0))) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ ret = snprintf(sad_name, RTE_IPSEC_SAD_NAMESIZE, SAD_FORMAT, name);
+ if (ret < 0 || ret >= RTE_IPSEC_SAD_NAMESIZE) {
+ rte_errno = ENAMETOOLONG;
+ return NULL;
+ }
+
+ /** Init SAD */
+ sa_sum = RTE_MAX(MIN_HASH_ENTRIES,
+ conf->max_sa[RTE_IPSEC_SAD_SPI_ONLY]) +
+ RTE_MAX(MIN_HASH_ENTRIES,
+ conf->max_sa[RTE_IPSEC_SAD_SPI_DIP]) +
+ RTE_MAX(MIN_HASH_ENTRIES,
+ conf->max_sa[RTE_IPSEC_SAD_SPI_DIP_SIP]);
+ sad = rte_zmalloc_socket(NULL, sizeof(*sad) +
+ (sizeof(struct hash_cnt) * sa_sum),
+ RTE_CACHE_LINE_SIZE, conf->socket_id);
+ if (sad == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ memcpy(sad->name, sad_name, sizeof(sad_name));
+
+ hash_params.hash_func = DEFAULT_HASH_FUNC;
+ hash_params.hash_func_init_val = rte_rand();
+ sad->init_val = hash_params.hash_func_init_val;
+ hash_params.socket_id = conf->socket_id;
+ hash_params.name = hash_name;
+ if (conf->flags & RTE_IPSEC_SAD_FLAG_RW_CONCURRENCY)
+ hash_params.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY;
+
+ /** Init hash[RTE_IPSEC_SAD_SPI_ONLY] for SPI only */
+ snprintf(hash_name, sizeof(hash_name), "sad_1_%p", sad);
+ hash_params.key_len = sizeof(((struct rte_ipsec_sadv4_key *)0)->spi);
+ sad->keysize[RTE_IPSEC_SAD_SPI_ONLY] = hash_params.key_len;
+ hash_params.entries = sa_sum;
+ sad->hash[RTE_IPSEC_SAD_SPI_ONLY] = rte_hash_create(&hash_params);
+ if (sad->hash[RTE_IPSEC_SAD_SPI_ONLY] == NULL) {
+ rte_ipsec_sad_destroy(sad);
+ return NULL;
+ }
+
+ /** Init hash[RTE_IPSEC_SAD_SPI_DIP] for SPI + DIP */
+ snprintf(hash_name, sizeof(hash_name), "sad_2_%p", sad);
+ if (conf->flags & RTE_IPSEC_SAD_FLAG_IPV6)
+ hash_params.key_len +=
+ sizeof(((struct rte_ipsec_sadv6_key *)0)->dip);
+ else
+ hash_params.key_len +=
+ sizeof(((struct rte_ipsec_sadv4_key *)0)->dip);
+ sad->keysize[RTE_IPSEC_SAD_SPI_DIP] = hash_params.key_len;
+ hash_params.entries = RTE_MAX(MIN_HASH_ENTRIES,
+ conf->max_sa[RTE_IPSEC_SAD_SPI_DIP]);
+ sad->hash[RTE_IPSEC_SAD_SPI_DIP] = rte_hash_create(&hash_params);
+ if (sad->hash[RTE_IPSEC_SAD_SPI_DIP] == NULL) {
+ rte_ipsec_sad_destroy(sad);
+ return NULL;
+ }
+
+ /** Init hash[RTE_IPSEC_SAD_SPI_DIP_SIP] for SPI + DIP + SIP */
+ snprintf(hash_name, sizeof(hash_name), "sad_3_%p", sad);
+ if (conf->flags & RTE_IPSEC_SAD_FLAG_IPV6)
+ hash_params.key_len +=
+ sizeof(((struct rte_ipsec_sadv6_key *)0)->sip);
+ else
+ hash_params.key_len +=
+ sizeof(((struct rte_ipsec_sadv4_key *)0)->sip);
+ sad->keysize[RTE_IPSEC_SAD_SPI_DIP_SIP] = hash_params.key_len;
+ hash_params.entries = RTE_MAX(MIN_HASH_ENTRIES,
+ conf->max_sa[RTE_IPSEC_SAD_SPI_DIP_SIP]);
+ sad->hash[RTE_IPSEC_SAD_SPI_DIP_SIP] = rte_hash_create(&hash_params);
+ if (sad->hash[RTE_IPSEC_SAD_SPI_DIP_SIP] == NULL) {
+ rte_ipsec_sad_destroy(sad);
+ return NULL;
+ }
+
+ sad_list = RTE_TAILQ_CAST(rte_ipsec_sad_tailq.head,
+ rte_ipsec_sad_list);
+ rte_mcfg_tailq_write_lock();
+ /* guarantee there is no existing entry with the same name */
+ TAILQ_FOREACH(te, sad_list, next) {
+ tmp_sad = (struct rte_ipsec_sad *)te->data;
+ if (strncmp(sad_name, tmp_sad->name,
+ RTE_IPSEC_SAD_NAMESIZE) == 0)
+ break;
+ }
+ if (te != NULL) {
+ rte_mcfg_tailq_write_unlock();
+ rte_errno = EEXIST;
+ rte_ipsec_sad_destroy(sad);
+ return NULL;
+ }
+
+ /* allocate tailq entry */
+ te = rte_zmalloc("IPSEC_SAD_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ rte_mcfg_tailq_write_unlock();
+ rte_errno = ENOMEM;
+ rte_ipsec_sad_destroy(sad);
+ return NULL;
+ }
+
+ te->data = (void *)sad;
+ TAILQ_INSERT_TAIL(sad_list, te, next);
+ rte_mcfg_tailq_write_unlock();
+ return sad;
+}
+
+struct rte_ipsec_sad *
+rte_ipsec_sad_find_existing(const char *name)
+{
+ char sad_name[RTE_IPSEC_SAD_NAMESIZE];
+ struct rte_ipsec_sad *sad = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_ipsec_sad_list *sad_list;
+ int ret;
+
+ ret = snprintf(sad_name, RTE_IPSEC_SAD_NAMESIZE, SAD_FORMAT, name);
+ if (ret < 0 || ret >= RTE_IPSEC_SAD_NAMESIZE) {
+ rte_errno = ENAMETOOLONG;
+ return NULL;
+ }
+
+ sad_list = RTE_TAILQ_CAST(rte_ipsec_sad_tailq.head,
+ rte_ipsec_sad_list);
+
+ rte_mcfg_tailq_read_lock();
+ TAILQ_FOREACH(te, sad_list, next) {
+ sad = (struct rte_ipsec_sad *) te->data;
+ if (strncmp(sad_name, sad->name, RTE_IPSEC_SAD_NAMESIZE) == 0)
+ break;
+ }
+ rte_mcfg_tailq_read_unlock();
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return sad;
+}
+
+void
+rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad)
+{
+ struct rte_tailq_entry *te;
+ struct rte_ipsec_sad_list *sad_list;
+
+ if (sad == NULL)
+ return;
+
+ sad_list = RTE_TAILQ_CAST(rte_ipsec_sad_tailq.head,
+ rte_ipsec_sad_list);
+ rte_mcfg_tailq_write_lock();
+ TAILQ_FOREACH(te, sad_list, next) {
+ if (te->data == (void *)sad)
+ break;
+ }
+ if (te != NULL)
+ TAILQ_REMOVE(sad_list, te, next);
+
+ rte_mcfg_tailq_write_unlock();
+
+ rte_hash_free(sad->hash[RTE_IPSEC_SAD_SPI_ONLY]);
+ rte_hash_free(sad->hash[RTE_IPSEC_SAD_SPI_DIP]);
+ rte_hash_free(sad->hash[RTE_IPSEC_SAD_SPI_DIP_SIP]);
+ rte_free(sad);
+ if (te != NULL)
+ rte_free(te);
+}
+
+/*
+ * @internal helper function
+ * Lookup a batch of keys in three hash tables.
+ * First look up the key in the SPI_ONLY table.
+ * If there is an entry for the corresponding SPI, check its value.
+ * The two least significant bits of the value indicate
+ * the presence of a more specific rule in other tables.
+ * Perform additional lookups in the corresponding hash tables
+ * and update the value if the lookup succeeded.
+ */
+static int
+__ipsec_sad_lookup(const struct rte_ipsec_sad *sad,
+ const union rte_ipsec_sad_key *keys[], void *sa[], uint32_t n)
+{
+ const void *keys_2[RTE_HASH_LOOKUP_BULK_MAX];
+ const void *keys_3[RTE_HASH_LOOKUP_BULK_MAX];
+ void *vals_2[RTE_HASH_LOOKUP_BULK_MAX] = {NULL};
+ void *vals_3[RTE_HASH_LOOKUP_BULK_MAX] = {NULL};
+ uint32_t idx_2[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t idx_3[RTE_HASH_LOOKUP_BULK_MAX];
+ uint64_t mask_1, mask_2, mask_3;
+ uint64_t map, map_spec;
+ uint32_t n_2 = 0;
+ uint32_t n_3 = 0;
+ uint32_t i;
+ int found = 0;
+ hash_sig_t hash_sig[RTE_HASH_LOOKUP_BULK_MAX];
+ hash_sig_t hash_sig_2[RTE_HASH_LOOKUP_BULK_MAX];
+ hash_sig_t hash_sig_3[RTE_HASH_LOOKUP_BULK_MAX];
+
+ for (i = 0; i < n; i++) {
+ sa[i] = NULL;
+ hash_sig[i] = rte_hash_crc_4byte(keys[i]->v4.spi,
+ sad->init_val);
+ }
+
+ /*
+ * Lookup keys in SPI only hash table first.
+ */
+ rte_hash_lookup_with_hash_bulk_data(sad->hash[RTE_IPSEC_SAD_SPI_ONLY],
+ (const void **)keys, hash_sig, n, &mask_1, sa);
+ for (map = mask_1; map; map &= (map - 1)) {
+ i = rte_bsf64(map);
+ /*
+ * if the returned value indicates the presence of a rule in other
+ * tables, save the key for further lookup.
+ */
+ if ((uintptr_t)sa[i] & RTE_IPSEC_SAD_SPI_DIP_SIP) {
+ idx_3[n_3] = i;
+ hash_sig_3[n_3] = rte_hash_crc(keys[i],
+ sad->keysize[RTE_IPSEC_SAD_SPI_DIP_SIP],
+ sad->init_val);
+ keys_3[n_3++] = keys[i];
+ }
+ if ((uintptr_t)sa[i] & RTE_IPSEC_SAD_SPI_DIP) {
+ idx_2[n_2] = i;
+ hash_sig_2[n_2] = rte_hash_crc(keys[i],
+ sad->keysize[RTE_IPSEC_SAD_SPI_DIP],
+ sad->init_val);
+ keys_2[n_2++] = keys[i];
+ }
+ /* clear 2 LSB's which indicate the presence
+ * of more specific rules
+ */
+ sa[i] = CLEAR_BIT(sa[i], RTE_IPSEC_SAD_KEY_TYPE_MASK);
+ }
+
+ /* Lookup for more specific rules in SPI_DIP table */
+ if (n_2 != 0) {
+ rte_hash_lookup_with_hash_bulk_data(
+ sad->hash[RTE_IPSEC_SAD_SPI_DIP],
+ keys_2, hash_sig_2, n_2, &mask_2, vals_2);
+ for (map_spec = mask_2; map_spec; map_spec &= (map_spec - 1)) {
+ i = rte_bsf64(map_spec);
+ sa[idx_2[i]] = vals_2[i];
+ }
+ }
+ /* Lookup for more specific rules in SPI_DIP_SIP table */
+ if (n_3 != 0) {
+ rte_hash_lookup_with_hash_bulk_data(
+ sad->hash[RTE_IPSEC_SAD_SPI_DIP_SIP],
+ keys_3, hash_sig_3, n_3, &mask_3, vals_3);
+ for (map_spec = mask_3; map_spec; map_spec &= (map_spec - 1)) {
+ i = rte_bsf64(map_spec);
+ sa[idx_3[i]] = vals_3[i];
+ }
+ }
+
+ for (i = 0; i < n; i++)
+ found += (sa[i] != NULL);
+
+ return found;
+}
+
+int
+rte_ipsec_sad_lookup(const struct rte_ipsec_sad *sad,
+ const union rte_ipsec_sad_key *keys[], void *sa[], uint32_t n)
+{
+ uint32_t num, i = 0;
+ int found = 0;
+
+ if (unlikely((sad == NULL) || (keys == NULL) || (sa == NULL)))
+ return -EINVAL;
+
+ do {
+ num = RTE_MIN(n - i, (uint32_t)RTE_HASH_LOOKUP_BULK_MAX);
+ found += __ipsec_sad_lookup(sad,
+ &keys[i], &sa[i], num);
+ i += num;
+ } while (i != n);
+
+ return found;
+}
diff --git a/src/spdk/dpdk/lib/librte_ipsec/ipsec_sqn.h b/src/spdk/dpdk/lib/librte_ipsec/ipsec_sqn.h
new file mode 100644
index 000000000..2636cb153
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/ipsec_sqn.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IPSEC_SQN_H_
+#define _IPSEC_SQN_H_
+
+#define WINDOW_BUCKET_BITS 6 /* uint64_t */
+#define WINDOW_BUCKET_SIZE (1 << WINDOW_BUCKET_BITS)
+#define WINDOW_BIT_LOC_MASK (WINDOW_BUCKET_SIZE - 1)
+
+/* minimum number of buckets, power of 2 */
+#define WINDOW_BUCKET_MIN 2
+#define WINDOW_BUCKET_MAX (INT16_MAX + 1)
+
+#define IS_ESN(sa) ((sa)->sqn_mask == UINT64_MAX)
+
+#define SQN_ATOMIC(sa) ((sa)->type & RTE_IPSEC_SATP_SQN_ATOM)
+
+/*
+ * gets SQN.hi32 bits, SQN supposed to be in network byte order.
+ */
+static inline rte_be32_t
+sqn_hi32(rte_be64_t sqn)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ return (sqn >> 32);
+#else
+ return sqn;
+#endif
+}
+
+/*
+ * gets SQN.low32 bits, SQN supposed to be in network byte order.
+ */
+static inline rte_be32_t
+sqn_low32(rte_be64_t sqn)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ return sqn;
+#else
+ return (sqn >> 32);
+#endif
+}
+
+/*
+ * gets SQN.low16 bits, SQN supposed to be in network byte order.
+ */
+static inline rte_be16_t
+sqn_low16(rte_be64_t sqn)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ return sqn;
+#else
+ return (sqn >> 48);
+#endif
+}
+
+/*
+ * According to RFC 4303 A2.1, determine the high-order bits of the sequence
+ * number. Uses 32-bit arithmetic inside, returns uint64_t.
+ */
+static inline uint64_t
+reconstruct_esn(uint64_t t, uint32_t sqn, uint32_t w)
+{
+ uint32_t th, tl, bl;
+
+ tl = t;
+ th = t >> 32;
+ bl = tl - w + 1;
+
+ /* case A: window is within one sequence number subspace */
+ if (tl >= (w - 1))
+ th += (sqn < bl);
+ /* case B: window spans two sequence number subspaces */
+ else if (th != 0)
+ th -= (sqn >= bl);
+
+ /* return constructed sequence with proper high-order bits */
+ return (uint64_t)th << 32 | sqn;
+}
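A minimal worked example of the reconstruction above (an illustrative annotation, not part of the patch): with the last seen ESN t = 0x1FFFFFF00 and a 64-entry window, a small low-order sqn has already wrapped into the next 2^32 subspace, while a value near the top of the window has not.

	static inline void
	reconstruct_esn_example(void)
	{
		const uint64_t t = 0x1FFFFFF00ULL;	/* hi = 1, lo = 0xFFFFFF00 */
		const uint32_t w = 64;

		/* sqn below the window bottom -> high-order half advances to 2 */
		RTE_ASSERT(reconstruct_esn(t, 0x10, w) == 0x200000010ULL);
		/* sqn inside the current window keeps the high-order half of 1 */
		RTE_ASSERT(reconstruct_esn(t, 0xFFFFFF80, w) == 0x1FFFFFF80ULL);
	}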
+
+/**
+ * Perform the replay checking.
+ *
+ * struct rte_ipsec_sa contains the window and window related parameters,
+ * such as the window size, bitmask, and the last acknowledged sequence number.
+ *
+ * Based on RFC 6479.
+ * Blocks are 64 bits unsigned integers
+ */
+static inline int32_t
+esn_inb_check_sqn(const struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
+ uint64_t sqn)
+{
+ uint32_t bit, bucket;
+
+ /* replay not enabled */
+ if (sa->replay.win_sz == 0)
+ return 0;
+
+ /* seq is larger than lastseq */
+ if (sqn > rsn->sqn)
+ return 0;
+
+ /* seq is outside window */
+ if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
+ return -EINVAL;
+
+ /* seq is inside the window */
+ bit = sqn & WINDOW_BIT_LOC_MASK;
+ bucket = (sqn >> WINDOW_BUCKET_BITS) & sa->replay.bucket_index_mask;
+
+ /* already seen packet */
+ if (rsn->window[bucket] & ((uint64_t)1 << bit))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * For outbound SA perform the sequence number update.
+ */
+static inline uint64_t
+esn_outb_update_sqn(struct rte_ipsec_sa *sa, uint32_t *num)
+{
+ uint64_t n, s, sqn;
+
+ n = *num;
+ if (SQN_ATOMIC(sa))
+ sqn = __atomic_add_fetch(&sa->sqn.outb, n, __ATOMIC_RELAXED);
+ else {
+ sqn = sa->sqn.outb + n;
+ sa->sqn.outb = sqn;
+ }
+
+ /* overflow */
+ if (sqn > sa->sqn_mask) {
+ s = sqn - sa->sqn_mask;
+ *num = (s < n) ? n - s : 0;
+ }
+
+ return sqn - n;
+}
+
+/**
+ * For inbound SA perform the sequence number and replay window update.
+ */
+static inline int32_t
+esn_inb_update_sqn(struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
+ uint64_t sqn)
+{
+ uint32_t bit, bucket, last_bucket, new_bucket, diff, i;
+
+ /* handle ESN */
+ if (IS_ESN(sa))
+ sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
+
+ /* seq is outside window */
+ if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
+ return -EINVAL;
+
+ /* update the bit */
+ bucket = (sqn >> WINDOW_BUCKET_BITS);
+
+ /* check if the seq is within the range */
+ if (sqn > rsn->sqn) {
+ last_bucket = rsn->sqn >> WINDOW_BUCKET_BITS;
+ diff = bucket - last_bucket;
+ /* seq is way after the range of WINDOW_SIZE */
+ if (diff > sa->replay.nb_bucket)
+ diff = sa->replay.nb_bucket;
+
+ for (i = 0; i != diff; i++) {
+ new_bucket = (i + last_bucket + 1) &
+ sa->replay.bucket_index_mask;
+ rsn->window[new_bucket] = 0;
+ }
+ rsn->sqn = sqn;
+ }
+
+ bucket &= sa->replay.bucket_index_mask;
+ bit = (uint64_t)1 << (sqn & WINDOW_BIT_LOC_MASK);
+
+ /* already seen packet */
+ if (rsn->window[bucket] & bit)
+ return -EINVAL;
+
+ rsn->window[bucket] |= bit;
+ return 0;
+}
+
+/**
+ * To support multiple readers and a single writer for the
+ * SA replay window information and sequence number (RSN),
+ * a basic RCU schema is used:
+ * the SA has 2 copies of the RSN (one for readers, another for the writer).
+ * Each RSN contains a rwlock that has to be grabbed (for read/write)
+ * to avoid races between readers and the writer.
+ * The writer is responsible for making a copy of the reader RSN, updating it
+ * and marking the newly updated RSN as the readers' one.
+ * That approach is intended to minimize contention and cache sharing
+ * between writer and readers.
+ */
+
+/**
+ * Copy replay window and SQN.
+ */
+static inline void
+rsn_copy(const struct rte_ipsec_sa *sa, uint32_t dst, uint32_t src)
+{
+ uint32_t i, n;
+ struct replay_sqn *d;
+ const struct replay_sqn *s;
+
+ d = sa->sqn.inb.rsn[dst];
+ s = sa->sqn.inb.rsn[src];
+
+ n = sa->replay.nb_bucket;
+
+ d->sqn = s->sqn;
+ for (i = 0; i != n; i++)
+ d->window[i] = s->window[i];
+}
+
+/**
+ * Get RSN for read-only access.
+ */
+static inline struct replay_sqn *
+rsn_acquire(struct rte_ipsec_sa *sa)
+{
+ uint32_t n;
+ struct replay_sqn *rsn;
+
+ n = sa->sqn.inb.rdidx;
+ rsn = sa->sqn.inb.rsn[n];
+
+ if (!SQN_ATOMIC(sa))
+ return rsn;
+
+ /* check there are no writers */
+ while (rte_rwlock_read_trylock(&rsn->rwl) < 0) {
+ rte_pause();
+ n = sa->sqn.inb.rdidx;
+ rsn = sa->sqn.inb.rsn[n];
+ rte_compiler_barrier();
+ }
+
+ return rsn;
+}
+
+/**
+ * Release read-only access for RSN.
+ */
+static inline void
+rsn_release(struct rte_ipsec_sa *sa, struct replay_sqn *rsn)
+{
+ if (SQN_ATOMIC(sa))
+ rte_rwlock_read_unlock(&rsn->rwl);
+}
+
+/**
+ * Start RSN update.
+ */
+static inline struct replay_sqn *
+rsn_update_start(struct rte_ipsec_sa *sa)
+{
+ uint32_t k, n;
+ struct replay_sqn *rsn;
+
+ n = sa->sqn.inb.wridx;
+
+ /* no active writers */
+ RTE_ASSERT(n == sa->sqn.inb.rdidx);
+
+ if (!SQN_ATOMIC(sa))
+ return sa->sqn.inb.rsn[n];
+
+ k = REPLAY_SQN_NEXT(n);
+ sa->sqn.inb.wridx = k;
+
+ rsn = sa->sqn.inb.rsn[k];
+ rte_rwlock_write_lock(&rsn->rwl);
+ rsn_copy(sa, k, n);
+
+ return rsn;
+}
+
+/**
+ * Finish RSN update.
+ */
+static inline void
+rsn_update_finish(struct rte_ipsec_sa *sa, struct replay_sqn *rsn)
+{
+ uint32_t n;
+
+ if (!SQN_ATOMIC(sa))
+ return;
+
+ n = sa->sqn.inb.wridx;
+ RTE_ASSERT(n != sa->sqn.inb.rdidx);
+ RTE_ASSERT(rsn == sa->sqn.inb.rsn[n]);
+
+ rte_rwlock_write_unlock(&rsn->rwl);
+ sa->sqn.inb.rdidx = n;
+}
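A minimal sketch of the writer-side sequence the scheme above intends (illustrative only; error handling and the surrounding inbound processing are omitted):

	static inline int32_t
	rsn_update_example(struct rte_ipsec_sa *sa, uint64_t sqn)
	{
		int32_t rc;
		struct replay_sqn *rsn;

		/* switch to (and write-lock) the writer copy for SQN_ATOM SAs */
		rsn = rsn_update_start(sa);
		rc = esn_inb_update_sqn(rsn, sa, sqn);
		/* publish the updated copy back to readers */
		rsn_update_finish(sa, rsn);
		return rc;
	}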
+
+
+#endif /* _IPSEC_SQN_H_ */
diff --git a/src/spdk/dpdk/lib/librte_ipsec/meson.build b/src/spdk/dpdk/lib/librte_ipsec/meson.build
new file mode 100644
index 000000000..7dcbff590
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('esp_inb.c', 'esp_outb.c', 'sa.c', 'ses.c', 'ipsec_sad.c')
+
+headers = files('rte_ipsec.h', 'rte_ipsec_group.h', 'rte_ipsec_sa.h', 'rte_ipsec_sad.h')
+
+deps += ['mbuf', 'net', 'cryptodev', 'security', 'hash']
+build = false
+reason = 'not needed by SPDK'
diff --git a/src/spdk/dpdk/lib/librte_ipsec/misc.h b/src/spdk/dpdk/lib/librte_ipsec/misc.h
new file mode 100644
index 000000000..1b543ed87
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/misc.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2020 Intel Corporation
+ */
+
+#ifndef _MISC_H_
+#define _MISC_H_
+
+/**
+ * @file misc.h
+ * Contains miscellaneous functions/structures/macros used internally
+ * by ipsec library.
+ */
+
+/*
+ * Move bad (unprocessed) mbufs beyond the good (processed) ones.
+ * bad_idx[] contains the indexes of bad mbufs inside the mb[].
+ */
+static inline void
+move_bad_mbufs(struct rte_mbuf *mb[], const uint32_t bad_idx[], uint32_t nb_mb,
+ uint32_t nb_bad)
+{
+ uint32_t i, j, k;
+ struct rte_mbuf *drb[nb_bad];
+
+ j = 0;
+ k = 0;
+
+ /* copy bad ones into a temp place */
+ for (i = 0; i != nb_mb; i++) {
+ if (j != nb_bad && i == bad_idx[j])
+ drb[j++] = mb[i];
+ else
+ mb[k++] = mb[i];
+ }
+
+ /* copy bad ones after the good ones */
+ for (i = 0; i != nb_bad; i++)
+ mb[k + i] = drb[i];
+}
+
+/*
+ * Find the packet's segment for the specified offset.
+ * ofs - on input should contain the required offset, on output contains
+ * the offset value within the segment.
+ */
+static inline struct rte_mbuf *
+mbuf_get_seg_ofs(struct rte_mbuf *mb, uint32_t *ofs)
+{
+ uint32_t k, n, plen;
+ struct rte_mbuf *ms;
+
+ plen = mb->pkt_len;
+ n = *ofs;
+
+ if (n == plen) {
+ ms = rte_pktmbuf_lastseg(mb);
+ n = n + rte_pktmbuf_data_len(ms) - plen;
+ } else {
+ ms = mb;
+ for (k = rte_pktmbuf_data_len(ms); n >= k;
+ k = rte_pktmbuf_data_len(ms)) {
+ ms = ms->next;
+ n -= k;
+ }
+ }
+
+ *ofs = n;
+ return ms;
+}
+
+/*
+ * Trim multi-segment packet at the specified offset, and free
+ * all unused segments.
+ * mb - input packet
+ * ms - segment where to cut
+ * ofs - offset within the *ms*
+ * len - length to cut (from given offset to the end of the packet)
+ * Can be used in conjunction with mbuf_get_seg_ofs():
+ * ofs = new_len;
+ * ms = mbuf_get_seg_ofs(mb, &ofs);
+ * mbuf_cut_seg_ofs(mb, ms, ofs, mb->pkt_len - new_len);
+ */
+static inline void
+mbuf_cut_seg_ofs(struct rte_mbuf *mb, struct rte_mbuf *ms, uint32_t ofs,
+ uint32_t len)
+{
+ uint32_t n, slen;
+ struct rte_mbuf *mn;
+
+ slen = ms->data_len;
+ ms->data_len = ofs;
+
+ /* tail spans multiple segments */
+ if (slen < ofs + len) {
+ mn = ms->next;
+ ms->next = NULL;
+ for (n = 0; mn != NULL; n++) {
+ ms = mn->next;
+ rte_pktmbuf_free_seg(mn);
+ mn = ms;
+ }
+ mb->nb_segs -= n;
+ }
+
+ mb->pkt_len -= len;
+}
+
+/*
+ * process packets using sync crypto engine.
+ * expects *num* to be greater than zero.
+ */
+static inline void
+cpu_crypto_bulk(const struct rte_ipsec_session *ss,
+ union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[],
+ void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[],
+ uint32_t clen[], uint32_t num)
+{
+ uint32_t i, j, n;
+ int32_t vcnt, vofs;
+ int32_t st[num];
+ struct rte_crypto_sgl vecpkt[num];
+ struct rte_crypto_vec vec[UINT8_MAX];
+ struct rte_crypto_sym_vec symvec;
+
+ const uint32_t vnum = RTE_DIM(vec);
+
+ j = 0, n = 0;
+ vofs = 0;
+ for (i = 0; i != num; i++) {
+
+ vcnt = rte_crypto_mbuf_to_vec(mb[i], l4ofs[i], clen[i],
+ &vec[vofs], vnum - vofs);
+
+ /* not enough space in vec[] to hold all segments */
+ if (vcnt < 0) {
+ /* fill the request structure */
+ symvec.sgl = &vecpkt[j];
+ symvec.iv = &iv[j];
+ symvec.aad = &aad[j];
+ symvec.digest = &dgst[j];
+ symvec.status = &st[j];
+ symvec.num = i - j;
+
+ /* flush vec array and try again */
+ n += rte_cryptodev_sym_cpu_crypto_process(
+ ss->crypto.dev_id, ss->crypto.ses, ofs,
+ &symvec);
+ vofs = 0;
+ vcnt = rte_crypto_mbuf_to_vec(mb[i], l4ofs[i], clen[i],
+ vec, vnum);
+ RTE_ASSERT(vcnt > 0);
+ j = i;
+ }
+
+ vecpkt[i].vec = &vec[vofs];
+ vecpkt[i].num = vcnt;
+ vofs += vcnt;
+ }
+
+ /* fill the request structure */
+ symvec.sgl = &vecpkt[j];
+ symvec.iv = &iv[j];
+ symvec.aad = &aad[j];
+ symvec.digest = &dgst[j];
+ symvec.status = &st[j];
+ symvec.num = i - j;
+
+ n += rte_cryptodev_sym_cpu_crypto_process(ss->crypto.dev_id,
+ ss->crypto.ses, ofs, &symvec);
+
+ j = num - n;
+ for (i = 0; j != 0 && i != num; i++) {
+ if (st[i] != 0) {
+ mb[i]->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ j--;
+ }
+ }
+}
+
+#endif /* _MISC_H_ */
diff --git a/src/spdk/dpdk/lib/librte_ipsec/pad.h b/src/spdk/dpdk/lib/librte_ipsec/pad.h
new file mode 100644
index 000000000..2f5ccd00e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/pad.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _PAD_H_
+#define _PAD_H_
+
+#define IPSEC_MAX_PAD_SIZE UINT8_MAX
+
+static const uint8_t esp_pad_bytes[IPSEC_MAX_PAD_SIZE] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255,
+};
+
+#endif /* _PAD_H_ */
diff --git a/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec.h b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec.h
new file mode 100644
index 000000000..6666cf761
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2020 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_H_
+#define _RTE_IPSEC_H_
+
+/**
+ * @file rte_ipsec.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE IPsec support.
+ * librte_ipsec provides a framework for data-path IPsec protocol
+ * processing (ESP/AH).
+ */
+
+#include <rte_ipsec_sa.h>
+#include <rte_mbuf.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct rte_ipsec_session;
+
+/**
+ * IPsec session specific functions that will be used to:
+ * - prepare - for input mbufs and given IPsec session prepare crypto ops
+ * that can be enqueued into the cryptodev associated with given session
+ * (see *rte_ipsec_pkt_crypto_prepare* below for more details).
+ * - process - finalize processing of packets after crypto-dev finished
+ * with them or process packets that are subject to inline IPsec offload
+ * (see rte_ipsec_pkt_process for more details).
+ */
+struct rte_ipsec_sa_pkt_func {
+ union {
+ uint16_t (*async)(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[],
+ uint16_t num);
+ uint16_t (*sync)(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[],
+ uint16_t num);
+ } prepare;
+ uint16_t (*process)(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[],
+ uint16_t num);
+};
+
+/**
+ * rte_ipsec_session is an aggregate structure that defines a particular
+ * IPsec Security Association (SA) on a given security/crypto device:
+ * - pointer to the SA object
+ * - security session action type
+ * - pointer to security/crypto session, plus other related data
+ * - session/device specific functions to prepare/process IPsec packets.
+ */
+struct rte_ipsec_session {
+ /**
+ * SA that session belongs to.
+ * Note that multiple sessions can belong to the same SA.
+ */
+ struct rte_ipsec_sa *sa;
+ /** session action type */
+ enum rte_security_session_action_type type;
+ /** session and related data */
+ union {
+ struct {
+ struct rte_cryptodev_sym_session *ses;
+ uint8_t dev_id;
+ } crypto;
+ struct {
+ struct rte_security_session *ses;
+ struct rte_security_ctx *ctx;
+ uint32_t ol_flags;
+ } security;
+ };
+ /** functions to prepare/process IPsec packets */
+ struct rte_ipsec_sa_pkt_func pkt_func;
+} __rte_cache_aligned;
+
+/**
+ * Checks that inside the given rte_ipsec_session the crypto/security fields
+ * are filled correctly and sets up function pointers based on these values.
+ * Expects that all fields except the IPsec processing function pointers
+ * (*pkt_func*) will be filled correctly by the caller.
+ * @param ss
+ * Pointer to the *rte_ipsec_session* object
+ * @return
+ * - Zero if operation completed successfully.
+ * - -EINVAL if the parameters are invalid.
+ */
+__rte_experimental
+int
+rte_ipsec_session_prepare(struct rte_ipsec_session *ss);
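A minimal setup sketch for the structure and call above (illustrative only; it assumes a symmetric crypto session created elsewhere, and *dev_id* is a placeholder):

	static int
	ipsec_session_setup_example(struct rte_ipsec_sa *sa,
		struct rte_cryptodev_sym_session *crypto_ses, uint8_t dev_id)
	{
		struct rte_ipsec_session ss = {
			.sa = sa,
			/* plain (lookaside) crypto-dev based session */
			.type = RTE_SECURITY_ACTION_TYPE_NONE,
			.crypto = { .ses = crypto_ses, .dev_id = dev_id },
		};

		/* validate the fields above and fill ss.pkt_func */
		return rte_ipsec_session_prepare(&ss);
	}

In a real application the session object would, of course, be kept alive for later use in the packet path.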
+
+/**
+ * For input mbufs and given IPsec session prepare crypto ops that can be
+ * enqueued into the cryptodev associated with given session.
+ * Expects that for each input packet:
+ * - l2_len, l3_len are set up correctly
+ * Note that erroneous mbufs are not freed by the function,
+ * but are placed beyond the last valid mbuf in the *mb* array.
+ * It is the user's responsibility to handle them further.
+ * @param ss
+ * Pointer to the *rte_ipsec_session* object the packets belong to.
+ * @param mb
+ * The address of an array of *num* pointers to *rte_mbuf* structures
+ * which contain the input packets.
+ * @param cop
+ * The address of an array of *num* pointers to the output *rte_crypto_op*
+ * structures.
+ * @param num
+ * The maximum number of packets to process.
+ * @return
+ * Number of successfully processed packets, with error code set in rte_errno.
+ */
+__rte_experimental
+static inline uint16_t
+rte_ipsec_pkt_crypto_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
+{
+ return ss->pkt_func.prepare.async(ss, mb, cop, num);
+}
+
+__rte_experimental
+static inline uint16_t
+rte_ipsec_pkt_cpu_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return ss->pkt_func.prepare.sync(ss, mb, num);
+}
+
+/**
+ * Finalise processing of packets after crypto-dev finished with them or
+ * process packets that are subject to inline IPsec offload.
+ * Expects that for each input packet:
+ * - l2_len, l3_len are set up correctly
+ * Output mbufs will be:
+ * inbound - decrypted & authenticated, ESP(AH) related headers removed,
+ * *l2_len* and *l3_len* fields are updated.
+ * outbound - appropriate mbuf fields (ol_flags, tx_offloads, etc.)
+ * properly set up; if necessary, IP headers updated, ESP(AH) fields added.
+ * Note that erroneous mbufs are not freed by the function,
+ * but are placed beyond the last valid mbuf in the *mb* array.
+ * It is the user's responsibility to handle them further.
+ * @param ss
+ * Pointer to the *rte_ipsec_session* object the packets belong to.
+ * @param mb
+ * The address of an array of *num* pointers to *rte_mbuf* structures
+ * which contain the input packets.
+ * @param num
+ * The maximum number of packets to process.
+ * @return
+ * Number of successfully processed packets, with error code set in rte_errno.
+ */
+__rte_experimental
+static inline uint16_t
+rte_ipsec_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num)
+{
+ return ss->pkt_func.process(ss, mb, num);
+}
+
+#include <rte_ipsec_group.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_H_ */
diff --git a/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_group.h b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_group.h
new file mode 100644
index 000000000..47b33ca5e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_group.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_GROUP_H_
+#define _RTE_IPSEC_GROUP_H_
+
+/**
+ * @file rte_ipsec_group.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE IPsec support.
+ * It is not recommended to include this file directly,
+ * include <rte_ipsec.h> instead.
+ * Contains helper functions to process completed crypto-ops
+ * and group related packets by sessions they belong to.
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Used to group mbufs by some id.
+ * See below for particular usage.
+ */
+struct rte_ipsec_group {
+ union {
+ uint64_t val;
+ void *ptr;
+ } id; /**< grouped by value */
+ struct rte_mbuf **m; /**< start of the group */
+ uint32_t cnt; /**< number of entries in the group */
+ int32_t rc; /**< status code associated with the group */
+};
+
+/**
+ * Take crypto-op as an input and extract pointer to related ipsec session.
+ * @param cop
+ * The address of an input *rte_crypto_op* structure.
+ * @return
+ * The pointer to the related *rte_ipsec_session* structure.
+ */
+__rte_experimental
+static inline struct rte_ipsec_session *
+rte_ipsec_ses_from_crypto(const struct rte_crypto_op *cop)
+{
+ const struct rte_security_session *ss;
+ const struct rte_cryptodev_sym_session *cs;
+
+ if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ ss = cop->sym[0].sec_session;
+ return (void *)(uintptr_t)ss->opaque_data;
+ } else if (cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ cs = cop->sym[0].session;
+ return (void *)(uintptr_t)cs->opaque_data;
+ }
+ return NULL;
+}
+
+/**
+ * Take as input completed crypto ops, extract related mbufs
+ * and group them by rte_ipsec_session they belong to.
+ * For mbuf which crypto-op wasn't completed successfully
+ * PKT_RX_SEC_OFFLOAD_FAILED will be raised in ol_flags.
+ * Note that mbufs with an undetermined SA (session-less) are not freed
+ * by the function, but are placed beyond the mbufs of the last valid group.
+ * It is the user's responsibility to handle them further.
+ * @param cop
+ * The address of an array of *num* pointers to the input *rte_crypto_op*
+ * structures.
+ * @param mb
+ * The address of an array of *num* pointers to output *rte_mbuf* structures.
+ * @param grp
+ * The address of an array of *num* output *rte_ipsec_group* structures.
+ * @param num
+ * The maximum number of crypto-ops to process.
+ * @return
+ * Number of filled elements in *grp* array.
+ */
+__rte_experimental
+static inline uint16_t
+rte_ipsec_pkt_crypto_group(const struct rte_crypto_op *cop[],
+ struct rte_mbuf *mb[], struct rte_ipsec_group grp[], uint16_t num)
+{
+ uint32_t i, j, k, n;
+ void *ns, *ps;
+ struct rte_mbuf *m, *dr[num];
+
+ j = 0;
+ k = 0;
+ n = 0;
+ ps = NULL;
+
+ for (i = 0; i != num; i++) {
+
+ m = cop[i]->sym[0].m_src;
+ ns = cop[i]->sym[0].session;
+
+ m->ol_flags |= PKT_RX_SEC_OFFLOAD;
+ if (cop[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+ m->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+ /* no valid session found */
+ if (ns == NULL) {
+ dr[k++] = m;
+ continue;
+ }
+
+ /* different SA */
+ if (ps != ns) {
+
+ /*
+ * we already have an open group - finalize it,
+ * then open a new one.
+ */
+ if (ps != NULL) {
+ grp[n].id.ptr =
+ rte_ipsec_ses_from_crypto(cop[i - 1]);
+ grp[n].cnt = mb + j - grp[n].m;
+ n++;
+ }
+
+ /* start new group */
+ grp[n].m = mb + j;
+ ps = ns;
+ }
+
+ mb[j++] = m;
+ }
+
+ /* finalise last group */
+ if (ps != NULL) {
+ grp[n].id.ptr = rte_ipsec_ses_from_crypto(cop[i - 1]);
+ grp[n].cnt = mb + j - grp[n].m;
+ n++;
+ }
+
+ /* copy mbufs with unknown session beyond recognised ones */
+ if (k != 0 && k != num) {
+ for (i = 0; i != k; i++)
+ mb[j + i] = dr[i];
+ }
+
+ return n;
+}
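A sketch of the intended usage after a crypto-dev dequeue (illustrative only; dev_id, qid and the burst size are placeholders):

	static inline void
	crypto_group_example(uint8_t dev_id, uint16_t qid)
	{
		struct rte_crypto_op *cop[32];
		struct rte_mbuf *mb[32];
		struct rte_ipsec_group grp[32];
		uint16_t i, k, n, ng;

		n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, RTE_DIM(cop));

		/* group completed crypto-ops by the session they belong to */
		ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)cop,
			mb, grp, n);

		/* finalise IPsec processing for each group */
		for (i = 0; i != ng; i++) {
			struct rte_ipsec_session *ss = grp[i].id.ptr;

			k = rte_ipsec_pkt_process(ss, grp[i].m, grp[i].cnt);
			/* mbufs beyond the first k in grp[i].m failed and
			 * still have to be freed by the caller
			 */
			RTE_SET_USED(k);
		}
	}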
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_GROUP_H_ */
diff --git a/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_sa.h b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_sa.h
new file mode 100644
index 000000000..1cfde5874
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_sa.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_SA_H_
+#define _RTE_IPSEC_SA_H_
+
+/**
+ * @file rte_ipsec_sa.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Defines API to manage IPsec Security Association (SA) objects.
+ */
+
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_security.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * An opaque structure to represent Security Association (SA).
+ */
+struct rte_ipsec_sa;
+
+/**
+ * SA initialization parameters.
+ */
+struct rte_ipsec_sa_prm {
+
+ uint64_t userdata; /**< provided and interpreted by user */
+ uint64_t flags; /**< see RTE_IPSEC_SAFLAG_* below */
+ /** ipsec configuration */
+ struct rte_security_ipsec_xform ipsec_xform;
+ /** crypto session configuration */
+ struct rte_crypto_sym_xform *crypto_xform;
+ union {
+ struct {
+ uint8_t hdr_len; /**< tunnel header len */
+ uint8_t hdr_l3_off; /**< offset for IPv4/IPv6 header */
+ uint8_t next_proto; /**< next header protocol */
+ const void *hdr; /**< tunnel header template */
+ } tun; /**< tunnel mode related parameters */
+ struct {
+ uint8_t proto; /**< next header protocol */
+ } trs; /**< transport mode related parameters */
+ };
+};
+
+/**
+ * Indicates whether the SA will need 'atomic' access
+ * to the sequence number and replay window.
+ * 'atomic' here means:
+ * the functions:
+ * - rte_ipsec_pkt_crypto_prepare
+ * - rte_ipsec_pkt_process
+ * can be safely used in an MT environment, as long as the user can guarantee
+ * that they obey the multiple readers/single writer model for SQN and
+ * replay_window operations.
+ * To be more specific:
+ * for an outbound SA there are no restrictions.
+ * for an inbound SA the caller has to guarantee that at any given moment
+ * only one thread is executing rte_ipsec_pkt_process() for a given SA.
+ * Note that it is the caller's responsibility to maintain the correct order
+ * of packets to be processed; in other words, it is the caller's
+ * responsibility to serialize process() invocations.
+ */
+#define RTE_IPSEC_SAFLAG_SQN_ATOM (1ULL << 0)
+
+/**
+ * SA type is a 64-bit value that contains the following information:
+ * - IP version (IPv4/IPv6)
+ * - IPsec proto (ESP/AH)
+ * - inbound/outbound
+ * - mode (TRANSPORT/TUNNEL)
+ * - for TUNNEL outer IP version (IPv4/IPv6)
+ * - are SA SQN operations 'atomic'
+ * - ESN enabled/disabled
+ * ...
+ */
+
+enum {
+ RTE_SATP_LOG2_IPV,
+ RTE_SATP_LOG2_PROTO,
+ RTE_SATP_LOG2_DIR,
+ RTE_SATP_LOG2_MODE,
+ RTE_SATP_LOG2_SQN = RTE_SATP_LOG2_MODE + 2,
+ RTE_SATP_LOG2_ESN,
+ RTE_SATP_LOG2_ECN,
+ RTE_SATP_LOG2_DSCP,
+ RTE_SATP_LOG2_NUM
+};
+
+#define RTE_IPSEC_SATP_IPV_MASK (1ULL << RTE_SATP_LOG2_IPV)
+#define RTE_IPSEC_SATP_IPV4 (0ULL << RTE_SATP_LOG2_IPV)
+#define RTE_IPSEC_SATP_IPV6 (1ULL << RTE_SATP_LOG2_IPV)
+
+#define RTE_IPSEC_SATP_PROTO_MASK (1ULL << RTE_SATP_LOG2_PROTO)
+#define RTE_IPSEC_SATP_PROTO_AH (0ULL << RTE_SATP_LOG2_PROTO)
+#define RTE_IPSEC_SATP_PROTO_ESP (1ULL << RTE_SATP_LOG2_PROTO)
+
+#define RTE_IPSEC_SATP_DIR_MASK (1ULL << RTE_SATP_LOG2_DIR)
+#define RTE_IPSEC_SATP_DIR_IB (0ULL << RTE_SATP_LOG2_DIR)
+#define RTE_IPSEC_SATP_DIR_OB (1ULL << RTE_SATP_LOG2_DIR)
+
+#define RTE_IPSEC_SATP_MODE_MASK (3ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TRANS (0ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TUNLV4 (1ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TUNLV6 (2ULL << RTE_SATP_LOG2_MODE)
+
+#define RTE_IPSEC_SATP_SQN_MASK (1ULL << RTE_SATP_LOG2_SQN)
+#define RTE_IPSEC_SATP_SQN_RAW (0ULL << RTE_SATP_LOG2_SQN)
+#define RTE_IPSEC_SATP_SQN_ATOM (1ULL << RTE_SATP_LOG2_SQN)
+
+#define RTE_IPSEC_SATP_ESN_MASK (1ULL << RTE_SATP_LOG2_ESN)
+#define RTE_IPSEC_SATP_ESN_DISABLE (0ULL << RTE_SATP_LOG2_ESN)
+#define RTE_IPSEC_SATP_ESN_ENABLE (1ULL << RTE_SATP_LOG2_ESN)
+
+#define RTE_IPSEC_SATP_ECN_MASK (1ULL << RTE_SATP_LOG2_ECN)
+#define RTE_IPSEC_SATP_ECN_DISABLE (0ULL << RTE_SATP_LOG2_ECN)
+#define RTE_IPSEC_SATP_ECN_ENABLE (1ULL << RTE_SATP_LOG2_ECN)
+
+#define RTE_IPSEC_SATP_DSCP_MASK (1ULL << RTE_SATP_LOG2_DSCP)
+#define RTE_IPSEC_SATP_DSCP_DISABLE (0ULL << RTE_SATP_LOG2_DSCP)
+#define RTE_IPSEC_SATP_DSCP_ENABLE (1ULL << RTE_SATP_LOG2_DSCP)
+
+/**
+ * get type of given SA
+ * @return
+ * SA type value.
+ */
+__rte_experimental
+uint64_t
+rte_ipsec_sa_type(const struct rte_ipsec_sa *sa);
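A small sketch of how the type bits above can be queried (illustrative only, not part of the patch):

	static inline int
	sa_is_inbound_tunnel(const struct rte_ipsec_sa *sa)
	{
		uint64_t tp = rte_ipsec_sa_type(sa);

		return (tp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB &&
			(tp & RTE_IPSEC_SATP_MODE_MASK) != RTE_IPSEC_SATP_MODE_TRANS;
	}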
+
+/**
+ * Calculate required SA size based on provided input parameters.
+ * @param prm
+ * Parameters that will be used to initialise SA object.
+ * @return
+ * - Actual size required for SA with given parameters.
+ * - -EINVAL if the parameters are invalid.
+ */
+__rte_experimental
+int
+rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm);
+
+/**
+ * initialise SA based on provided input parameters.
+ * @param sa
+ * SA object to initialise.
+ * @param prm
+ * Parameters used to initialise given SA object.
+ * @param size
+ * size of the provided buffer for SA.
+ * @return
+ * - Actual size of SA object if operation completed successfully.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOSPC if the size of the provided buffer is not big enough.
+ */
+__rte_experimental
+int
+rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ uint32_t size);
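A minimal allocation flow built on the two calls above (illustrative only; socket selection and detailed error reporting are omitted):

	static struct rte_ipsec_sa *
	sa_alloc_example(const struct rte_ipsec_sa_prm *prm)
	{
		int32_t sz;
		struct rte_ipsec_sa *sa;

		/* calculate the size required for the given parameters */
		sz = rte_ipsec_sa_size(prm);
		if (sz < 0)
			return NULL;

		sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
		if (sa == NULL)
			return NULL;

		/* on success rte_ipsec_sa_init() returns the size actually used */
		if (rte_ipsec_sa_init(sa, prm, sz) != sz) {
			rte_free(sa);
			return NULL;
		}
		return sa;
	}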
+
+/**
+ * cleanup SA
+ * @param sa
+ * Pointer to SA object to de-initialize.
+ */
+__rte_experimental
+void
+rte_ipsec_sa_fini(struct rte_ipsec_sa *sa);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_SA_H_ */
diff --git a/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_sad.h b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_sad.h
new file mode 100644
index 000000000..dcc82249e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_sad.h
@@ -0,0 +1,178 @@
+
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_SAD_H_
+#define _RTE_IPSEC_SAD_H_
+
+#include <rte_compat.h>
+
+/**
+ * @file rte_ipsec_sad.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE IPsec security association database (SAD) support.
+ * Contains helper functions to lookup and maintain SAD
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct rte_ipsec_sad;
+
+/** Type of key */
+enum {
+ RTE_IPSEC_SAD_SPI_ONLY = 0,
+ RTE_IPSEC_SAD_SPI_DIP,
+ RTE_IPSEC_SAD_SPI_DIP_SIP,
+ RTE_IPSEC_SAD_KEY_TYPE_MASK,
+};
+
+struct rte_ipsec_sadv4_key {
+ uint32_t spi;
+ uint32_t dip;
+ uint32_t sip;
+};
+
+struct rte_ipsec_sadv6_key {
+ uint32_t spi;
+ uint8_t dip[16];
+ uint8_t sip[16];
+};
+
+union rte_ipsec_sad_key {
+ struct rte_ipsec_sadv4_key v4;
+ struct rte_ipsec_sadv6_key v6;
+};
+
+/** Max number of characters in SAD name. */
+#define RTE_IPSEC_SAD_NAMESIZE 64
+/** Flag to create SAD with ipv6 dip and sip addresses */
+#define RTE_IPSEC_SAD_FLAG_IPV6 0x1
+/** Flag to support reader writer concurrency */
+#define RTE_IPSEC_SAD_FLAG_RW_CONCURRENCY 0x2
+
+/** IPsec SAD configuration structure */
+struct rte_ipsec_sad_conf {
+ /** CPU socket ID where rte_ipsec_sad should be allocated */
+ int socket_id;
+ /** maximum number of SA for each type of key */
+ uint32_t max_sa[RTE_IPSEC_SAD_KEY_TYPE_MASK];
+ /** RTE_IPSEC_SAD_FLAG_* flags */
+ uint32_t flags;
+};
+
+/**
+ * Add a rule into the SAD. Can be safely called with concurrent lookups
+ * if the RTE_IPSEC_SAD_FLAG_RW_CONCURRENCY flag was configured at creation time.
+ * While with this flag the multi-reader/single-writer model is MT safe,
+ * the multi-writer model is not and requires extra synchronisation.
+ *
+ * @param sad
+ * SAD object handle
+ * @param key
+ * pointer to the key
+ * @param key_type
+ * key type (spi only/spi+dip/spi+dip+sip)
+ * @param sa
+ * Pointer associated with the key to save in a SAD
+ * Must be 4 bytes aligned.
+ * @return
+ * 0 on success, negative value otherwise
+ */
+__rte_experimental
+int
+rte_ipsec_sad_add(struct rte_ipsec_sad *sad,
+ const union rte_ipsec_sad_key *key,
+ int key_type, void *sa);
+
+/**
+ * Delete a rule from the SAD. Can be safely called with concurrent lookups
+ * if the RTE_IPSEC_SAD_FLAG_RW_CONCURRENCY flag was configured at creation time.
+ * While with this flag the multi-reader/single-writer model is MT safe,
+ * the multi-writer model is not and requires extra synchronisation.
+ *
+ * @param sad
+ * SAD object handle
+ * @param key
+ * pointer to the key
+ * @param key_type
+ * key type (spi only/spi+dip/spi+dip+sip)
+ * @return
+ * 0 on success, negative value otherwise
+ */
+__rte_experimental
+int
+rte_ipsec_sad_del(struct rte_ipsec_sad *sad,
+ const union rte_ipsec_sad_key *key,
+ int key_type);
+/**
+ * Create SAD
+ *
+ * @param name
+ * SAD name
+ * @param conf
+ * Structure containing the configuration
+ * @return
+ * Handle to SAD object on success
+ * NULL otherwise, with rte_errno set to an appropriate value.
+ */
+__rte_experimental
+struct rte_ipsec_sad *
+rte_ipsec_sad_create(const char *name, const struct rte_ipsec_sad_conf *conf);
+
+/**
+ * Find an existing SAD object and return a pointer to it.
+ *
+ * @param name
+ * Name of the SAD object as passed to rte_ipsec_sad_create()
+ * @return
+ * Pointer to sad object or NULL if object not found with rte_errno
+ * set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+__rte_experimental
+struct rte_ipsec_sad *
+rte_ipsec_sad_find_existing(const char *name);
+
+/**
+ * Destroy SAD object.
+ *
+ * @param sad
+ * pointer to the SAD object
+ * @return
+ * None
+ */
+__rte_experimental
+void
+rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad);
+
+/**
+ * Lookup multiple keys in the SAD.
+ *
+ * @param sad
+ * SAD object handle
+ * @param keys
+ * Array of keys to be looked up in the SAD
+ * @param sa
+ * Pointers associated with the keys.
+ * If the lookup for the given key failed, then the corresponding sa
+ * will be NULL.
+ * @param n
+ * Number of elements in keys array to lookup.
+ * @return
+ * -EINVAL for incorrect arguments, otherwise number of successful lookups.
+ */
+__rte_experimental
+int
+rte_ipsec_sad_lookup(const struct rte_ipsec_sad *sad,
+ const union rte_ipsec_sad_key *keys[],
+ void *sa[], uint32_t n);
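A minimal create/add/lookup sketch tying the calls above together (illustrative only; the SAD name, table sizes and the stand-in SA object are placeholders):

	static void *
	sad_usage_example(void)
	{
		/* stand-in SA object, naturally 4-byte aligned */
		static uint64_t dummy_sa;
		struct rte_ipsec_sad *sad;
		union rte_ipsec_sad_key key = { .v4 = { .spi = 1, .dip = 0, .sip = 0 } };
		const union rte_ipsec_sad_key *pkey = &key;
		void *res[1];
		struct rte_ipsec_sad_conf conf = {
			.socket_id = SOCKET_ID_ANY,
			.max_sa = { 64, 64, 64 },
			.flags = 0,
		};

		sad = rte_ipsec_sad_create("sad_example", &conf);
		if (sad == NULL)
			return NULL;

		/* add a rule matching on SPI+DIP, then look the same key up */
		if (rte_ipsec_sad_add(sad, pkey, RTE_IPSEC_SAD_SPI_DIP, &dummy_sa) != 0)
			return NULL;
		rte_ipsec_sad_lookup(sad, &pkey, res, 1);

		return res[0];	/* expected to be &dummy_sa */
	}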
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_SAD_H_ */
diff --git a/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_version.map b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_version.map
new file mode 100644
index 000000000..f37c867bf
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/rte_ipsec_version.map
@@ -0,0 +1,21 @@
+EXPERIMENTAL {
+ global:
+
+ rte_ipsec_pkt_crypto_group;
+ rte_ipsec_pkt_crypto_prepare;
+ rte_ipsec_pkt_process;
+ rte_ipsec_sa_fini;
+ rte_ipsec_sa_init;
+ rte_ipsec_sa_size;
+ rte_ipsec_sa_type;
+ rte_ipsec_sad_add;
+ rte_ipsec_sad_create;
+ rte_ipsec_sad_del;
+ rte_ipsec_sad_destroy;
+ rte_ipsec_sad_find_existing;
+ rte_ipsec_sad_lookup;
+ rte_ipsec_ses_from_crypto;
+ rte_ipsec_session_prepare;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/lib/librte_ipsec/sa.c b/src/spdk/dpdk/lib/librte_ipsec/sa.c
new file mode 100644
index 000000000..e59189d21
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/sa.c
@@ -0,0 +1,764 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2020 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+#define MBUF_MAX_L2_LEN RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
+#define MBUF_MAX_L3_LEN RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
+
+/* some helper structures */
+struct crypto_xform {
+ struct rte_crypto_auth_xform *auth;
+ struct rte_crypto_cipher_xform *cipher;
+ struct rte_crypto_aead_xform *aead;
+};
+
+/*
+ * helper routine, fills internal crypto_xform structure.
+ */
+static int
+fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
+ const struct rte_ipsec_sa_prm *prm)
+{
+ struct rte_crypto_sym_xform *xf, *xfn;
+
+ memset(xform, 0, sizeof(*xform));
+
+ xf = prm->crypto_xform;
+ if (xf == NULL)
+ return -EINVAL;
+
+ xfn = xf->next;
+
+ /* for AEAD just one xform required */
+ if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xfn != NULL)
+ return -EINVAL;
+ xform->aead = &xf->aead;
+ /*
+ * CIPHER+AUTH xforms are expected in strict order,
+ * depending on SA direction:
+ * inbound: AUTH+CIPHER
+ * outbound: CIPHER+AUTH
+ */
+ } else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
+
+ /* wrong order or no cipher */
+ if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
+ xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return -EINVAL;
+
+ xform->auth = &xf->auth;
+ xform->cipher = &xfn->cipher;
+
+ } else {
+
+ /* wrong order or no auth */
+ if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
+ xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ return -EINVAL;
+
+ xform->cipher = &xf->cipher;
+ xform->auth = &xfn->auth;
+ }
+
+ return 0;
+}
+
+uint64_t
+rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
+{
+ return sa->type;
+}
+
+/**
+ * Based on the number of buckets, calculate the required size for the
+ * structure that holds the replay window and sequence number (RSN) information.
+ */
+static size_t
+rsn_size(uint32_t nb_bucket)
+{
+ size_t sz;
+ struct replay_sqn *rsn;
+
+ sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
+ sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
+ return sz;
+}
+
+/*
+ * for given size, calculate required number of buckets.
+ */
+static uint32_t
+replay_num_bucket(uint32_t wsz)
+{
+ uint32_t nb;
+
+ nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
+ WINDOW_BUCKET_SIZE);
+ nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);
+
+ return nb;
+}
+
+static int32_t
+ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
+{
+ uint32_t n, sz, wsz;
+
+ wsz = *wnd_sz;
+ n = 0;
+
+ if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
+
+ /*
+ * RFC 4303 recommends 64 as the minimum window size.
+ * There is no point in using ESN mode without an SQN window,
+ * so make sure we have a window of at least 64 when ESN is enabled.
+ */
+ wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
+ RTE_IPSEC_SATP_ESN_DISABLE) ?
+ wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
+ if (wsz != 0)
+ n = replay_num_bucket(wsz);
+ }
+
+ if (n > WINDOW_BUCKET_MAX)
+ return -EINVAL;
+
+ *wnd_sz = wsz;
+ *nb_bucket = n;
+
+ sz = rsn_size(n);
+ if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
+ sz *= REPLAY_SQN_NUM;
+
+ sz += sizeof(struct rte_ipsec_sa);
+ return sz;
+}
+
+void
+rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
+{
+ memset(sa, 0, sa->size);
+}
+
+/*
+ * Determine expected SA type based on input parameters.
+ */
+static int
+fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
+{
+ uint64_t tp;
+
+ tp = 0;
+
+ if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+ tp |= RTE_IPSEC_SATP_PROTO_AH;
+ else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+ tp |= RTE_IPSEC_SATP_PROTO_ESP;
+ else
+ return -EINVAL;
+
+ if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+ tp |= RTE_IPSEC_SATP_DIR_OB;
+ else if (prm->ipsec_xform.direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+ tp |= RTE_IPSEC_SATP_DIR_IB;
+ else
+ return -EINVAL;
+
+ if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ if (prm->ipsec_xform.tunnel.type ==
+ RTE_SECURITY_IPSEC_TUNNEL_IPV4)
+ tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
+ else if (prm->ipsec_xform.tunnel.type ==
+ RTE_SECURITY_IPSEC_TUNNEL_IPV6)
+ tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
+ else
+ return -EINVAL;
+
+ if (prm->tun.next_proto == IPPROTO_IPIP)
+ tp |= RTE_IPSEC_SATP_IPV4;
+ else if (prm->tun.next_proto == IPPROTO_IPV6)
+ tp |= RTE_IPSEC_SATP_IPV6;
+ else
+ return -EINVAL;
+ } else if (prm->ipsec_xform.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
+ tp |= RTE_IPSEC_SATP_MODE_TRANS;
+ if (prm->trs.proto == IPPROTO_IPIP)
+ tp |= RTE_IPSEC_SATP_IPV4;
+ else if (prm->trs.proto == IPPROTO_IPV6)
+ tp |= RTE_IPSEC_SATP_IPV6;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ /* check for ESN flag */
+ if (prm->ipsec_xform.options.esn == 0)
+ tp |= RTE_IPSEC_SATP_ESN_DISABLE;
+ else
+ tp |= RTE_IPSEC_SATP_ESN_ENABLE;
+
+ /* check for ECN flag */
+ if (prm->ipsec_xform.options.ecn == 0)
+ tp |= RTE_IPSEC_SATP_ECN_DISABLE;
+ else
+ tp |= RTE_IPSEC_SATP_ECN_ENABLE;
+
+ /* check for DSCP flag */
+ if (prm->ipsec_xform.options.copy_dscp == 0)
+ tp |= RTE_IPSEC_SATP_DSCP_DISABLE;
+ else
+ tp |= RTE_IPSEC_SATP_DSCP_ENABLE;
+
+ /* interpret flags */
+ if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
+ tp |= RTE_IPSEC_SATP_SQN_ATOM;
+ else
+ tp |= RTE_IPSEC_SATP_SQN_RAW;
+
+ *type = tp;
+ return 0;
+}
+
+/*
+ * Init ESP inbound specific things.
+ */
+static void
+esp_inb_init(struct rte_ipsec_sa *sa)
+{
+ /* these params may differ with new algorithms support */
+ sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
+ sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
+
+ /*
+ * for AEAD and NULL algorithms we can assume that
+ * auth and cipher offsets would be equal.
+ */
+ switch (sa->algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ case ALGO_TYPE_NULL:
+ sa->ctp.auth.raw = sa->ctp.cipher.raw;
+ break;
+ default:
+ sa->ctp.auth.offset = 0;
+ sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
+ sa->cofs.ofs.cipher.tail = sa->sqh_len;
+ break;
+ }
+
+ sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
+}
+
+/*
+ * Init ESP inbound tunnel specific things.
+ */
+static void
+esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+{
+ sa->proto = prm->tun.next_proto;
+ esp_inb_init(sa);
+}
+
+/*
+ * Init ESP outbound specific things.
+ */
+static void
+esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
+{
+ uint8_t algo_type;
+
+ sa->sqn.outb = 1;
+
+ algo_type = sa->algo_type;
+
+ /*
+ * Setup auth and cipher length and offset.
+ * these params may differ with new algorithms support
+ */
+
+ switch (algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ case ALGO_TYPE_AES_CTR:
+ case ALGO_TYPE_NULL:
+ sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
+ sa->iv_len;
+ sa->ctp.cipher.length = 0;
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
+ sa->ctp.cipher.length = sa->iv_len;
+ break;
+ }
+
+ /*
+ * for AEAD and NULL algorithms we can assume that
+ * auth and cipher offsets would be equal.
+ */
+ switch (algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ case ALGO_TYPE_NULL:
+ sa->ctp.auth.raw = sa->ctp.cipher.raw;
+ break;
+ default:
+ sa->ctp.auth.offset = hlen;
+ sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
+ sa->iv_len + sa->sqh_len;
+ break;
+ }
+
+ sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
+ sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
+ (sa->ctp.cipher.offset + sa->ctp.cipher.length);
+}
+
+/*
+ * Init ESP outbound tunnel specific things.
+ */
+static void
+esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+{
+ sa->proto = prm->tun.next_proto;
+ sa->hdr_len = prm->tun.hdr_len;
+ sa->hdr_l3_off = prm->tun.hdr_l3_off;
+
+ /* update l2_len and l3_len fields for outbound mbuf */
+ sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
+ sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
+
+ memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
+
+ esp_outb_init(sa, sa->hdr_len);
+}
+
+/*
+ * helper function, init SA structure.
+ */
+static int
+esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ const struct crypto_xform *cxf)
+{
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ if (prm->ipsec_xform.options.ecn)
+ sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;
+
+ if (prm->ipsec_xform.options.copy_dscp)
+ sa->tos_mask |= RTE_IPV4_HDR_DSCP_MASK;
+
+ if (cxf->aead != NULL) {
+ switch (cxf->aead->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ /* RFC 4106 */
+ sa->aad_len = sizeof(struct aead_gcm_aad);
+ sa->icv_len = cxf->aead->digest_length;
+ sa->iv_ofs = cxf->aead->iv.offset;
+ sa->iv_len = sizeof(uint64_t);
+ sa->pad_align = IPSEC_PAD_AES_GCM;
+ sa->algo_type = ALGO_TYPE_AES_GCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ sa->icv_len = cxf->auth->digest_length;
+ sa->iv_ofs = cxf->cipher->iv.offset;
+ sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
+
+ switch (cxf->cipher->algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ sa->pad_align = IPSEC_PAD_NULL;
+ sa->iv_len = 0;
+ sa->algo_type = ALGO_TYPE_NULL;
+ break;
+
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sa->pad_align = IPSEC_PAD_AES_CBC;
+ sa->iv_len = IPSEC_MAX_IV_SIZE;
+ sa->algo_type = ALGO_TYPE_AES_CBC;
+ break;
+
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ /* RFC 3686 */
+ sa->pad_align = IPSEC_PAD_AES_CTR;
+ sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
+ sa->algo_type = ALGO_TYPE_AES_CTR;
+ break;
+
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ /* RFC 1851 */
+ sa->pad_align = IPSEC_PAD_3DES_CBC;
+ sa->iv_len = IPSEC_3DES_IV_SIZE;
+ sa->algo_type = ALGO_TYPE_3DES_CBC;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ sa->udata = prm->userdata;
+ sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
+ sa->salt = prm->ipsec_xform.salt;
+
+ /* preserve all values except l2_len and l3_len */
+ sa->tx_offload.msk =
+ ~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
+ 0, 0, 0, 0, 0);
+
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ esp_inb_tun_init(sa, prm);
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ esp_inb_init(sa);
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ esp_outb_tun_init(sa, prm);
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ esp_outb_init(sa, 0);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * helper function, init SA replay structure.
+ */
+static void
+fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
+{
+ sa->replay.win_sz = wnd_sz;
+ sa->replay.nb_bucket = nb_bucket;
+ sa->replay.bucket_index_mask = nb_bucket - 1;
+ sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
+ if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
+ sa->sqn.inb.rsn[1] = (struct replay_sqn *)
+ ((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
+}
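+
+/*
+ * Editorial note on the layout assumed above: the replay structures are
+ * carved from the tail of the same allocation as the SA itself, i.e.
+ *
+ *	[struct rte_ipsec_sa][rsn[0] + window][rsn[1] + window]
+ *
+ * where the second copy is present only for RTE_IPSEC_SATP_SQN_ATOM SAs.
+ * In that mode readers use the copy selected by sqn.inb.rdidx while the
+ * writer updates the other one (sqn.inb.wridx) and then switches the
+ * indexes (see ipsec_sqn.h). bucket_index_mask == nb_bucket - 1 relies on
+ * nb_bucket being a power of two, which the bucket count computed during
+ * ipsec_sa_size() is expected to guarantee.
+ */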
+
+int
+rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
+{
+ uint64_t type;
+ uint32_t nb, wsz;
+ int32_t rc;
+
+ if (prm == NULL)
+ return -EINVAL;
+
+ /* determine SA type */
+ rc = fill_sa_type(prm, &type);
+ if (rc != 0)
+ return rc;
+
+ /* determine required size */
+ wsz = prm->ipsec_xform.replay_win_sz;
+ return ipsec_sa_size(type, &wsz, &nb);
+}
+
+int
+rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ uint32_t size)
+{
+ int32_t rc, sz;
+ uint32_t nb, wsz;
+ uint64_t type;
+ struct crypto_xform cxf;
+
+ if (sa == NULL || prm == NULL)
+ return -EINVAL;
+
+ /* determine SA type */
+ rc = fill_sa_type(prm, &type);
+ if (rc != 0)
+ return rc;
+
+ /* determine required size */
+ wsz = prm->ipsec_xform.replay_win_sz;
+ sz = ipsec_sa_size(type, &wsz, &nb);
+ if (sz < 0)
+ return sz;
+ else if (size < (uint32_t)sz)
+ return -ENOSPC;
+
+ /* only esp is supported right now */
+ if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+ return -EINVAL;
+
+ if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ prm->tun.hdr_len > sizeof(sa->hdr))
+ return -EINVAL;
+
+ rc = fill_crypto_xform(&cxf, type, prm);
+ if (rc != 0)
+ return rc;
+
+ /* initialize SA */
+
+ memset(sa, 0, sz);
+ sa->type = type;
+ sa->size = sz;
+
+ /* check for ESN flag */
+ sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
+ UINT32_MAX : UINT64_MAX;
+
+ rc = esp_sa_init(sa, prm, &cxf);
+ if (rc != 0)
+ rte_ipsec_sa_fini(sa);
+
+ /* fill replay window related fields */
+ if (nb != 0)
+ fill_sa_replay(sa, wsz, nb);
+
+ return sz;
+}
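+
+/*
+ * Illustrative usage sketch for the two functions above (editorial, not
+ * part of the library; error handling abbreviated, prm setup assumed to be
+ * done elsewhere):
+ *
+ *	struct rte_ipsec_sa_prm prm;
+ *	struct rte_ipsec_sa *sa;
+ *	int32_t sz;
+ *
+ *	... fill prm: crypto_xform chain, ipsec_xform, tunnel header ...
+ *
+ *	sz = rte_ipsec_sa_size(&prm);
+ *	if (sz < 0)
+ *		return sz;
+ *	sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
+ *	if (sa == NULL)
+ *		return -ENOMEM;
+ *	sz = rte_ipsec_sa_init(sa, &prm, sz);
+ *	if (sz < 0) {
+ *		rte_free(sa);
+ *		return sz;
+ *	}
+ */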
+
+/*
+ * setup crypto ops for LOOKASIDE_PROTO type of devices.
+ */
+static inline void
+lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
+{
+ uint32_t i;
+ struct rte_crypto_sym_op *sop;
+
+ for (i = 0; i != num; i++) {
+ sop = cop[i]->sym;
+ cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
+ sop->m_src = mb[i];
+ __rte_security_attach_session(sop, ss->security.ses);
+ }
+}
+
+/*
+ * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
+ * Note that for LOOKASIDE_PROTO all packet modifications will be
+ * performed by PMD/HW.
+ * SW only has to prepare the crypto op.
+ */
+static uint16_t
+lksd_proto_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
+{
+ lksd_proto_cop_prepare(ss, mb, cop, num);
+ return num;
+}
+
+/*
+ * simplest pkt process routine:
+ * all actual processing is already done by HW/PMD,
+ * just check mbuf ol_flags.
+ * used for:
+ * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
+ * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
+ * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
+ */
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ uint32_t i, k;
+ uint32_t dr[num];
+
+ RTE_SET_USED(ss);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+ if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
+ k++;
+ else
+ dr[i - k] = i;
+ }
+
+ /* handle unprocessed mbufs */
+ if (k != num) {
+ rte_errno = EBADMSG;
+ if (k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+ }
+
+ return k;
+}
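+
+/*
+ * Editorial worked example for the index/compaction scheme above (the
+ * packet count and failure pattern are an assumption): with num == 5 and
+ * offload failures reported on mb[1] and mb[3], the loop yields k == 3 and
+ * dr == {1, 3}; move_bad_mbufs() then reorders mb[] to {0, 2, 4, 1, 3},
+ * i.e. the k successfully processed packets stay at the front in their
+ * original order and the failed ones are appended at the tail, with
+ * rte_errno set to EBADMSG.
+ */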
+
+/*
+ * Select packet processing function for session on LOOKASIDE_NONE
+ * type of device.
+ */
+static int
+lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
+ struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ rc = 0;
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->prepare.async = esp_inb_pkt_prepare;
+ pf->process = esp_inb_tun_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->prepare.async = esp_inb_pkt_prepare;
+ pf->process = esp_inb_trs_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->prepare.async = esp_outb_tun_prepare;
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_sqh_process : pkt_flag_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->prepare.async = esp_outb_trs_prepare;
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_sqh_process : pkt_flag_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
+
+static int
+cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+ struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ rc = 0;
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->prepare.sync = cpu_inb_pkt_prepare;
+ pf->process = esp_inb_tun_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->prepare.sync = cpu_inb_pkt_prepare;
+ pf->process = esp_inb_trs_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->prepare.sync = cpu_outb_tun_pkt_prepare;
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_sqh_process : pkt_flag_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->prepare.sync = cpu_outb_trs_pkt_prepare;
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_sqh_process : pkt_flag_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
+
+/*
+ * Select packet processing function for session on INLINE_CRYPTO
+ * type of device.
+ */
+static int
+inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+ struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ rc = 0;
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->process = inline_inb_tun_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->process = inline_inb_trs_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->process = inline_outb_tun_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->process = inline_outb_trs_pkt_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
+
+/*
+ * Select packet processing function for the given session based on SA
+ * parameters and the type of device associated with the session.
+ */
+int
+ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
+ const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ rc = 0;
+ pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };
+
+ switch (ss->type) {
+ case RTE_SECURITY_ACTION_TYPE_NONE:
+ rc = lksd_none_pkt_func_select(sa, pf);
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ rc = inline_crypto_pkt_func_select(sa, pf);
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
+ RTE_IPSEC_SATP_DIR_IB)
+ pf->process = pkt_flag_process;
+ else
+ pf->process = inline_proto_outb_pkt_process;
+ break;
+ case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+ pf->prepare.async = lksd_proto_prepare;
+ pf->process = pkt_flag_process;
+ break;
+ case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+ rc = cpu_crypto_pkt_func_select(sa, pf);
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
diff --git a/src/spdk/dpdk/lib/librte_ipsec/sa.h b/src/spdk/dpdk/lib/librte_ipsec/sa.h
new file mode 100644
index 000000000..1bffe751f
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/sa.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2020 Intel Corporation
+ */
+
+#ifndef _SA_H_
+#define _SA_H_
+
+#include <rte_rwlock.h>
+
+#define IPSEC_MAX_HDR_SIZE 64
+#define IPSEC_MAX_IV_SIZE 16
+#define IPSEC_MAX_IV_QWORD (IPSEC_MAX_IV_SIZE / sizeof(uint64_t))
+#define TUN_HDR_MSK (RTE_IPSEC_SATP_ECN_MASK | RTE_IPSEC_SATP_DSCP_MASK)
+
+/* padding alignment for different algorithms */
+enum {
+ IPSEC_PAD_DEFAULT = 4,
+ IPSEC_PAD_3DES_CBC = 8,
+ IPSEC_PAD_AES_CBC = IPSEC_MAX_IV_SIZE,
+ IPSEC_PAD_AES_CTR = IPSEC_PAD_DEFAULT,
+ IPSEC_PAD_AES_GCM = IPSEC_PAD_DEFAULT,
+ IPSEC_PAD_NULL = IPSEC_PAD_DEFAULT,
+};
+
+/* iv sizes for different algorithms */
+enum {
+ IPSEC_IV_SIZE_DEFAULT = IPSEC_MAX_IV_SIZE,
+ IPSEC_AES_CTR_IV_SIZE = sizeof(uint64_t),
+	/* TripleDES supports IV sizes of 32 or 64 bits, but the library
+	 * only supports 64 bits.
+ */
+ IPSEC_3DES_IV_SIZE = sizeof(uint64_t),
+};
+
+/* these definitions probably have to be in rte_crypto_sym.h */
+union sym_op_ofslen {
+ uint64_t raw;
+ struct {
+ uint32_t offset;
+ uint32_t length;
+ };
+};
+
+union sym_op_data {
+#ifdef __SIZEOF_INT128__
+ __uint128_t raw;
+#endif
+ struct {
+ uint8_t *va;
+ rte_iova_t pa;
+ };
+};
+
+#define REPLAY_SQN_NUM 2
+#define REPLAY_SQN_NEXT(n) ((n) ^ 1)
+
+struct replay_sqn {
+ rte_rwlock_t rwl;
+ uint64_t sqn;
+ __extension__ uint64_t window[0];
+};
+
+/* IPSEC SA supported algorithms */
+enum sa_algo_type {
+ ALGO_TYPE_NULL = 0,
+ ALGO_TYPE_3DES_CBC,
+ ALGO_TYPE_AES_CBC,
+ ALGO_TYPE_AES_CTR,
+ ALGO_TYPE_AES_GCM,
+ ALGO_TYPE_MAX
+};
+
+struct rte_ipsec_sa {
+
+ uint64_t type; /* type of given SA */
+ uint64_t udata; /* user defined */
+ uint32_t size; /* size of given sa object */
+ uint32_t spi;
+ /* sqn calculations related */
+ uint64_t sqn_mask;
+ struct {
+ uint32_t win_sz;
+ uint16_t nb_bucket;
+ uint16_t bucket_index_mask;
+ } replay;
+ /* template for crypto op fields */
+ struct {
+ union sym_op_ofslen cipher;
+ union sym_op_ofslen auth;
+ } ctp;
+ /* cpu-crypto offsets */
+ union rte_crypto_sym_ofs cofs;
+ /* tx_offload template for tunnel mbuf */
+ struct {
+ uint64_t msk;
+ uint64_t val;
+ } tx_offload;
+ uint32_t salt;
+ uint8_t algo_type;
+ uint8_t proto; /* next proto */
+ uint8_t aad_len;
+ uint8_t hdr_len;
+ uint8_t hdr_l3_off;
+ uint8_t icv_len;
+ uint8_t sqh_len;
+ uint8_t iv_ofs; /* offset for algo-specific IV inside crypto op */
+ uint8_t iv_len;
+ uint8_t pad_align;
+ uint8_t tos_mask;
+
+ /* template for tunnel header */
+ uint8_t hdr[IPSEC_MAX_HDR_SIZE];
+
+ /*
+ * sqn and replay window
+	 * In case the SA is handled by multiple threads, the *sqn* cacheline
+	 * could be shared by multiple cores.
+	 * To minimise the performance impact, we try to locate it in a
+	 * separate place from other frequently accessed data.
+ */
+ union {
+ uint64_t outb;
+ struct {
+ uint32_t rdidx; /* read index */
+ uint32_t wridx; /* write index */
+ struct replay_sqn *rsn[REPLAY_SQN_NUM];
+ } inb;
+ } sqn;
+
+} __rte_cache_aligned;
+
+int
+ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
+ const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf);
+
+/* inbound processing */
+
+uint16_t
+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+/* outbound processing */
+
+uint16_t
+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num);
+
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+#endif /* _SA_H_ */
diff --git a/src/spdk/dpdk/lib/librte_ipsec/ses.c b/src/spdk/dpdk/lib/librte_ipsec/ses.c
new file mode 100644
index 000000000..3d51ac498
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_ipsec/ses.c
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2020 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include "sa.h"
+
+static int
+session_check(struct rte_ipsec_session *ss)
+{
+ if (ss == NULL || ss->sa == NULL)
+ return -EINVAL;
+
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
+ ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
+ if (ss->crypto.ses == NULL)
+ return -EINVAL;
+ } else {
+ if (ss->security.ses == NULL)
+ return -EINVAL;
+ if ((ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ ss->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
+ ss->security.ctx == NULL)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+rte_ipsec_session_prepare(struct rte_ipsec_session *ss)
+{
+ int32_t rc;
+ struct rte_ipsec_sa_pkt_func fp;
+
+ rc = session_check(ss);
+ if (rc != 0)
+ return rc;
+
+ rc = ipsec_sa_pkt_func_select(ss, ss->sa, &fp);
+ if (rc != 0)
+ return rc;
+
+ ss->pkt_func = fp;
+
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE)
+ ss->crypto.ses->opaque_data = (uintptr_t)ss;
+ else
+ ss->security.ses->opaque_data = (uintptr_t)ss;
+
+ return 0;
+}
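+
+/*
+ * Illustrative usage sketch (editorial, not part of the library): for a
+ * lookaside-none session backed by an already created cryptodev symmetric
+ * session 'crypto_ses' (an assumed variable):
+ *
+ *	struct rte_ipsec_session ss = { 0 };
+ *
+ *	ss.sa = sa;
+ *	ss.type = RTE_SECURITY_ACTION_TYPE_NONE;
+ *	ss.crypto.ses = crypto_ses;
+ *	if (rte_ipsec_session_prepare(&ss) != 0)
+ *		... handle error ...
+ *
+ * After a successful prepare, ss.pkt_func is populated and the session can
+ * be used with rte_ipsec_pkt_crypto_prepare() / rte_ipsec_pkt_process().
+ */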