Diffstat (limited to 'src/seastar/dpdk/lib/librte_ipsec')
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/Makefile                 29
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/crypto.h                182
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/esp_inb.c               547
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/esp_outb.c              580
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/iph.h                    84
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/ipsec_sqn.h             309
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/meson.build              10
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/misc.h                   41
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/pad.h                    45
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/rte_ipsec.h             154
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_group.h       151
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_sa.h          174
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_version.map    15
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/sa.c                    668
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/sa.h                    175
-rw-r--r--  src/seastar/dpdk/lib/librte_ipsec/ses.c                    52
16 files changed, 3216 insertions, 0 deletions
diff --git a/src/seastar/dpdk/lib/librte_ipsec/Makefile b/src/seastar/dpdk/lib/librte_ipsec/Makefile
new file mode 100644
index 000000000..e80926baa
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_ipsec.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_net -lrte_cryptodev -lrte_security
+
+EXPORT_MAP := rte_ipsec_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += esp_inb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += esp_outb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += sa.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += ses.c
+
+# install header files
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec_group.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec_sa.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/lib/librte_ipsec/crypto.h b/src/seastar/dpdk/lib/librte_ipsec/crypto.h
new file mode 100644
index 000000000..45d4ef7bf
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/crypto.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _CRYPTO_H_
+#define _CRYPTO_H_
+
+/**
+ * @file crypto.h
+ * Contains crypto specific functions/structures/macros used internally
+ * by ipsec library.
+ */
+
+/*
+ * AES-CTR counter block format.
+ */
+
+struct aesctr_cnt_blk {
+ uint32_t nonce;
+ uint64_t iv;
+ uint32_t cnt;
+} __attribute__((packed));
+
+/*
+ * AES-GCM devices have some specific requirements for IV and AAD formats.
+ * Ideally this would be done by the driver itself.
+ */
+
+struct aead_gcm_iv {
+ uint32_t salt;
+ uint64_t iv;
+ uint32_t cnt;
+} __attribute__((packed));
+
+struct aead_gcm_aad {
+ uint32_t spi;
+ /*
+ * RFC 4106, section 5:
+ * Two formats of the AAD are defined:
+ * one for 32-bit sequence numbers, and one for 64-bit ESN.
+ */
+ union {
+ uint32_t u32[2];
+ uint64_t u64;
+ } sqn;
+ uint32_t align0; /* align to 16B boundary */
+} __attribute__((packed));
+
+struct gcm_esph_iv {
+ struct esp_hdr esph;
+ uint64_t iv;
+} __attribute__((packed));
+
+static inline void
+aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)
+{
+ ctr->nonce = nonce;
+ ctr->iv = iv;
+ ctr->cnt = rte_cpu_to_be_32(1);
+}
+
+static inline void
+aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
+{
+ gcm->salt = salt;
+ gcm->iv = iv;
+ gcm->cnt = rte_cpu_to_be_32(1);
+}
+
+/*
+ * RFC 4106, 5 AAD Construction
+ * spi and sqn should already be converted into network byte order.
+ * Make sure that unused bytes are zeroed.
+ */
+static inline void
+aead_gcm_aad_fill(struct aead_gcm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
+ int esn)
+{
+ aad->spi = spi;
+ if (esn)
+ aad->sqn.u64 = sqn;
+ else {
+ aad->sqn.u32[0] = sqn_low32(sqn);
+ aad->sqn.u32[1] = 0;
+ }
+ aad->align0 = 0;
+}
+
+static inline void
+gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
+{
+ iv[0] = sqn;
+ iv[1] = 0;
+}
+
+/*
+ * Helper routine to copy IV
+ * Right now we support only algorithms with IV length equal to 0/8/16 bytes.
+ */
+static inline void
+copy_iv(uint64_t dst[IPSEC_MAX_IV_QWORD],
+ const uint64_t src[IPSEC_MAX_IV_QWORD], uint32_t len)
+{
+ RTE_BUILD_BUG_ON(IPSEC_MAX_IV_SIZE != 2 * sizeof(uint64_t));
+
+ switch (len) {
+ case IPSEC_MAX_IV_SIZE:
+ dst[1] = src[1];
+ /* fallthrough */
+ case sizeof(uint64_t):
+ dst[0] = src[0];
+ /* fallthrough */
+ case 0:
+ break;
+ default:
+ /* should never happen */
+ RTE_ASSERT(NULL);
+ }
+}
+
+/*
+ * from RFC 4303 3.3.2.1.4:
+ * If the ESN option is enabled for the SA, the high-order 32
+ * bits of the sequence number are appended after the Next Header field
+ * for purposes of this computation, but are not transmitted.
+ */
+
+/*
+ * Helper function that moves ICV by 4B below, and inserts SQN.hibits.
+ * icv parameter points to the new start of ICV.
+ */
+static inline void
+insert_sqh(uint32_t sqh, void *picv, uint32_t icv_len)
+{
+ uint32_t *icv;
+ int32_t i;
+
+ RTE_ASSERT(icv_len % sizeof(uint32_t) == 0);
+
+ icv = picv;
+ icv_len = icv_len / sizeof(uint32_t);
+ for (i = icv_len; i-- != 0; icv[i] = icv[i - 1])
+ ;
+
+ icv[i] = sqh;
+}
+
+/*
+ * Helper function that moves ICV by 4B up, and removes SQN.hibits.
+ * icv parameter points to the new start of ICV.
+ */
+static inline void
+remove_sqh(void *picv, uint32_t icv_len)
+{
+ uint32_t i, *icv;
+
+ RTE_ASSERT(icv_len % sizeof(uint32_t) == 0);
+
+ icv = picv;
+ icv_len = icv_len / sizeof(uint32_t);
+ for (i = 0; i != icv_len; i++)
+ icv[i] = icv[i + 1];
+}
+
+/*
+ * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
+ */
+static inline void
+lksd_none_cop_prepare(struct rte_crypto_op *cop,
+ struct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)
+{
+ struct rte_crypto_sym_op *sop;
+
+ sop = cop->sym;
+ cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ cop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ sop->m_src = mb;
+ __rte_crypto_sym_op_attach_sym_session(sop, cs);
+}
+
+#endif /* _CRYPTO_H_ */
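For reference, a minimal sketch (not part of the sources above) of how the AES-GCM helpers in crypto.h fit together, assuming the library's internal headers (crypto.h, and sa.h for IPSEC_MAX_IV_QWORD) are in scope; the SPI and salt values are hypothetical:

/*
 * sketch only: build the RFC 4106 style IV and AAD for one AES-GCM packet.
 */
static void
example_gcm_fill(struct aead_gcm_iv *gcm_iv, struct aead_gcm_aad *gcm_aad,
	rte_be64_t sqc)
{
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	const rte_be32_t spi = rte_cpu_to_be_32(0x1000);	/* hypothetical SPI */
	const uint32_t salt = 0xdeadbeef;			/* hypothetical salt */

	/* IV is the 64-bit SQN in network byte order, second qword zeroed */
	gen_iv(iv, sqc);

	/* salt + IV + counter fixed to 1, as AES-GCM capable PMDs expect */
	aead_gcm_iv_fill(gcm_iv, iv[0], salt);

	/* SPI + 64-bit ESN (ESN enabled here), unused bytes zeroed */
	aead_gcm_aad_fill(gcm_aad, spi, sqc, 1);
}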
diff --git a/src/seastar/dpdk/lib/librte_ipsec/esp_inb.c b/src/seastar/dpdk/lib/librte_ipsec/esp_inb.c
new file mode 100644
index 000000000..4e0e12a85
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/esp_inb.c
@@ -0,0 +1,547 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
+ struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num);
+
+/*
+ * helper function to fill crypto_sym op for cipher+auth algorithms.
+ * used by inb_cop_prepare(), see below.
+ */
+static inline void
+sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->cipher.data.length = plen - sa->ctp.cipher.length;
+ sop->auth.data.offset = pofs + sa->ctp.auth.offset;
+ sop->auth.data.length = plen - sa->ctp.auth.length;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+}
+
+/*
+ * helper function to fill crypto_sym op for aead algorithms
+ * used by inb_cop_prepare(), see below.
+ */
+static inline void
+sop_aead_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->aead.data.length = plen - sa->ctp.cipher.length;
+ sop->aead.digest.data = icv->va;
+ sop->aead.digest.phys_addr = icv->pa;
+ sop->aead.aad.data = icv->va + sa->icv_len;
+ sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
+}
+
+/*
+ * setup crypto op and crypto sym op for ESP inbound packet.
+ */
+static inline void
+inb_cop_prepare(struct rte_crypto_op *cop,
+ const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
+{
+ struct rte_crypto_sym_op *sop;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint64_t *ivc, *ivp;
+ uint32_t algo;
+
+ algo = sa->algo_type;
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ pofs + sizeof(struct esp_hdr));
+
+ /* fill sym op fields */
+ sop = cop->sym;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_GCM:
+ sop_aead_prepare(sop, sa, icv, pofs, plen);
+
+ /* fill AAD IV (located inside crypto op) */
+ gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
+ sa->iv_ofs);
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
+
+ /* copy iv from the input packet to the cop */
+ ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
+ copy_iv(ivc, ivp, sa->iv_len);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
+
+ /* fill CTR block (located inside crypto op) */
+ ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
+ sa->iv_ofs);
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_NULL:
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
+ break;
+ }
+}
+
+/*
+ * for pure cryptodev (lookaside none) depending on SA settings,
+ * we might have to write some extra data to the packet.
+ */
+static inline void
+inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const union sym_op_data *icv)
+{
+ struct aead_gcm_aad *aad;
+
+ /* insert SQN.hi between ESP trailer and ICV */
+ if (sa->sqh_len != 0)
+ insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
+
+ /*
+ * fill AAD fields, if any (aad fields are placed after icv),
+ * right now we support only one AEAD algorithm: AES-GCM.
+ */
+ if (sa->aad_len != 0) {
+ aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+ aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
+ }
+}
+
+/*
+ * setup/update packet data and metadata for ESP inbound tunnel case.
+ */
+static inline int32_t
+inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
+ struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
+{
+ int32_t rc;
+ uint64_t sqn;
+ uint32_t clen, icv_ofs, plen;
+ struct rte_mbuf *ml;
+ struct esp_hdr *esph;
+
+ esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
+
+ /*
+ * retrieve and reconstruct SQN, then check it, then
+ * convert it back into network byte order.
+ */
+ sqn = rte_be_to_cpu_32(esph->seq);
+ if (IS_ESN(sa))
+ sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
+
+ rc = esn_inb_check_sqn(rsn, sa, sqn);
+ if (rc != 0)
+ return rc;
+
+ sqn = rte_cpu_to_be_64(sqn);
+
+ /* start packet manipulation */
+ plen = mb->pkt_len;
+ plen = plen - hlen;
+
+ ml = rte_pktmbuf_lastseg(mb);
+ icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
+
+ /* check that packet has a valid length */
+ clen = plen - sa->ctp.cipher.length;
+ if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
+ return -EBADMSG;
+
+ /* we have to allocate space for AAD somewhere,
+ * right now - just use free trailing space at the last segment.
+ * Would probably be more convenient to reserve space for AAD
+ * inside rte_crypto_op itself
+ * (as is already done for IV, whose space is reserved inside the cop).
+ */
+ if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
+
+ inb_pkt_xprepare(sa, sqn, icv);
+ return plen;
+}
+
+/*
+ * setup/update packets and crypto ops for ESP inbound case.
+ */
+uint16_t
+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, hl;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ struct replay_sqn *rsn;
+ union sym_op_data icv;
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+ rsn = rsn_acquire(sa);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+
+ hl = mb[i]->l2_len + mb[i]->l3_len;
+ rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
+ if (rc >= 0) {
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ rsn_release(sa, rsn);
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != num && k != 0) {
+ move_bad_mbufs(mb, dr, num, num - k);
+ rte_errno = EBADMSG;
+ }
+
+ return k;
+}
+
+/*
+ * Start with processing inbound packet.
+ * This is common part for both tunnel and transport mode.
+ * Extract information that will be needed later from mbuf metadata and
+ * actual packet data:
+ * - mbuf for packet's last segment
+ * - length of the L2/L3 headers
+ * - esp tail structure
+ */
+static inline void
+process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
+ struct esp_tail *espt, uint32_t *hlen)
+{
+ const struct esp_tail *pt;
+
+ ml[0] = rte_pktmbuf_lastseg(mb);
+ hlen[0] = mb->l2_len + mb->l3_len;
+ pt = rte_pktmbuf_mtod_offset(ml[0], const struct esp_tail *,
+ ml[0]->data_len - tlen);
+ espt[0] = pt[0];
+}
+
+/*
+ * packet checks for transport mode:
+ * - no reported IPsec related failures in ol_flags
+ * - tail length is valid
+ * - padding bytes are valid
+ */
+static inline int32_t
+trs_process_check(const struct rte_mbuf *mb, const struct rte_mbuf *ml,
+ struct esp_tail espt, uint32_t hlen, uint32_t tlen)
+{
+ const uint8_t *pd;
+ int32_t ofs;
+
+ ofs = ml->data_len - tlen;
+ pd = rte_pktmbuf_mtod_offset(ml, const uint8_t *, ofs);
+
+ return ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
+ ofs < 0 || tlen + hlen > mb->pkt_len ||
+ (espt.pad_len != 0 && memcmp(pd, esp_pad_bytes, espt.pad_len)));
+}
+
+/*
+ * packet checks for tunnel mode:
+ * - same as for transport mode
+ * - esp tail next proto contains the value expected for that SA
+ */
+static inline int32_t
+tun_process_check(const struct rte_mbuf *mb, struct rte_mbuf *ml,
+ struct esp_tail espt, uint32_t hlen, const uint32_t tlen, uint8_t proto)
+{
+ return (trs_process_check(mb, ml, espt, hlen, tlen) ||
+ espt.next_proto != proto);
+}
+
+/*
+ * step two for tunnel mode:
+ * - read SQN value (for future use)
+ * - cut off ICV, ESP tail and padding bytes
+ * - cut off ESP header and IV, also if needed - L2/L3 headers
+ * (controlled by *adj* value)
+ */
+static inline void *
+tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
+ uint32_t adj, uint32_t tlen, uint32_t *sqn)
+{
+ const struct esp_hdr *ph;
+
+ /* read SQN value */
+ ph = rte_pktmbuf_mtod_offset(mb, const struct esp_hdr *, hlen);
+ sqn[0] = ph->seq;
+
+ /* cut off ICV, ESP tail and padding bytes */
+ ml->data_len -= tlen;
+ mb->pkt_len -= tlen;
+
+ /* cut off L2/L3 headers, ESP header and IV */
+ return rte_pktmbuf_adj(mb, adj);
+}
+
+/*
+ * step two for transport mode:
+ * - read SQN value (for future use)
+ * - cut off ICV, ESP tail and padding bytes
+ * - cut off ESP header and IV
+ * - move L2/L3 header to fill the gap after ESP header removal
+ */
+static inline void *
+trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
+ uint32_t adj, uint32_t tlen, uint32_t *sqn)
+{
+ char *np, *op;
+
+ /* get start of the packet before modifications */
+ op = rte_pktmbuf_mtod(mb, char *);
+
+ /* cut off ESP header and IV */
+ np = tun_process_step2(mb, ml, hlen, adj, tlen, sqn);
+
+ /* move header bytes to fill the gap after ESP header removal */
+ remove_esph(np, op, hlen);
+ return np;
+}
+
+/*
+ * step three for transport mode:
+ * update mbuf metadata:
+ * - packet_type
+ * - ol_flags
+ */
+static inline void
+trs_process_step3(struct rte_mbuf *mb)
+{
+ /* reset mbuf packet type */
+ mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+
+ /* clear the PKT_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
+}
+
+/*
+ * step three for tunnel mode:
+ * update mbuf metadata:
+ * - packet_type
+ * - ol_flags
+ * - tx_offload
+ */
+static inline void
+tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
+{
+ /* reset mbuf metadata: L2/L3 len, packet type */
+ mb->packet_type = RTE_PTYPE_UNKNOWN;
+ mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
+
+ /* clear the PKT_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
+}
+
+
+/*
+ * *process* function for tunnel packets
+ */
+static inline uint16_t
+tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint32_t sqn[], uint32_t dr[], uint16_t num)
+{
+ uint32_t adj, i, k, tl;
+ uint32_t hl[num];
+ struct esp_tail espt[num];
+ struct rte_mbuf *ml[num];
+
+ const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
+ const uint32_t cofs = sa->ctp.cipher.offset;
+
+ /*
+ * to minimize stalls due to load latency,
+ * read mbufs metadata and esp tail first.
+ */
+ for (i = 0; i != num; i++)
+ process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+
+ adj = hl[i] + cofs;
+ tl = tlen + espt[i].pad_len;
+
+ /* check that packet is valid */
+ if (tun_process_check(mb[i], ml[i], espt[i], adj, tl,
+ sa->proto) == 0) {
+
+ /* modify packet's layout */
+ tun_process_step2(mb[i], ml[i], hl[i], adj,
+ tl, sqn + k);
+ /* update mbuf's metadata */
+ tun_process_step3(mb[i], sa->tx_offload.msk,
+ sa->tx_offload.val);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ return k;
+}
+
+
+/*
+ * *process* function for transport packets
+ */
+static inline uint16_t
+trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint32_t sqn[], uint32_t dr[], uint16_t num)
+{
+ char *np;
+ uint32_t i, k, l2, tl;
+ uint32_t hl[num];
+ struct esp_tail espt[num];
+ struct rte_mbuf *ml[num];
+
+ const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
+ const uint32_t cofs = sa->ctp.cipher.offset;
+
+ /*
+ * to minimize stalls due to load latency,
+ * read mbufs metadata and esp tail first.
+ */
+ for (i = 0; i != num; i++)
+ process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+
+ tl = tlen + espt[i].pad_len;
+ l2 = mb[i]->l2_len;
+
+ /* check that packet is valid */
+ if (trs_process_check(mb[i], ml[i], espt[i], hl[i] + cofs,
+ tl) == 0) {
+
+ /* modify packet's layout */
+ np = trs_process_step2(mb[i], ml[i], hl[i], cofs, tl,
+ sqn + k);
+ update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
+ l2, hl[i] - l2, espt[i].next_proto);
+
+ /* update mbuf's metadata */
+ trs_process_step3(mb[i]);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ return k;
+}
+
+/*
+ * for group of ESP inbound packets perform SQN check and update.
+ */
+static inline uint16_t
+esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
+ uint32_t dr[], uint16_t num)
+{
+ uint32_t i, k;
+ struct replay_sqn *rsn;
+
+ /* replay not enabled */
+ if (sa->replay.win_sz == 0)
+ return num;
+
+ rsn = rsn_update_start(sa);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+ if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
+ k++;
+ else
+ dr[i - k] = i;
+ }
+
+ rsn_update_finish(sa, rsn);
+ return k;
+}
+
+/*
+ * process group of ESP inbound packets.
+ */
+static inline uint16_t
+esp_inb_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process)
+{
+ uint32_t k, n;
+ struct rte_ipsec_sa *sa;
+ uint32_t sqn[num];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ /* process packets, extract seq numbers */
+ k = process(sa, mb, sqn, dr, num);
+
+ /* handle unprocessed mbufs */
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+
+ /* update SQN and replay window */
+ n = esp_inb_rsn_update(sa, sqn, dr, k);
+
+ /* handle mbufs with wrong SQN */
+ if (n != k && n != 0)
+ move_bad_mbufs(mb, dr, k, k - n);
+
+ if (n != num)
+ rte_errno = EBADMSG;
+
+ return n;
+}
+
+/*
+ * process group of ESP inbound tunnel packets.
+ */
+uint16_t
+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_inb_pkt_process(ss, mb, num, tun_process);
+}
+
+/*
+ * process group of ESP inbound transport packets.
+ */
+uint16_t
+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_inb_pkt_process(ss, mb, num, trs_process);
+}
diff --git a/src/seastar/dpdk/lib/librte_ipsec/esp_outb.c b/src/seastar/dpdk/lib/librte_ipsec/esp_outb.c
new file mode 100644
index 000000000..c798bc4c4
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/esp_outb.c
@@ -0,0 +1,580 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+
+/*
+ * helper function to fill crypto_sym op for cipher+auth algorithms.
+ * used by outb_cop_prepare(), see below.
+ */
+static inline void
+sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
+ sop->cipher.data.length = sa->ctp.cipher.length + plen;
+ sop->auth.data.offset = sa->ctp.auth.offset + pofs;
+ sop->auth.data.length = sa->ctp.auth.length + plen;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+}
+
+/*
+ * helper function to fill crypto_sym op for aead algorithms.
+ * used by outb_cop_prepare(), see below.
+ */
+static inline void
+sop_aead_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
+ sop->aead.data.length = sa->ctp.cipher.length + plen;
+ sop->aead.digest.data = icv->va;
+ sop->aead.digest.phys_addr = icv->pa;
+ sop->aead.aad.data = icv->va + sa->icv_len;
+ sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
+}
+
+/*
+ * setup crypto op and crypto sym op for ESP outbound packet.
+ */
+static inline void
+outb_cop_prepare(struct rte_crypto_op *cop,
+ const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
+ const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
+{
+ struct rte_crypto_sym_op *sop;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint32_t algo;
+
+ algo = sa->algo_type;
+
+ /* fill sym op fields */
+ sop = cop->sym;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_CBC:
+ /* Cipher-Auth (AES-CBC *) case */
+ case ALGO_TYPE_3DES_CBC:
+ /* Cipher-Auth (3DES-CBC *) case */
+ case ALGO_TYPE_NULL:
+ /* NULL case */
+ sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
+ break;
+ case ALGO_TYPE_AES_GCM:
+ /* AEAD (AES_GCM) case */
+ sop_aead_prepare(sop, sa, icv, hlen, plen);
+
+ /* fill AAD IV (located inside crypto op) */
+ gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
+ sa->iv_ofs);
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ /* Cipher-Auth (AES-CTR *) case */
+ sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
+
+ /* fill CTR block (located inside crypto op) */
+ ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
+ sa->iv_ofs);
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ }
+}
+
+/*
+ * setup/update packet data and metadata for ESP outbound tunnel case.
+ */
+static inline int32_t
+outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+ union sym_op_data *icv)
+{
+ uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
+ struct rte_mbuf *ml;
+ struct esp_hdr *esph;
+ struct esp_tail *espt;
+ char *ph, *pt;
+ uint64_t *iv;
+
+ /* calculate extra header space required */
+ hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
+
+ /* size of ipsec protected data */
+ l2len = mb->l2_len;
+ plen = mb->pkt_len - l2len;
+
+ /* number of bytes to encrypt */
+ clen = plen + sizeof(*espt);
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+ /* pad length + esp tail */
+ pdlen = clen - plen;
+ tlen = pdlen + sa->icv_len;
+
+ /* do append and prepend */
+ ml = rte_pktmbuf_lastseg(mb);
+ if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ /* prepend header */
+ ph = rte_pktmbuf_prepend(mb, hlen - l2len);
+ if (ph == NULL)
+ return -ENOSPC;
+
+ /* append tail */
+ pdofs = ml->data_len;
+ ml->data_len += tlen;
+ mb->pkt_len += tlen;
+ pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
+
+ /* update pkt l2/l3 len */
+ mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
+ sa->tx_offload.val;
+
+ /* copy tunnel pkt header */
+ rte_memcpy(ph, sa->hdr, sa->hdr_len);
+
+ /* update original and new ip header fields */
+ update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
+ sqn_low16(sqc));
+
+ /* update spi, seqn and iv */
+ esph = (struct esp_hdr *)(ph + sa->hdr_len);
+ iv = (uint64_t *)(esph + 1);
+ copy_iv(iv, ivp, sa->iv_len);
+
+ esph->spi = sa->spi;
+ esph->seq = sqn_low32(sqc);
+
+ /* offset for ICV */
+ pdofs += pdlen + sa->sqh_len;
+
+ /* pad length */
+ pdlen -= sizeof(*espt);
+
+ /* copy padding data */
+ rte_memcpy(pt, esp_pad_bytes, pdlen);
+
+ /* update esp trailer */
+ espt = (struct esp_tail *)(pt + pdlen);
+ espt->pad_len = pdlen;
+ espt->next_proto = sa->proto;
+
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
+
+ return clen;
+}
+
+/*
+ * for pure cryptodev (lookaside none) depending on SA settings,
+ * we might have to write some extra data to the packet.
+ */
+static inline void
+outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const union sym_op_data *icv)
+{
+ uint32_t *psqh;
+ struct aead_gcm_aad *aad;
+
+ /* insert SQN.hi between ESP trailer and ICV */
+ if (sa->sqh_len != 0) {
+ psqh = (uint32_t *)(icv->va - sa->sqh_len);
+ psqh[0] = sqn_hi32(sqc);
+ }
+
+ /*
+ * fill AAD fields, if any (aad fields are placed after icv),
+ * right now we support only one AEAD algorithm: AES-GCM.
+ */
+ if (sa->aad_len != 0) {
+ aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+ aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
+ }
+}
+
+/*
+ * setup/update packets and crypto ops for ESP outbound tunnel case.
+ */
+uint16_t
+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
+
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ return k;
+}
+
+/*
+ * setup/update packet data and metadata for ESP outbound transport case.
+ */
+static inline int32_t
+outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+ uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
+{
+ uint8_t np;
+ uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
+ struct rte_mbuf *ml;
+ struct esp_hdr *esph;
+ struct esp_tail *espt;
+ char *ph, *pt;
+ uint64_t *iv;
+
+ uhlen = l2len + l3len;
+ plen = mb->pkt_len - uhlen;
+
+ /* calculate extra header space required */
+ hlen = sa->iv_len + sizeof(*esph);
+
+ /* number of bytes to encrypt */
+ clen = plen + sizeof(*espt);
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+ /* pad length + esp tail */
+ pdlen = clen - plen;
+ tlen = pdlen + sa->icv_len;
+
+ /* do append and insert */
+ ml = rte_pktmbuf_lastseg(mb);
+ if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ /* prepend space for ESP header */
+ ph = rte_pktmbuf_prepend(mb, hlen);
+ if (ph == NULL)
+ return -ENOSPC;
+
+ /* append tail */
+ pdofs = ml->data_len;
+ ml->data_len += tlen;
+ mb->pkt_len += tlen;
+ pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
+
+ /* shift L2/L3 headers */
+ insert_esph(ph, ph + hlen, uhlen);
+
+ /* update ip header fields */
+ np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
+ IPPROTO_ESP);
+
+ /* update spi, seqn and iv */
+ esph = (struct esp_hdr *)(ph + uhlen);
+ iv = (uint64_t *)(esph + 1);
+ copy_iv(iv, ivp, sa->iv_len);
+
+ esph->spi = sa->spi;
+ esph->seq = sqn_low32(sqc);
+
+ /* offset for ICV */
+ pdofs += pdlen + sa->sqh_len;
+
+ /* pad length */
+ pdlen -= sizeof(*espt);
+
+ /* copy padding data */
+ rte_memcpy(pt, esp_pad_bytes, pdlen);
+
+ /* update esp trailer */
+ espt = (struct esp_tail *)(pt + pdlen);
+ espt->pad_len = pdlen;
+ espt->next_proto = np;
+
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
+
+ return clen;
+}
+
+/*
+ * setup/update packets and crypto ops for ESP outbound transport case.
+ */
+uint16_t
+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n, l2, l3;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv);
+
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ return k;
+}
+
+/*
+ * process outbound packets for SA with ESN support,
+ * for algorithms that require SQN.hibits to be implicitly included
+ * into digest computation.
+ * In that case we have to move ICV bytes back to their proper place.
+ */
+uint16_t
+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num)
+{
+ uint32_t i, k, icv_len, *icv;
+ struct rte_mbuf *ml;
+ struct rte_ipsec_sa *sa;
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ k = 0;
+ icv_len = sa->icv_len;
+
+ for (i = 0; i != num; i++) {
+ if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
+ ml = rte_pktmbuf_lastseg(mb[i]);
+ icv = rte_pktmbuf_mtod_offset(ml, void *,
+ ml->data_len - icv_len);
+ remove_sqh(icv, icv_len);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ /* handle unprocessed mbufs */
+ if (k != num) {
+ rte_errno = EBADMSG;
+ if (k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+ }
+
+ return k;
+}
+
+/*
+ * prepare packets for inline ipsec processing:
+ * set ol_flags and attach metadata.
+ */
+static inline void
+inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ uint32_t i, ol_flags;
+
+ ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
+ for (i = 0; i != num; i++) {
+
+ mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ if (ol_flags != 0)
+ rte_security_set_pkt_metadata(ss->security.ctx,
+ ss->security.ses, mb[i], NULL);
+ }
+}
+
+/*
+ * process group of ESP outbound tunnel packets destined for
+ * INLINE_CRYPTO type of device.
+ */
+uint16_t
+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
+
+ k += (rc >= 0);
+
+ /* failure, put packet into the death-row */
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not processed mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ inline_outb_mbuf_prepare(ss, mb, k);
+ return k;
+}
+
+/*
+ * process group of ESP outbound transport packets destined for
+ * INLINE_CRYPTO type of device.
+ */
+uint16_t
+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n, l2, l3;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
+ l2, l3, &icv);
+
+ k += (rc >= 0);
+
+ /* failure, put packet into the death-row */
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not processed mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ inline_outb_mbuf_prepare(ss, mb, k);
+ return k;
+}
+
+/*
+ * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ * actual processing is done by HW/PMD, just set flags and metadata.
+ */
+uint16_t
+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ inline_outb_mbuf_prepare(ss, mb, num);
+ return num;
+}
diff --git a/src/seastar/dpdk/lib/librte_ipsec/iph.h b/src/seastar/dpdk/lib/librte_ipsec/iph.h
new file mode 100644
index 000000000..58930cf18
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/iph.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IPH_H_
+#define _IPH_H_
+
+/**
+ * @file iph.h
+ * Contains functions/structures/macros to manipulate IPv4/IPv6 headers
+ * used internally by ipsec library.
+ */
+
+/*
+ * Move preceding (L3) headers down to remove ESP header and IV.
+ */
+static inline void
+remove_esph(char *np, char *op, uint32_t hlen)
+{
+ uint32_t i;
+
+ for (i = hlen; i-- != 0; np[i] = op[i])
+ ;
+}
+
+/*
+ * Move preceding (L3) headers up to free space for ESP header and IV.
+ */
+static inline void
+insert_esph(char *np, char *op, uint32_t hlen)
+{
+ uint32_t i;
+
+ for (i = 0; i != hlen; i++)
+ np[i] = op[i];
+}
+
+/* update original ip header fields for transport case */
+static inline int
+update_trs_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,
+ uint32_t l2len, uint32_t l3len, uint8_t proto)
+{
+ struct ipv4_hdr *v4h;
+ struct ipv6_hdr *v6h;
+ int32_t rc;
+
+ if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
+ v4h = p;
+ rc = v4h->next_proto_id;
+ v4h->next_proto_id = proto;
+ v4h->total_length = rte_cpu_to_be_16(plen - l2len);
+ } else if (l3len == sizeof(*v6h)) {
+ v6h = p;
+ rc = v6h->proto;
+ v6h->proto = proto;
+ v6h->payload_len = rte_cpu_to_be_16(plen - l2len -
+ sizeof(*v6h));
+ /* need to add support for IPv6 with options */
+ } else
+ rc = -ENOTSUP;
+
+ return rc;
+}
+
+/* update original and new ip header fields for tunnel case */
+static inline void
+update_tun_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,
+ uint32_t l2len, rte_be16_t pid)
+{
+ struct ipv4_hdr *v4h;
+ struct ipv6_hdr *v6h;
+
+ if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) {
+ v4h = p;
+ v4h->packet_id = pid;
+ v4h->total_length = rte_cpu_to_be_16(plen - l2len);
+ } else {
+ v6h = p;
+ v6h->payload_len = rte_cpu_to_be_16(plen - l2len -
+ sizeof(*v6h));
+ }
+}
+
+#endif /* _IPH_H_ */
diff --git a/src/seastar/dpdk/lib/librte_ipsec/ipsec_sqn.h b/src/seastar/dpdk/lib/librte_ipsec/ipsec_sqn.h
new file mode 100644
index 000000000..0c2f76a7a
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/ipsec_sqn.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IPSEC_SQN_H_
+#define _IPSEC_SQN_H_
+
+#define WINDOW_BUCKET_BITS 6 /* uint64_t */
+#define WINDOW_BUCKET_SIZE (1 << WINDOW_BUCKET_BITS)
+#define WINDOW_BIT_LOC_MASK (WINDOW_BUCKET_SIZE - 1)
+
+/* minimum number of buckets, power of 2 */
+#define WINDOW_BUCKET_MIN 2
+#define WINDOW_BUCKET_MAX (INT16_MAX + 1)
+
+#define IS_ESN(sa) ((sa)->sqn_mask == UINT64_MAX)
+
+#define SQN_ATOMIC(sa) ((sa)->type & RTE_IPSEC_SATP_SQN_ATOM)
+
+/*
+ * gets SQN.hi32 bits, SQN supposed to be in network byte order.
+ */
+static inline rte_be32_t
+sqn_hi32(rte_be64_t sqn)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ return (sqn >> 32);
+#else
+ return sqn;
+#endif
+}
+
+/*
+ * gets SQN.low32 bits, SQN supposed to be in network byte order.
+ */
+static inline rte_be32_t
+sqn_low32(rte_be64_t sqn)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ return sqn;
+#else
+ return (sqn >> 32);
+#endif
+}
+
+/*
+ * gets SQN.low16 bits, SQN supposed to be in network byte order.
+ */
+static inline rte_be16_t
+sqn_low16(rte_be64_t sqn)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ return sqn;
+#else
+ return (sqn >> 48);
+#endif
+}
+
+/*
+ * According to RFC 4303 A2.1, determine the high-order bits of the sequence number.
+ * Use 32-bit arithmetic inside, return uint64_t.
+ */
+static inline uint64_t
+reconstruct_esn(uint64_t t, uint32_t sqn, uint32_t w)
+{
+ uint32_t th, tl, bl;
+
+ tl = t;
+ th = t >> 32;
+ bl = tl - w + 1;
+
+ /* case A: window is within one sequence number subspace */
+ if (tl >= (w - 1))
+ th += (sqn < bl);
+ /* case B: window spans two sequence number subspaces */
+ else if (th != 0)
+ th -= (sqn >= bl);
+
+ /* return constructed sequence with proper high-order bits */
+ return (uint64_t)th << 32 | sqn;
+}
+
+/**
+ * Perform the replay checking.
+ *
+ * struct rte_ipsec_sa contains the window and window related parameters,
+ * such as the window size, bitmask, and the last acknowledged sequence number.
+ *
+ * Based on RFC 6479.
+ * Blocks are 64 bits unsigned integers
+ */
+static inline int32_t
+esn_inb_check_sqn(const struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
+ uint64_t sqn)
+{
+ uint32_t bit, bucket;
+
+ /* replay not enabled */
+ if (sa->replay.win_sz == 0)
+ return 0;
+
+ /* seq is larger than lastseq */
+ if (sqn > rsn->sqn)
+ return 0;
+
+ /* seq is outside window */
+ if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
+ return -EINVAL;
+
+ /* seq is inside the window */
+ bit = sqn & WINDOW_BIT_LOC_MASK;
+ bucket = (sqn >> WINDOW_BUCKET_BITS) & sa->replay.bucket_index_mask;
+
+ /* already seen packet */
+ if (rsn->window[bucket] & ((uint64_t)1 << bit))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * For outbound SA perform the sequence number update.
+ */
+static inline uint64_t
+esn_outb_update_sqn(struct rte_ipsec_sa *sa, uint32_t *num)
+{
+ uint64_t n, s, sqn;
+
+ n = *num;
+ if (SQN_ATOMIC(sa))
+ sqn = (uint64_t)rte_atomic64_add_return(&sa->sqn.outb.atom, n);
+ else {
+ sqn = sa->sqn.outb.raw + n;
+ sa->sqn.outb.raw = sqn;
+ }
+
+ /* overflow */
+ if (sqn > sa->sqn_mask) {
+ s = sqn - sa->sqn_mask;
+ *num = (s < n) ? n - s : 0;
+ }
+
+ return sqn - n;
+}
+
+/**
+ * For inbound SA perform the sequence number and replay window update.
+ */
+static inline int32_t
+esn_inb_update_sqn(struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
+ uint64_t sqn)
+{
+ uint32_t bit, bucket, last_bucket, new_bucket, diff, i;
+
+ /* handle ESN */
+ if (IS_ESN(sa))
+ sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
+
+ /* seq is outside window*/
+ if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
+ return -EINVAL;
+
+ /* update the bit */
+ bucket = (sqn >> WINDOW_BUCKET_BITS);
+
+ /* check if the seq is within the range */
+ if (sqn > rsn->sqn) {
+ last_bucket = rsn->sqn >> WINDOW_BUCKET_BITS;
+ diff = bucket - last_bucket;
+ /* seq is way after the range of WINDOW_SIZE */
+ if (diff > sa->replay.nb_bucket)
+ diff = sa->replay.nb_bucket;
+
+ for (i = 0; i != diff; i++) {
+ new_bucket = (i + last_bucket + 1) &
+ sa->replay.bucket_index_mask;
+ rsn->window[new_bucket] = 0;
+ }
+ rsn->sqn = sqn;
+ }
+
+ bucket &= sa->replay.bucket_index_mask;
+ bit = (uint64_t)1 << (sqn & WINDOW_BIT_LOC_MASK);
+
+ /* already seen packet */
+ if (rsn->window[bucket] & bit)
+ return -EINVAL;
+
+ rsn->window[bucket] |= bit;
+ return 0;
+}
+
+/**
+ * To achieve the ability to do multiple readers single writer for
+ * SA replay window information and sequence number (RSN)
+ * basic RCU schema is used:
+ * SA have 2 copies of RSN (one for readers, another for writers).
+ * Each RSN contains a rwlock that has to be grabbed (for read/write)
+ * to avoid races between readers and writer.
+ * Writer is responsible to make a copy of the reader RSN, update it
+ * and mark newly updated RSN as readers one.
+ * That approach is intended to minimize contention and cache sharing
+ * between writer and readers.
+ */
+
+/**
+ * Copy replay window and SQN.
+ */
+static inline void
+rsn_copy(const struct rte_ipsec_sa *sa, uint32_t dst, uint32_t src)
+{
+ uint32_t i, n;
+ struct replay_sqn *d;
+ const struct replay_sqn *s;
+
+ d = sa->sqn.inb.rsn[dst];
+ s = sa->sqn.inb.rsn[src];
+
+ n = sa->replay.nb_bucket;
+
+ d->sqn = s->sqn;
+ for (i = 0; i != n; i++)
+ d->window[i] = s->window[i];
+}
+
+/**
+ * Get RSN for read-only access.
+ */
+static inline struct replay_sqn *
+rsn_acquire(struct rte_ipsec_sa *sa)
+{
+ uint32_t n;
+ struct replay_sqn *rsn;
+
+ n = sa->sqn.inb.rdidx;
+ rsn = sa->sqn.inb.rsn[n];
+
+ if (!SQN_ATOMIC(sa))
+ return rsn;
+
+ /* check there are no writers */
+ while (rte_rwlock_read_trylock(&rsn->rwl) < 0) {
+ rte_pause();
+ n = sa->sqn.inb.rdidx;
+ rsn = sa->sqn.inb.rsn[n];
+ rte_compiler_barrier();
+ }
+
+ return rsn;
+}
+
+/**
+ * Release read-only access for RSN.
+ */
+static inline void
+rsn_release(struct rte_ipsec_sa *sa, struct replay_sqn *rsn)
+{
+ if (SQN_ATOMIC(sa))
+ rte_rwlock_read_unlock(&rsn->rwl);
+}
+
+/**
+ * Start RSN update.
+ */
+static inline struct replay_sqn *
+rsn_update_start(struct rte_ipsec_sa *sa)
+{
+ uint32_t k, n;
+ struct replay_sqn *rsn;
+
+ n = sa->sqn.inb.wridx;
+
+ /* no active writers */
+ RTE_ASSERT(n == sa->sqn.inb.rdidx);
+
+ if (!SQN_ATOMIC(sa))
+ return sa->sqn.inb.rsn[n];
+
+ k = REPLAY_SQN_NEXT(n);
+ sa->sqn.inb.wridx = k;
+
+ rsn = sa->sqn.inb.rsn[k];
+ rte_rwlock_write_lock(&rsn->rwl);
+ rsn_copy(sa, k, n);
+
+ return rsn;
+}
+
+/**
+ * Finish RSN update.
+ */
+static inline void
+rsn_update_finish(struct rte_ipsec_sa *sa, struct replay_sqn *rsn)
+{
+ uint32_t n;
+
+ if (!SQN_ATOMIC(sa))
+ return;
+
+ n = sa->sqn.inb.wridx;
+ RTE_ASSERT(n != sa->sqn.inb.rdidx);
+ RTE_ASSERT(rsn == sa->sqn.inb.rsn[n]);
+
+ rte_rwlock_write_unlock(&rsn->rwl);
+ sa->sqn.inb.rdidx = n;
+}
+
+
+#endif /* _IPSEC_SQN_H_ */
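For reference, a small worked example (not part of the sources above) of reconstruct_esn() following RFC 4303 Appendix A2; the sequence numbers are hypothetical and the sketch assumes this header is in scope (RTE_ASSERT is compiled out unless RTE_ENABLE_ASSERT is set):

/* sketch only: ESN reconstruction around a 2^32 subspace boundary, w = 64 */
static void
example_reconstruct_esn(void)
{
	const uint32_t w = 64;			/* replay window size */
	const uint64_t t = 0x100000005ULL;	/* last seen ESN: hi = 1, lo = 5 */

	/* low 32 bits near the top of the previous subspace:
	 * a late packet, so the high half is decremented (hi = 0) */
	RTE_ASSERT(reconstruct_esn(t, 0xfffffff0u, w) == 0xfffffff0ULL);

	/* low 32 bits just ahead of the last seen value:
	 * same subspace, high half unchanged (hi = 1) */
	RTE_ASSERT(reconstruct_esn(t, 0x00000007u, w) == 0x100000007ULL);
}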
diff --git a/src/seastar/dpdk/lib/librte_ipsec/meson.build b/src/seastar/dpdk/lib/librte_ipsec/meson.build
new file mode 100644
index 000000000..7ea0c7dbb
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+
+sources = files('esp_inb.c', 'esp_outb.c', 'sa.c', 'ses.c')
+
+headers = files('rte_ipsec.h', 'rte_ipsec_group.h', 'rte_ipsec_sa.h')
+
+deps += ['mbuf', 'net', 'cryptodev', 'security']
diff --git a/src/seastar/dpdk/lib/librte_ipsec/misc.h b/src/seastar/dpdk/lib/librte_ipsec/misc.h
new file mode 100644
index 000000000..693a4afdd
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/misc.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _MISC_H_
+#define _MISC_H_
+
+/**
+ * @file misc.h
+ * Contains miscellaneous functions/structures/macros used internally
+ * by ipsec library.
+ */
+
+/*
+ * Move bad (unprocessed) mbufs beyond the good (processed) ones.
+ * bad_idx[] contains the indexes of bad mbufs inside the mb[].
+ */
+static inline void
+move_bad_mbufs(struct rte_mbuf *mb[], const uint32_t bad_idx[], uint32_t nb_mb,
+ uint32_t nb_bad)
+{
+ uint32_t i, j, k;
+ struct rte_mbuf *drb[nb_bad];
+
+ j = 0;
+ k = 0;
+
+ /* copy bad ones into a temp place */
+ for (i = 0; i != nb_mb; i++) {
+ if (j != nb_bad && i == bad_idx[j])
+ drb[j++] = mb[i];
+ else
+ mb[k++] = mb[i];
+ }
+
+ /* copy bad ones after the good ones */
+ for (i = 0; i != nb_bad; i++)
+ mb[k + i] = drb[i];
+}
+
+#endif /* _MISC_H_ */
diff --git a/src/seastar/dpdk/lib/librte_ipsec/pad.h b/src/seastar/dpdk/lib/librte_ipsec/pad.h
new file mode 100644
index 000000000..2f5ccd00e
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/pad.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _PAD_H_
+#define _PAD_H_
+
+#define IPSEC_MAX_PAD_SIZE UINT8_MAX
+
+static const uint8_t esp_pad_bytes[IPSEC_MAX_PAD_SIZE] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255,
+};
+
+#endif /* _PAD_H_ */
diff --git a/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec.h b/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec.h
new file mode 100644
index 000000000..ff1ec801e
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_H_
+#define _RTE_IPSEC_H_
+
+/**
+ * @file rte_ipsec.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE IPsec support.
+ * librte_ipsec provides a framework for data-path IPsec protocol
+ * processing (ESP/AH).
+ */
+
+#include <rte_ipsec_sa.h>
+#include <rte_mbuf.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct rte_ipsec_session;
+
+/**
+ * IPsec session specific functions that will be used to:
+ * - prepare - for input mbufs and given IPsec session prepare crypto ops
+ * that can be enqueued into the cryptodev associated with given session
+ * (see *rte_ipsec_pkt_crypto_prepare* below for more details).
+ * - process - finalize processing of packets after crypto-dev finished
+ * with them or process packets that are subjects to inline IPsec offload
+ * (see rte_ipsec_pkt_process for more details).
+ */
+struct rte_ipsec_sa_pkt_func {
+ uint16_t (*prepare)(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[],
+ uint16_t num);
+ uint16_t (*process)(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[],
+ uint16_t num);
+};
+
+/**
+ * rte_ipsec_session is an aggregate structure that defines a particular
+ * IPsec Security Association (SA) on a given security/crypto device:
+ * - pointer to the SA object
+ * - security session action type
+ * - pointer to security/crypto session, plus other related data
+ * - session/device specific functions to prepare/process IPsec packets.
+ */
+struct rte_ipsec_session {
+ /**
+ * SA that session belongs to.
+ * Note that multiple sessions can belong to the same SA.
+ */
+ struct rte_ipsec_sa *sa;
+ /** session action type */
+ enum rte_security_session_action_type type;
+ /** session and related data */
+ union {
+ struct {
+ struct rte_cryptodev_sym_session *ses;
+ } crypto;
+ struct {
+ struct rte_security_session *ses;
+ struct rte_security_ctx *ctx;
+ uint32_t ol_flags;
+ } security;
+ };
+ /** functions to prepare/process IPsec packets */
+ struct rte_ipsec_sa_pkt_func pkt_func;
+} __rte_cache_aligned;
+
+/**
+ * Checks that inside given rte_ipsec_session crypto/security fields
+ * are filled correctly and setups function pointers based on these values.
+ * Expects that all fields except IPsec processing function pointers
+ * (*pkt_func*) will be filled correctly by caller.
+ * @param ss
+ * Pointer to the *rte_ipsec_session* object
+ * @return
+ * - Zero if operation completed successfully.
+ * - -EINVAL if the parameters are invalid.
+ */
+int __rte_experimental
+rte_ipsec_session_prepare(struct rte_ipsec_session *ss);
+
+/**
+ * For input mbufs and given IPsec session prepare crypto ops that can be
+ * enqueued into the cryptodev associated with given session.
+ * Expects that for each input packet:
+ * - l2_len, l3_len are setup correctly
+ * Note that erroneous mbufs are not freed by the function,
+ * but are placed beyond last valid mbuf in the *mb* array.
+ * It is a user responsibility to handle them further.
+ * @param ss
+ * Pointer to the *rte_ipsec_session* object the packets belong to.
+ * @param mb
+ * The address of an array of *num* pointers to *rte_mbuf* structures
+ * which contain the input packets.
+ * @param cop
+ * The address of an array of *num* pointers to the output *rte_crypto_op*
+ * structures.
+ * @param num
+ * The maximum number of packets to process.
+ * @return
+ * Number of successfully processed packets, with error code set in rte_errno.
+ */
+static inline uint16_t __rte_experimental
+rte_ipsec_pkt_crypto_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
+{
+ return ss->pkt_func.prepare(ss, mb, cop, num);
+}
+
+/**
+ * Finalise processing of packets after crypto-dev finished with them or
+ * process packets that are subjects to inline IPsec offload.
+ * Expects that for each input packet:
+ * - l2_len, l3_len are setup correctly
+ * Output mbufs will be:
+ * inbound - decrypted & authenticated, ESP(AH) related headers removed,
+ * *l2_len* and *l3_len* fields are updated.
+ * outbound - appropriate mbuf fields (ol_flags, tx_offloads, etc.)
+ * properly setup, if necessary - IP headers updated, ESP(AH) fields added.
+ * Note that erroneous mbufs are not freed by the function,
+ * but are placed beyond last valid mbuf in the *mb* array.
+ * It is a user responsibility to handle them further.
+ * @param ss
+ * Pointer to the *rte_ipsec_session* object the packets belong to.
+ * @param mb
+ * The address of an array of *num* pointers to *rte_mbuf* structures
+ * which contain the input packets.
+ * @param num
+ * The maximum number of packets to process.
+ * @return
+ * Number of successfully processed packets, with error code set in rte_errno.
+ */
+static inline uint16_t __rte_experimental
+rte_ipsec_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num)
+{
+ return ss->pkt_func.process(ss, mb, num);
+}
+
+#include <rte_ipsec_group.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_H_ */
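For reference, a minimal sketch (not part of the sources above) of the lookaside-crypto transmit side built on the API in rte_ipsec.h; the device id, queue pair, crypto-op allocation and error handling are assumptions left to the application, and rte_ipsec_session_prepare() is expected to have been called on the session beforehand:

#include <rte_ipsec.h>
#include <rte_cryptodev.h>

/* sketch only: prepare a burst for one session and hand it to a cryptodev */
static uint16_t
example_ipsec_enqueue(struct rte_ipsec_session *ss, uint8_t dev_id,
	uint16_t qp, struct rte_mbuf *mb[], struct rte_crypto_op *cop[],
	uint16_t num)
{
	uint16_t k;

	/* fill crypto ops for the packets of this session;
	 * mb[k..num-1] are the ones that failed, rte_errno is set */
	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);

	/* enqueue only the successfully prepared ops */
	return rte_cryptodev_enqueue_burst(dev_id, qp, cop, k);
}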
diff --git a/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_group.h b/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_group.h
new file mode 100644
index 000000000..740fa7c99
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_group.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_GROUP_H_
+#define _RTE_IPSEC_GROUP_H_
+
+/**
+ * @file rte_ipsec_group.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE IPsec support.
+ * It is not recommended to include this file directly,
+ * include <rte_ipsec.h> instead.
+ * Contains helper functions to process completed crypto-ops
+ * and group related packets by sessions they belong to.
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Used to group mbufs by some id.
+ * See below for particular usage.
+ */
+struct rte_ipsec_group {
+ union {
+ uint64_t val;
+ void *ptr;
+ } id; /**< grouped by value */
+ struct rte_mbuf **m; /**< start of the group */
+ uint32_t cnt; /**< number of entries in the group */
+ int32_t rc; /**< status code associated with the group */
+};
+
+/**
+ * Take crypto-op as an input and extract pointer to related ipsec session.
+ * @param cop
+ * The address of an input *rte_crypto_op* structure.
+ * @return
+ * The pointer to the related *rte_ipsec_session* structure.
+ */
+static inline __rte_experimental struct rte_ipsec_session *
+rte_ipsec_ses_from_crypto(const struct rte_crypto_op *cop)
+{
+ const struct rte_security_session *ss;
+ const struct rte_cryptodev_sym_session *cs;
+
+ if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ ss = cop->sym[0].sec_session;
+ return (void *)(uintptr_t)ss->opaque_data;
+ } else if (cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ cs = cop->sym[0].session;
+ return (void *)(uintptr_t)cs->opaque_data;
+ }
+ return NULL;
+}
+
+/**
+ * Take as input completed crypto ops, extract related mbufs
+ * and group them by rte_ipsec_session they belong to.
+ * For mbuf which crypto-op wasn't completed successfully
+ * PKT_RX_SEC_OFFLOAD_FAILED will be raised in ol_flags.
+ * Note that mbufs with undetermined SA (session-less) are not freed
+ * by the function, but are placed beyond mbufs for the last valid group.
+ * It is a user responsibility to handle them further.
+ * @param cop
+ * The address of an array of *num* pointers to the input *rte_crypto_op*
+ * structures.
+ * @param mb
+ * The address of an array of *num* pointers to output *rte_mbuf* structures.
+ * @param grp
+ * The address of an array of *num* to output *rte_ipsec_group* structures.
+ * @param num
+ * The maximum number of crypto-ops to process.
+ * @return
+ * Number of filled elements in *grp* array.
+ */
+static inline uint16_t __rte_experimental
+rte_ipsec_pkt_crypto_group(const struct rte_crypto_op *cop[],
+ struct rte_mbuf *mb[], struct rte_ipsec_group grp[], uint16_t num)
+{
+ uint32_t i, j, k, n;
+ void *ns, *ps;
+ struct rte_mbuf *m, *dr[num];
+
+ j = 0;
+ k = 0;
+ n = 0;
+ ps = NULL;
+
+ for (i = 0; i != num; i++) {
+
+ m = cop[i]->sym[0].m_src;
+ ns = cop[i]->sym[0].session;
+
+ m->ol_flags |= PKT_RX_SEC_OFFLOAD;
+ if (cop[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+ m->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+ /* no valid session found */
+ if (ns == NULL) {
+ dr[k++] = m;
+ continue;
+ }
+
+ /* different SA */
+ if (ps != ns) {
+
+ /*
+ * we already have an open group - finalize it,
+ * then open a new one.
+ */
+ if (ps != NULL) {
+ grp[n].id.ptr =
+ rte_ipsec_ses_from_crypto(cop[i - 1]);
+ grp[n].cnt = mb + j - grp[n].m;
+ n++;
+ }
+
+ /* start new group */
+ grp[n].m = mb + j;
+ ps = ns;
+ }
+
+ mb[j++] = m;
+ }
+
+ /* finalise last group */
+ if (ps != NULL) {
+ grp[n].id.ptr = rte_ipsec_ses_from_crypto(cop[i - 1]);
+ grp[n].cnt = mb + j - grp[n].m;
+ n++;
+ }
+
+ /* copy mbufs with unknown session beyond recognised ones */
+ if (k != 0 && k != num) {
+ for (i = 0; i != k; i++)
+ mb[j + i] = dr[i];
+ }
+
+ return n;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_GROUP_H_ */
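For reference, a minimal sketch (not part of the sources above) of the completion side using the grouping helper in rte_ipsec_group.h. It assumes the application stored the rte_ipsec_session pointer in the crypto/security session opaque_data (which rte_ipsec_ses_from_crypto() relies on); the device id, queue pair, freeing of ops and handling of failed or session-less mbufs are left out:

#include <rte_ipsec.h>
#include <rte_cryptodev.h>

/* sketch only: dequeue completed ops, group by session, finalize each group */
static void
example_ipsec_dequeue_process(uint8_t dev_id, uint16_t qp)
{
	struct rte_crypto_op *cop[32];
	struct rte_mbuf *mb[32];
	struct rte_ipsec_group grp[32];
	uint16_t i, n, ng;

	n = rte_cryptodev_dequeue_burst(dev_id, qp, cop, RTE_DIM(cop));

	/* extract mbufs and group them by the session they belong to */
	ng = rte_ipsec_pkt_crypto_group(
		(const struct rte_crypto_op **)(uintptr_t)cop, mb, grp, n);

	for (i = 0; i != ng; i++) {
		struct rte_ipsec_session *ss = grp[i].id.ptr;

		/* finalize packets (ICV removal, header updates, etc.) */
		rte_ipsec_pkt_process(ss, grp[i].m, grp[i].cnt);
	}
}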
diff --git a/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_sa.h b/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_sa.h
new file mode 100644
index 000000000..fd9b3ed60
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_sa.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_SA_H_
+#define _RTE_IPSEC_SA_H_
+
+/**
+ * @file rte_ipsec_sa.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Defines API to manage IPsec Security Association (SA) objects.
+ */
+
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_security.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * An opaque structure to represent Security Association (SA).
+ */
+struct rte_ipsec_sa;
+
+/**
+ * SA initialization parameters.
+ */
+struct rte_ipsec_sa_prm {
+
+ uint64_t userdata; /**< provided and interpreted by user */
+ uint64_t flags; /**< see RTE_IPSEC_SAFLAG_* below */
+ /** ipsec configuration */
+ struct rte_security_ipsec_xform ipsec_xform;
+ /** crypto session configuration */
+ struct rte_crypto_sym_xform *crypto_xform;
+ union {
+ struct {
+ uint8_t hdr_len; /**< tunnel header len */
+ uint8_t hdr_l3_off; /**< offset for IPv4/IPv6 header */
+ uint8_t next_proto; /**< next header protocol */
+ const void *hdr; /**< tunnel header template */
+ } tun; /**< tunnel mode related parameters */
+ struct {
+ uint8_t proto; /**< next header protocol */
+ } trs; /**< transport mode related parameters */
+ };
+
+ /**
+ * window size to enable sequence replay attack handling.
+ * replay checking is disabled if the window size is 0.
+ */
+ uint32_t replay_win_sz;
+};
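
As a rough sketch of how the tunnel-mode fields might be filled for an outbound IPv4-in-IPv4 ESP SA; the outer header template and its contents are illustrative assumptions, and ipsec_xform/crypto_xform still have to be set:

#include <netinet/in.h>
#include <rte_ip.h>
#include <rte_ipsec_sa.h>

/* illustrative outer IPv4 header template, most fields elided */
struct ipv4_hdr outb_hdr = {
	.version_ihl = 0x45,
	.time_to_live = 64,
	.next_proto_id = IPPROTO_ESP,
};

struct rte_ipsec_sa_prm prm = {
	.tun = {
		.hdr_len = sizeof(outb_hdr),
		.hdr_l3_off = 0,		/* no L2 part in the template */
		.next_proto = IPPROTO_IPIP,	/* inner packet is IPv4 */
		.hdr = &outb_hdr,
	},
	.replay_win_sz = 0,	/* replay check is not used for outbound */
	/* .ipsec_xform and .crypto_xform must be filled in as well */
};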
+
+/**
+ * Indicates whether the SA will need 'atomic' access
+ * to its sequence number and replay window.
+ * 'atomic' here means that the functions:
+ *  - rte_ipsec_pkt_crypto_prepare
+ *  - rte_ipsec_pkt_process
+ * can be safely used in an MT environment, as long as the user can guarantee
+ * that they obey the multiple readers/single writer model for
+ * SQN+replay_window operations.
+ * To be more specific:
+ * for an outbound SA there are no restrictions;
+ * for an inbound SA the caller has to guarantee that at any given moment
+ * only one thread is executing rte_ipsec_pkt_process() for the given SA.
+ * Note that it is the caller's responsibility to maintain the correct order
+ * of packets to be processed, i.e. to serialize process() invocations.
+ */
+#define RTE_IPSEC_SAFLAG_SQN_ATOM (1ULL << 0)
+
+/**
+ * SA type is a 64-bit value that contains the following information:
+ * - IP version (IPv4/IPv6)
+ * - IPsec proto (ESP/AH)
+ * - inbound/outbound
+ * - mode (TRANSPORT/TUNNEL)
+ * - for TUNNEL outer IP version (IPv4/IPv6)
+ * - are SA SQN operations 'atomic'
+ * - ESN enabled/disabled
+ * ...
+ */
+
+enum {
+ RTE_SATP_LOG2_IPV,
+ RTE_SATP_LOG2_PROTO,
+ RTE_SATP_LOG2_DIR,
+ RTE_SATP_LOG2_MODE,
+ RTE_SATP_LOG2_SQN = RTE_SATP_LOG2_MODE + 2,
+ RTE_SATP_LOG2_ESN,
+ RTE_SATP_LOG2_NUM
+};
+
+#define RTE_IPSEC_SATP_IPV_MASK (1ULL << RTE_SATP_LOG2_IPV)
+#define RTE_IPSEC_SATP_IPV4 (0ULL << RTE_SATP_LOG2_IPV)
+#define RTE_IPSEC_SATP_IPV6 (1ULL << RTE_SATP_LOG2_IPV)
+
+#define RTE_IPSEC_SATP_PROTO_MASK (1ULL << RTE_SATP_LOG2_PROTO)
+#define RTE_IPSEC_SATP_PROTO_AH (0ULL << RTE_SATP_LOG2_PROTO)
+#define RTE_IPSEC_SATP_PROTO_ESP (1ULL << RTE_SATP_LOG2_PROTO)
+
+#define RTE_IPSEC_SATP_DIR_MASK (1ULL << RTE_SATP_LOG2_DIR)
+#define RTE_IPSEC_SATP_DIR_IB (0ULL << RTE_SATP_LOG2_DIR)
+#define RTE_IPSEC_SATP_DIR_OB (1ULL << RTE_SATP_LOG2_DIR)
+
+#define RTE_IPSEC_SATP_MODE_MASK (3ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TRANS (0ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TUNLV4 (1ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TUNLV6 (2ULL << RTE_SATP_LOG2_MODE)
+
+#define RTE_IPSEC_SATP_SQN_MASK (1ULL << RTE_SATP_LOG2_SQN)
+#define RTE_IPSEC_SATP_SQN_RAW (0ULL << RTE_SATP_LOG2_SQN)
+#define RTE_IPSEC_SATP_SQN_ATOM (1ULL << RTE_SATP_LOG2_SQN)
+
+#define RTE_IPSEC_SATP_ESN_MASK (1ULL << RTE_SATP_LOG2_ESN)
+#define RTE_IPSEC_SATP_ESN_DISABLE (0ULL << RTE_SATP_LOG2_ESN)
+#define RTE_IPSEC_SATP_ESN_ENABLE (1ULL << RTE_SATP_LOG2_ESN)
+
+/**
+ * Get the type of a given SA.
+ * @return
+ * SA type value.
+ */
+uint64_t __rte_experimental
+rte_ipsec_sa_type(const struct rte_ipsec_sa *sa);
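
For example, the bit-fields above can be used to classify an SA from the value returned by rte_ipsec_sa_type(); a small sketch:

/* Sketch: does the given SA describe an outbound tunnel? */
static int
sa_is_outb_tunnel(const struct rte_ipsec_sa *sa)
{
	uint64_t tp = rte_ipsec_sa_type(sa);

	return (tp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_OB &&
		(tp & RTE_IPSEC_SATP_MODE_MASK) != RTE_IPSEC_SATP_MODE_TRANS;
}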
+
+/**
+ * Calculate required SA size based on provided input parameters.
+ * @param prm
+ * Parameters that will be used to initialise SA object.
+ * @return
+ * - Actual size required for SA with given parameters.
+ * - -EINVAL if the parameters are invalid.
+ */
+int __rte_experimental
+rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm);
+
+/**
+ * Initialise SA based on provided input parameters.
+ * @param sa
+ * SA object to initialise.
+ * @param prm
+ * Parameters used to initialise given SA object.
+ * @param size
+ * size of the provided buffer for SA.
+ * @return
+ * - Actual size of SA object if operation completed successfully.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOSPC if the size of the provided buffer is not big enough.
+ */
+int __rte_experimental
+rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ uint32_t size);
+
+/**
+ * cleanup SA
+ * @param sa
+ * Pointer to SA object to de-initialize.
+ */
+void __rte_experimental
+rte_ipsec_sa_fini(struct rte_ipsec_sa *sa);
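
Putting the three calls together, a minimal allocation/initialisation sketch, assuming prm has already been filled with valid parameters (rte_zmalloc() could be used instead of calloc()):

#include <stdlib.h>
#include <rte_ipsec_sa.h>

static struct rte_ipsec_sa *
sa_create(const struct rte_ipsec_sa_prm *prm)
{
	int32_t sz;
	struct rte_ipsec_sa *sa;

	sz = rte_ipsec_sa_size(prm);	/* query the required size first */
	if (sz < 0)
		return NULL;

	sa = calloc(1, sz);
	if (sa == NULL)
		return NULL;

	/* returns the actual SA size on success, negative errno otherwise */
	if (rte_ipsec_sa_init(sa, prm, sz) < 0) {
		free(sa);
		return NULL;
	}
	return sa;
}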
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_SA_H_ */
diff --git a/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_version.map b/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_version.map
new file mode 100644
index 000000000..ee9f1961b
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/rte_ipsec_version.map
@@ -0,0 +1,15 @@
+EXPERIMENTAL {
+ global:
+
+ rte_ipsec_pkt_crypto_group;
+ rte_ipsec_pkt_crypto_prepare;
+ rte_ipsec_pkt_process;
+ rte_ipsec_sa_fini;
+ rte_ipsec_sa_init;
+ rte_ipsec_sa_size;
+ rte_ipsec_sa_type;
+ rte_ipsec_ses_from_crypto;
+ rte_ipsec_session_prepare;
+
+ local: *;
+};
diff --git a/src/seastar/dpdk/lib/librte_ipsec/sa.c b/src/seastar/dpdk/lib/librte_ipsec/sa.c
new file mode 100644
index 000000000..846e317fe
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/sa.c
@@ -0,0 +1,668 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+#define MBUF_MAX_L2_LEN RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
+#define MBUF_MAX_L3_LEN RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
+
+/* some helper structures */
+struct crypto_xform {
+ struct rte_crypto_auth_xform *auth;
+ struct rte_crypto_cipher_xform *cipher;
+ struct rte_crypto_aead_xform *aead;
+};
+
+/*
+ * helper routine, fills internal crypto_xform structure.
+ */
+static int
+fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
+ const struct rte_ipsec_sa_prm *prm)
+{
+ struct rte_crypto_sym_xform *xf, *xfn;
+
+ memset(xform, 0, sizeof(*xform));
+
+ xf = prm->crypto_xform;
+ if (xf == NULL)
+ return -EINVAL;
+
+ xfn = xf->next;
+
+ /* for AEAD just one xform required */
+ if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xfn != NULL)
+ return -EINVAL;
+ xform->aead = &xf->aead;
+ /*
+ * CIPHER+AUTH xforms are expected in strict order,
+ * depending on SA direction:
+ * inbound: AUTH+CIPHER
+ * outbound: CIPHER+AUTH
+ */
+ } else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
+
+ /* wrong order or no cipher */
+ if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
+ xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return -EINVAL;
+
+ xform->auth = &xf->auth;
+ xform->cipher = &xfn->cipher;
+
+ } else {
+
+ /* wrong order or no auth */
+ if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
+ xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ return -EINVAL;
+
+ xform->cipher = &xf->cipher;
+ xform->auth = &xfn->auth;
+ }
+
+ return 0;
+}
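
To illustrate the expected ordering, a sketch of the xform chain a caller would pass via prm->crypto_xform for an outbound non-AEAD SA; algorithms, keys and IV offsets are omitted here and would need to be filled in:

#include <rte_cryptodev.h>

struct rte_crypto_sym_xform auth_xf = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE,
	.next = NULL,
};

/* outbound: CIPHER first, AUTH chained after it */
struct rte_crypto_sym_xform cipher_xf = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	.next = &auth_xf,
};
/* an inbound SA would chain them the other way: AUTH first, then CIPHER */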
+
+uint64_t __rte_experimental
+rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
+{
+ return sa->type;
+}
+
+/**
+ * Based on the number of buckets, calculate the required size of the
+ * structure that holds the replay window and sequence number (RSN) information.
+ */
+static size_t
+rsn_size(uint32_t nb_bucket)
+{
+ size_t sz;
+ struct replay_sqn *rsn;
+
+ sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
+ sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
+ return sz;
+}
+
+/*
+ * For a given window size, calculate the required number of buckets.
+ */
+static uint32_t
+replay_num_bucket(uint32_t wsz)
+{
+ uint32_t nb;
+
+ nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
+ WINDOW_BUCKET_SIZE);
+ nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);
+
+ return nb;
+}
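
As a worked example, assuming WINDOW_BUCKET_SIZE is 64 (one 64-bit word of window bits per bucket): a requested window of 100 sequence numbers is first rounded up to a multiple of the bucket size, giving RTE_ALIGN_MUL_CEIL(100, 64) / 64 = 2 buckets; rte_align32pow2(2) = 2, which is already at least WINDOW_BUCKET_MIN, so 2 buckets are used.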
+
+static int32_t
+ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
+{
+ uint32_t n, sz, wsz;
+
+ wsz = *wnd_sz;
+ n = 0;
+
+ if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
+
+ /*
+		 * RFC 4303 recommends 64 as the minimum window size.
+		 * There is no point in using ESN mode without an SQN window,
+		 * so make sure we have a window of at least 64 when ESN is enabled.
+ */
+ wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
+ RTE_IPSEC_SATP_ESN_DISABLE) ?
+ wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
+ if (wsz != 0)
+ n = replay_num_bucket(wsz);
+ }
+
+ if (n > WINDOW_BUCKET_MAX)
+ return -EINVAL;
+
+ *wnd_sz = wsz;
+ *nb_bucket = n;
+
+ sz = rsn_size(n);
+ if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
+ sz *= REPLAY_SQN_NUM;
+
+ sz += sizeof(struct rte_ipsec_sa);
+ return sz;
+}
+
+void __rte_experimental
+rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
+{
+ memset(sa, 0, sa->size);
+}
+
+/*
+ * Determine expected SA type based on input parameters.
+ */
+static int
+fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
+{
+ uint64_t tp;
+
+ tp = 0;
+
+ if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+ tp |= RTE_IPSEC_SATP_PROTO_AH;
+ else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+ tp |= RTE_IPSEC_SATP_PROTO_ESP;
+ else
+ return -EINVAL;
+
+ if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+ tp |= RTE_IPSEC_SATP_DIR_OB;
+ else if (prm->ipsec_xform.direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+ tp |= RTE_IPSEC_SATP_DIR_IB;
+ else
+ return -EINVAL;
+
+ if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ if (prm->ipsec_xform.tunnel.type ==
+ RTE_SECURITY_IPSEC_TUNNEL_IPV4)
+ tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
+ else if (prm->ipsec_xform.tunnel.type ==
+ RTE_SECURITY_IPSEC_TUNNEL_IPV6)
+ tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
+ else
+ return -EINVAL;
+
+ if (prm->tun.next_proto == IPPROTO_IPIP)
+ tp |= RTE_IPSEC_SATP_IPV4;
+ else if (prm->tun.next_proto == IPPROTO_IPV6)
+ tp |= RTE_IPSEC_SATP_IPV6;
+ else
+ return -EINVAL;
+ } else if (prm->ipsec_xform.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
+ tp |= RTE_IPSEC_SATP_MODE_TRANS;
+ if (prm->trs.proto == IPPROTO_IPIP)
+ tp |= RTE_IPSEC_SATP_IPV4;
+ else if (prm->trs.proto == IPPROTO_IPV6)
+ tp |= RTE_IPSEC_SATP_IPV6;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ /* check for ESN flag */
+ if (prm->ipsec_xform.options.esn == 0)
+ tp |= RTE_IPSEC_SATP_ESN_DISABLE;
+ else
+ tp |= RTE_IPSEC_SATP_ESN_ENABLE;
+
+ /* interpret flags */
+ if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
+ tp |= RTE_IPSEC_SATP_SQN_ATOM;
+ else
+ tp |= RTE_IPSEC_SATP_SQN_RAW;
+
+ *type = tp;
+ return 0;
+}
+
+/*
+ * Init ESP inbound specific things.
+ */
+static void
+esp_inb_init(struct rte_ipsec_sa *sa)
+{
+ /* these params may differ with new algorithms support */
+ sa->ctp.auth.offset = 0;
+ sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
+ sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
+ sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
+}
+
+/*
+ * Init ESP inbound tunnel specific things.
+ */
+static void
+esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+{
+ sa->proto = prm->tun.next_proto;
+ esp_inb_init(sa);
+}
+
+/*
+ * Init ESP outbound specific things.
+ */
+static void
+esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
+{
+ uint8_t algo_type;
+
+ sa->sqn.outb.raw = 1;
+
+ /* these params may differ with new algorithms support */
+ sa->ctp.auth.offset = hlen;
+ sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
+
+ algo_type = sa->algo_type;
+
+ switch (algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ case ALGO_TYPE_AES_CTR:
+ case ALGO_TYPE_NULL:
+ sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
+ sa->iv_len;
+ sa->ctp.cipher.length = 0;
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
+ sa->ctp.cipher.length = sa->iv_len;
+ break;
+ }
+}
+
+/*
+ * Init ESP outbound tunnel specific things.
+ */
+static void
+esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+{
+ sa->proto = prm->tun.next_proto;
+ sa->hdr_len = prm->tun.hdr_len;
+ sa->hdr_l3_off = prm->tun.hdr_l3_off;
+
+ /* update l2_len and l3_len fields for outbound mbuf */
+ sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
+ sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
+
+ memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
+
+ esp_outb_init(sa, sa->hdr_len);
+}
+
+/*
+ * helper function, init SA structure.
+ */
+static int
+esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ const struct crypto_xform *cxf)
+{
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ if (cxf->aead != NULL) {
+ switch (cxf->aead->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ /* RFC 4106 */
+ sa->aad_len = sizeof(struct aead_gcm_aad);
+ sa->icv_len = cxf->aead->digest_length;
+ sa->iv_ofs = cxf->aead->iv.offset;
+ sa->iv_len = sizeof(uint64_t);
+ sa->pad_align = IPSEC_PAD_AES_GCM;
+ sa->algo_type = ALGO_TYPE_AES_GCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ sa->icv_len = cxf->auth->digest_length;
+ sa->iv_ofs = cxf->cipher->iv.offset;
+ sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
+
+ switch (cxf->cipher->algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ sa->pad_align = IPSEC_PAD_NULL;
+ sa->iv_len = 0;
+ sa->algo_type = ALGO_TYPE_NULL;
+ break;
+
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sa->pad_align = IPSEC_PAD_AES_CBC;
+ sa->iv_len = IPSEC_MAX_IV_SIZE;
+ sa->algo_type = ALGO_TYPE_AES_CBC;
+ break;
+
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ /* RFC 3686 */
+ sa->pad_align = IPSEC_PAD_AES_CTR;
+ sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
+ sa->algo_type = ALGO_TYPE_AES_CTR;
+ break;
+
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ /* RFC 1851 */
+ sa->pad_align = IPSEC_PAD_3DES_CBC;
+ sa->iv_len = IPSEC_3DES_IV_SIZE;
+ sa->algo_type = ALGO_TYPE_3DES_CBC;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ sa->udata = prm->userdata;
+ sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
+ sa->salt = prm->ipsec_xform.salt;
+
+ /* preserve all values except l2_len and l3_len */
+ sa->tx_offload.msk =
+ ~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
+ 0, 0, 0, 0, 0);
+
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ esp_inb_tun_init(sa, prm);
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ esp_inb_init(sa);
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ esp_outb_tun_init(sa, prm);
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ esp_outb_init(sa, 0);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * helper function, init SA replay structure.
+ */
+static void
+fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
+{
+ sa->replay.win_sz = wnd_sz;
+ sa->replay.nb_bucket = nb_bucket;
+ sa->replay.bucket_index_mask = nb_bucket - 1;
+ sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
+ if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
+ sa->sqn.inb.rsn[1] = (struct replay_sqn *)
+ ((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
+}
+
+int __rte_experimental
+rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
+{
+ uint64_t type;
+ uint32_t nb, wsz;
+ int32_t rc;
+
+ if (prm == NULL)
+ return -EINVAL;
+
+ /* determine SA type */
+ rc = fill_sa_type(prm, &type);
+ if (rc != 0)
+ return rc;
+
+ /* determine required size */
+ wsz = prm->replay_win_sz;
+ return ipsec_sa_size(type, &wsz, &nb);
+}
+
+int __rte_experimental
+rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ uint32_t size)
+{
+ int32_t rc, sz;
+ uint32_t nb, wsz;
+ uint64_t type;
+ struct crypto_xform cxf;
+
+ if (sa == NULL || prm == NULL)
+ return -EINVAL;
+
+ /* determine SA type */
+ rc = fill_sa_type(prm, &type);
+ if (rc != 0)
+ return rc;
+
+ /* determine required size */
+ wsz = prm->replay_win_sz;
+ sz = ipsec_sa_size(type, &wsz, &nb);
+ if (sz < 0)
+ return sz;
+ else if (size < (uint32_t)sz)
+ return -ENOSPC;
+
+ /* only esp is supported right now */
+ if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+ return -EINVAL;
+
+ if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ prm->tun.hdr_len > sizeof(sa->hdr))
+ return -EINVAL;
+
+ rc = fill_crypto_xform(&cxf, type, prm);
+ if (rc != 0)
+ return rc;
+
+ /* initialize SA */
+
+ memset(sa, 0, sz);
+ sa->type = type;
+ sa->size = sz;
+
+ /* check for ESN flag */
+ sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
+ UINT32_MAX : UINT64_MAX;
+
+ rc = esp_sa_init(sa, prm, &cxf);
+ if (rc != 0)
+ rte_ipsec_sa_fini(sa);
+
+ /* fill replay window related fields */
+ if (nb != 0)
+ fill_sa_replay(sa, wsz, nb);
+
+ return sz;
+}
+
+/*
+ * setup crypto ops for LOOKASIDE_PROTO type of devices.
+ */
+static inline void
+lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
+{
+ uint32_t i;
+ struct rte_crypto_sym_op *sop;
+
+ for (i = 0; i != num; i++) {
+ sop = cop[i]->sym;
+ cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
+ sop->m_src = mb[i];
+ __rte_security_attach_session(sop, ss->security.ses);
+ }
+}
+
+/*
+ * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
+ * Note that for LOOKASIDE_PROTO all packet modifications will be
+ * performed by PMD/HW.
+ * SW only has to prepare the crypto op.
+ */
+static uint16_t
+lksd_proto_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
+{
+ lksd_proto_cop_prepare(ss, mb, cop, num);
+ return num;
+}
+
+/*
+ * simplest pkt process routine:
+ * all actual processing is already done by HW/PMD,
+ * just check mbuf ol_flags.
+ * used for:
+ * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
+ * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
+ * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
+ */
+static uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num)
+{
+ uint32_t i, k;
+ uint32_t dr[num];
+
+ RTE_SET_USED(ss);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+ if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
+ k++;
+ else
+ dr[i - k] = i;
+ }
+
+ /* handle unprocessed mbufs */
+ if (k != num) {
+ rte_errno = EBADMSG;
+ if (k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+ }
+
+ return k;
+}
+
+/*
+ * Select packet processing function for session on LOOKASIDE_NONE
+ * type of device.
+ */
+static int
+lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
+ struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ rc = 0;
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->prepare = esp_inb_pkt_prepare;
+ pf->process = esp_inb_tun_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->prepare = esp_inb_pkt_prepare;
+ pf->process = esp_inb_trs_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->prepare = esp_outb_tun_prepare;
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_sqh_process : pkt_flag_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->prepare = esp_outb_trs_prepare;
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_sqh_process : pkt_flag_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
+
+/*
+ * Select packet processing function for session on INLINE_CRYPTO
+ * type of device.
+ */
+static int
+inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+ struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ rc = 0;
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->process = esp_inb_tun_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->process = esp_inb_trs_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->process = inline_outb_tun_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->process = inline_outb_trs_pkt_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
+
+/*
+ * Select packet processing function for given session based on SA parameters
+ * and the type of the device associated with the session.
+ */
+int
+ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
+ const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ rc = 0;
+ pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
+
+ switch (ss->type) {
+ case RTE_SECURITY_ACTION_TYPE_NONE:
+ rc = lksd_none_pkt_func_select(sa, pf);
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ rc = inline_crypto_pkt_func_select(sa, pf);
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
+ RTE_IPSEC_SATP_DIR_IB)
+ pf->process = pkt_flag_process;
+ else
+ pf->process = inline_proto_outb_pkt_process;
+ break;
+ case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+ pf->prepare = lksd_proto_prepare;
+ pf->process = pkt_flag_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
diff --git a/src/seastar/dpdk/lib/librte_ipsec/sa.h b/src/seastar/dpdk/lib/librte_ipsec/sa.h
new file mode 100644
index 000000000..ffb5fb4f8
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/sa.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _SA_H_
+#define _SA_H_
+
+#include <rte_rwlock.h>
+
+#define IPSEC_MAX_HDR_SIZE 64
+#define IPSEC_MAX_IV_SIZE 16
+#define IPSEC_MAX_IV_QWORD (IPSEC_MAX_IV_SIZE / sizeof(uint64_t))
+
+/* padding alignment for different algorithms */
+enum {
+ IPSEC_PAD_DEFAULT = 4,
+ IPSEC_PAD_3DES_CBC = 8,
+ IPSEC_PAD_AES_CBC = IPSEC_MAX_IV_SIZE,
+ IPSEC_PAD_AES_CTR = IPSEC_PAD_DEFAULT,
+ IPSEC_PAD_AES_GCM = IPSEC_PAD_DEFAULT,
+ IPSEC_PAD_NULL = IPSEC_PAD_DEFAULT,
+};
+
+/* iv sizes for different algorithms */
+enum {
+ IPSEC_IV_SIZE_DEFAULT = IPSEC_MAX_IV_SIZE,
+ IPSEC_AES_CTR_IV_SIZE = sizeof(uint64_t),
+	/* TripleDES supports IV size of 32bits or 64bits, but the library
+ * only supports 64bits.
+ */
+ IPSEC_3DES_IV_SIZE = sizeof(uint64_t),
+};
+
+/* these definitions probably have to be in rte_crypto_sym.h */
+union sym_op_ofslen {
+ uint64_t raw;
+ struct {
+ uint32_t offset;
+ uint32_t length;
+ };
+};
+
+union sym_op_data {
+#ifdef __SIZEOF_INT128__
+ __uint128_t raw;
+#endif
+ struct {
+ uint8_t *va;
+ rte_iova_t pa;
+ };
+};
+
+#define REPLAY_SQN_NUM 2
+#define REPLAY_SQN_NEXT(n) ((n) ^ 1)
+
+struct replay_sqn {
+ rte_rwlock_t rwl;
+ uint64_t sqn;
+ __extension__ uint64_t window[0];
+};
+
+/* IPsec SA supported algorithms */
+enum sa_algo_type {
+ ALGO_TYPE_NULL = 0,
+ ALGO_TYPE_3DES_CBC,
+ ALGO_TYPE_AES_CBC,
+ ALGO_TYPE_AES_CTR,
+ ALGO_TYPE_AES_GCM,
+ ALGO_TYPE_MAX
+};
+
+struct rte_ipsec_sa {
+
+ uint64_t type; /* type of given SA */
+ uint64_t udata; /* user defined */
+ uint32_t size; /* size of given sa object */
+ uint32_t spi;
+ /* sqn calculations related */
+ uint64_t sqn_mask;
+ struct {
+ uint32_t win_sz;
+ uint16_t nb_bucket;
+ uint16_t bucket_index_mask;
+ } replay;
+ /* template for crypto op fields */
+ struct {
+ union sym_op_ofslen cipher;
+ union sym_op_ofslen auth;
+ } ctp;
+ /* tx_offload template for tunnel mbuf */
+ struct {
+ uint64_t msk;
+ uint64_t val;
+ } tx_offload;
+ uint32_t salt;
+ uint8_t algo_type;
+ uint8_t proto; /* next proto */
+ uint8_t aad_len;
+ uint8_t hdr_len;
+ uint8_t hdr_l3_off;
+ uint8_t icv_len;
+ uint8_t sqh_len;
+ uint8_t iv_ofs; /* offset for algo-specific IV inside crypto op */
+ uint8_t iv_len;
+ uint8_t pad_align;
+
+ /* template for tunnel header */
+ uint8_t hdr[IPSEC_MAX_HDR_SIZE];
+
+ /*
+ * sqn and replay window
+	 * In case of an SA handled by multiple threads, the *sqn* cacheline
+	 * could be shared by multiple cores.
+	 * To minimise the performance impact, we try to locate it in a separate
+	 * place from other frequently accessed data.
+ */
+ union {
+ union {
+ rte_atomic64_t atom;
+ uint64_t raw;
+ } outb;
+ struct {
+ uint32_t rdidx; /* read index */
+ uint32_t wridx; /* write index */
+ struct replay_sqn *rsn[REPLAY_SQN_NUM];
+ } inb;
+ } sqn;
+
+} __rte_cache_aligned;
+
+int
+ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
+ const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf);
+
+/* inbound processing */
+
+uint16_t
+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+/* outbound processing */
+
+uint16_t
+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num);
+
+uint16_t
+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+#endif /* _SA_H_ */
diff --git a/src/seastar/dpdk/lib/librte_ipsec/ses.c b/src/seastar/dpdk/lib/librte_ipsec/ses.c
new file mode 100644
index 000000000..11580970e
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_ipsec/ses.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include "sa.h"
+
+static int
+session_check(struct rte_ipsec_session *ss)
+{
+ if (ss == NULL || ss->sa == NULL)
+ return -EINVAL;
+
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+ if (ss->crypto.ses == NULL)
+ return -EINVAL;
+ } else {
+ if (ss->security.ses == NULL)
+ return -EINVAL;
+ if ((ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ ss->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
+ ss->security.ctx == NULL)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int __rte_experimental
+rte_ipsec_session_prepare(struct rte_ipsec_session *ss)
+{
+ int32_t rc;
+ struct rte_ipsec_sa_pkt_func fp;
+
+ rc = session_check(ss);
+ if (rc != 0)
+ return rc;
+
+ rc = ipsec_sa_pkt_func_select(ss, ss->sa, &fp);
+ if (rc != 0)
+ return rc;
+
+ ss->pkt_func = fp;
+
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE)
+ ss->crypto.ses->opaque_data = (uintptr_t)ss;
+ else
+ ss->security.ses->opaque_data = (uintptr_t)ss;
+
+ return 0;
+}
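
A minimal preparation sketch for an RTE_SECURITY_ACTION_TYPE_NONE session, assuming the SA and the cryptodev symmetric session were created beforehand:

#include <rte_ipsec.h>

static int
setup_lookaside_none_session(struct rte_ipsec_session *ss,
	struct rte_ipsec_sa *sa, struct rte_cryptodev_sym_session *crypto_ses)
{
	ss->sa = sa;
	ss->type = RTE_SECURITY_ACTION_TYPE_NONE;
	ss->crypto.ses = crypto_ses;

	/* selects pkt_func and stores ss in the session's opaque_data */
	return rte_ipsec_session_prepare(ss);
}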