From 483eb2f56657e8e7f419ab1a4fab8dce9ade8609 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 27 Apr 2024 20:24:20 +0200 Subject: Adding upstream version 14.2.21. Signed-off-by: Daniel Baumann --- src/spdk/dpdk/examples/ipsec-secgw/Makefile | 82 + src/spdk/dpdk/examples/ipsec-secgw/ep0.cfg | 160 ++ src/spdk/dpdk/examples/ipsec-secgw/ep1.cfg | 160 ++ src/spdk/dpdk/examples/ipsec-secgw/esp.c | 461 ++++++ src/spdk/dpdk/examples/ipsec-secgw/esp.h | 26 + src/spdk/dpdk/examples/ipsec-secgw/ipip.h | 174 ++ src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.c | 1828 ++++++++++++++++++++++ src/spdk/dpdk/examples/ipsec-secgw/ipsec.c | 562 +++++++ src/spdk/dpdk/examples/ipsec-secgw/ipsec.h | 242 +++ src/spdk/dpdk/examples/ipsec-secgw/meson.build | 14 + src/spdk/dpdk/examples/ipsec-secgw/parser.c | 563 +++++++ src/spdk/dpdk/examples/ipsec-secgw/parser.h | 88 ++ src/spdk/dpdk/examples/ipsec-secgw/rt.c | 206 +++ src/spdk/dpdk/examples/ipsec-secgw/sa.c | 1010 ++++++++++++ src/spdk/dpdk/examples/ipsec-secgw/sp4.c | 506 ++++++ src/spdk/dpdk/examples/ipsec-secgw/sp6.c | 620 ++++++++ 16 files changed, 6702 insertions(+) create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/Makefile create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/ep0.cfg create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/ep1.cfg create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/esp.c create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/esp.h create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/ipip.h create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.c create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/ipsec.c create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/ipsec.h create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/meson.build create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/parser.c create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/parser.h create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/rt.c create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/sa.c create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/sp4.c create mode 100644 src/spdk/dpdk/examples/ipsec-secgw/sp6.c (limited to 'src/spdk/dpdk/examples/ipsec-secgw') diff --git a/src/spdk/dpdk/examples/ipsec-secgw/Makefile b/src/spdk/dpdk/examples/ipsec-secgw/Makefile new file mode 100644 index 00000000..02d41e39 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/Makefile @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2016 Intel Corporation + +APP = ipsec-secgw + +# +# all source are stored in SRCS-y +# +SRCS-y += parser.c +SRCS-y += ipsec.c +SRCS-y += esp.c +SRCS-y += sp4.c +SRCS-y += sp6.c +SRCS-y += sa.c +SRCS-y += rt.c +SRCS-y += ipsec-secgw.c + +CFLAGS += -gdwarf-2 + +# Build using pkg-config variables if possible +$(shell pkg-config --exists libdpdk) +ifeq ($(.SHELLSTATUS),0) + +all: shared +.PHONY: shared static +shared: build/$(APP)-shared + ln -sf $(APP)-shared build/$(APP) +static: build/$(APP)-static + ln -sf $(APP)-static build/$(APP) + +PC_FILE := $(shell pkg-config --path libdpdk) +CFLAGS += -O3 $(shell pkg-config --cflags libdpdk) +LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk) +LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk) + +CFLAGS += -DALLOW_EXPERIMENTAL_API + +build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) + +build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC) + +build: + @mkdir -p $@ + +.PHONY: clean 
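The pkg-config branch of this Makefile builds the example in two linkage flavours, with build/ipsec-secgw symlinked to whichever was built last. A minimal build session, assuming only that a DPDK installation's libdpdk.pc is on the pkg-config search path (the session itself is illustrative, not part of this patch):

    $ pkg-config --modversion libdpdk   # confirm DPDK is discoverable
    $ make                              # default target "shared" -> build/ipsec-secgw-shared
    $ make static                       # -> build/ipsec-secgw-static
    $ ls build
    ipsec-secgw  ipsec-secgw-shared  ipsec-secgw-static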
+clean: + rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared + rmdir --ignore-fail-on-non-empty build + +else + +ifeq ($(RTE_SDK),) + $(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overridden by command line or environment +RTE_TARGET ?= x86_64-native-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y) +$(error "RTE_LIBRTE_SECURITY is required to build ipsec-secgw") +endif +endif + +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += -O3 -gdwarf-2 +CFLAGS += $(WERROR_FLAGS) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS_sa.o += -diag-disable=vec +endif + +ifeq ($(DEBUG),1) +CFLAGS += -DIPSEC_DEBUG -fstack-protector-all -O0 +endif + +include $(RTE_SDK)/mk/rte.extapp.mk + +endif diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ep0.cfg b/src/spdk/dpdk/examples/ipsec-secgw/ep0.cfg new file mode 100644 index 00000000..299aa9e0 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ep0.cfg @@ -0,0 +1,160 @@ +########################################################################### +# IPSEC-SECGW Endpoint sample configuration +# +# The main purpose of this file is to show how to configure two systems +# back-to-back that would forward traffic through an IPsec tunnel. This +# file is the Endpoint 0 configuration. To use this configuration file, +# add the following command-line option: +# +# -f ./ep0.cfg +# +########################################################################### + +#SP IPv4 rules +sp ipv4 out esp protect 5 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 6 pri 1 dst 192.168.106.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 10 pri 1 dst 192.168.175.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 11 pri 1 dst 192.168.176.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.240.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.241.0/24 sport 0:65535 dport 0:65535 + +sp ipv4 in esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 110 pri 1 dst 192.168.185.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 111 pri 1 dst 192.168.186.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 116 pri 1 dst 192.168.211.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 126 pri 1 dst 192.168.66.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.245.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.246.0/24 sport 0:65535 dport 0:65535 + +#SP IPv6 rules +sp ipv6 out esp protect 5 pri 1 dst 0000:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 6 pri 1 dst 0000:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 
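Each sp line above is one security-policy entry: a direction (in/out), the protocol (esp), an action (protect <SPI> or bypass in this file), a priority, and ACL match fields (destination prefix plus source/destination port ranges). A hypothetical rule, not part of this configuration, that would protect outbound traffic to 10.1.0.0/16 under the SA whose SPI is 99:

# sp ipv4 out esp protect 99 pri 1 dst 10.1.0.0/16 sport 0:65535 dport 0:65535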
0:65535 dport 0:65535 +sp ipv6 out esp protect 10 pri 1 dst 0000:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 11 pri 1 dst 0000:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 25 pri 1 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +sp ipv6 in esp protect 15 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 16 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +#SA rules +sa out 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 + +sa out 6 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.1.6 dst 172.16.2.6 + +sa out 10 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa out 11 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa out 15 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.5 \ +dst 172.16.2.5 + +sa out 16 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.6 \ +dst 172.16.2.6 + +sa out 25 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:5555 \ +dst 2222:2222:2222:2222:2222:2222:2222:5555 + +sa out 26 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:6666 \ +dst 2222:2222:2222:2222:2222:2222:2222:6666 + +sa in 105 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 + +sa in 106 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa in 110 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa in 111 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo 
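Each sa line defines the SA that a matching protect <SPI> policy refers to: the number after in/out is the SPI, followed by the cipher and auth algorithms with colon-separated key bytes, then the mode (transport, or ipv4-tunnel/ipv6-tunnel with outer src/dst addresses). A hypothetical pairing, not part of this file, showing how SPI 99 would tie a policy to its SA (keys abbreviated with "..." for readability; the parser expects the full 16-byte and 20-byte keys):

# sp ipv4 out esp protect 99 pri 1 dst 10.1.0.0/16 sport 0:65535 dport 0:65535
# sa out 99 cipher_algo aes-128-cbc cipher_key de:ad:be:ef:... \
# auth_algo sha1-hmac auth_key c0:ff:ee:... mode ipv4-tunnel src 172.16.1.9 dst 172.16.2.9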
sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa in 115 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.5 \ +dst 172.16.1.5 + +sa in 116 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa in 125 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:5555 \ +dst 1111:1111:1111:1111:1111:1111:1111:5555 + +sa in 126 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:6666 \ +dst 1111:1111:1111:1111:1111:1111:1111:6666 + +#Routing rules +rt ipv4 dst 172.16.2.5/32 port 0 +rt ipv4 dst 172.16.2.6/32 port 1 +rt ipv4 dst 192.168.175.0/24 port 0 +rt ipv4 dst 192.168.176.0/24 port 1 +rt ipv4 dst 192.168.240.0/24 port 0 +rt ipv4 dst 192.168.241.0/24 port 1 +rt ipv4 dst 192.168.115.0/24 port 2 +rt ipv4 dst 192.168.116.0/24 port 3 +rt ipv4 dst 192.168.65.0/24 port 2 +rt ipv4 dst 192.168.66.0/24 port 3 +rt ipv4 dst 192.168.185.0/24 port 2 +rt ipv4 dst 192.168.186.0/24 port 3 +rt ipv4 dst 192.168.210.0/24 port 2 +rt ipv4 dst 192.168.211.0/24 port 3 +rt ipv4 dst 192.168.245.0/24 port 2 +rt ipv4 dst 192.168.246.0/24 port 3 + +rt ipv6 dst 2222:2222:2222:2222:2222:2222:2222:5555/116 port 0 +rt ipv6 dst 2222:2222:2222:2222:2222:2222:2222:6666/116 port 1 +rt ipv6 dst 0000:0000:1111:1111:0000:0000:0000:0000/116 port 0 +rt ipv6 dst 0000:0000:1111:1111:1111:1111:0000:0000/116 port 1 +rt ipv6 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/116 port 3 +rt ipv6 dst ffff:0000:0000:0000:5555:5555:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:0000:0000:6666:6666:0000:0000/116 port 3 +rt ipv6 dst ffff:0000:1111:1111:0000:0000:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:1111:1111:1111:1111:0000:0000/116 port 3 diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ep1.cfg b/src/spdk/dpdk/examples/ipsec-secgw/ep1.cfg new file mode 100644 index 00000000..3f6ff811 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ep1.cfg @@ -0,0 +1,160 @@ +########################################################################### +# IPSEC-SECGW Endpoint1 sample configuration +# +# The main purpose of this file is to show how to configure two systems +# back-to-back that would forward traffic through an IPsec tunnel. This +# file is the Endpoint1 configuration. 
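The rt lines above are longest-prefix-match routes: after IPsec processing, the packet's destination address is looked up in an LPM table (rte_lpm/rte_lpm6) and the packet is queued to the given output port. A hypothetical route, not part of either endpoint file:

# rt ipv4 dst 10.1.0.0/16 port 1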
To use this configuration file, +# add the following command-line option: +# +# -f ./ep1.cfg +# +########################################################################### + +#SP IPv4 rules +sp ipv4 in esp protect 5 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 6 pri 1 dst 192.168.106.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 10 pri 1 dst 192.168.175.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 11 pri 1 dst 192.168.176.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass dst 192.168.240.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass dst 192.168.241.0/24 sport 0:65535 dport 0:65535 + +sp ipv4 out esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 110 pri 1 dst 192.168.185.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 111 pri 1 dst 192.168.186.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 116 pri 1 dst 192.168.211.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 126 pri 1 dst 192.168.66.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.245.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.246.0/24 sport 0:65535 dport 0:65535 + +#SP IPv6 rules +sp ipv6 in esp protect 5 pri 1 dst 0000:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 6 pri 1 dst 0000:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 10 pri 1 dst 0000:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 11 pri 1 dst 0000:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 25 pri 1 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +sp ipv6 out esp protect 15 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 16 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +#SA rules +sa in 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 
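ep1.cfg is the mirror image of ep0.cfg: every rule keeps its SPI, keys and tunnel endpoints, and only the direction flips, so traffic encrypted by one endpoint is accepted and decrypted by the other. For example, SPI 5 across the two files:

# ep0.cfg: sa out 5 ... mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
# ep1.cfg: sa in 5 ... mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5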
+ +sa in 6 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.1.6 dst 172.16.2.6 + +sa in 10 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa in 11 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa in 15 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.5 \ +dst 172.16.2.5 + +sa in 16 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.6 \ +dst 172.16.2.6 + +sa in 25 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:5555 \ +dst 2222:2222:2222:2222:2222:2222:2222:5555 + +sa in 26 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:6666 \ +dst 2222:2222:2222:2222:2222:2222:2222:6666 + +sa out 105 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 + +sa out 106 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa out 110 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa out 111 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa out 115 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.5 \ +dst 172.16.1.5 + +sa out 116 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa out 125 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:5555 \ +dst 1111:1111:1111:1111:1111:1111:1111:5555 + +sa out 126 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:6666 \ +dst 1111:1111:1111:1111:1111:1111:1111:6666 + +#Routing rules +rt ipv4 dst 172.16.1.5/32 port 0 +rt ipv4 dst 172.16.1.6/32 port 1 +rt ipv4 dst 192.168.185.0/24 port 0 +rt ipv4 dst 192.168.186.0/24 port 1 +rt ipv4 dst 192.168.245.0/24 port 0 +rt ipv4 dst 192.168.246.0/24 port 1 +rt ipv4 dst 192.168.105.0/24 port 2 +rt ipv4 dst 192.168.106.0/24 port 3 +rt ipv4 dst 192.168.55.0/24 port 2 +rt ipv4 dst 192.168.56.0/24 port 3 +rt ipv4 dst 192.168.175.0/24 port 2 +rt ipv4 dst 192.168.176.0/24 port 3 +rt ipv4 dst 
192.168.200.0/24 port 2 +rt ipv4 dst 192.168.201.0/24 port 3 +rt ipv4 dst 192.168.240.0/24 port 2 +rt ipv4 dst 192.168.241.0/24 port 3 + +rt ipv6 dst 1111:1111:1111:1111:1111:1111:1111:5555/116 port 0 +rt ipv6 dst 1111:1111:1111:1111:1111:1111:1111:6666/116 port 1 +rt ipv6 dst ffff:0000:1111:1111:0000:0000:0000:0000/116 port 0 +rt ipv6 dst ffff:0000:1111:1111:1111:1111:0000:0000/116 port 1 +rt ipv6 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/116 port 3 +rt ipv6 dst 0000:0000:0000:0000:5555:5555:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:0000:0000:6666:6666:0000:0000/116 port 3 +rt ipv6 dst 0000:0000:1111:1111:0000:0000:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:1111:1111:1111:1111:0000:0000/116 port 3 diff --git a/src/spdk/dpdk/examples/ipsec-secgw/esp.c b/src/spdk/dpdk/examples/ipsec-secgw/esp.c new file mode 100644 index 00000000..ee9e590a --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/esp.c @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ipsec.h" +#include "esp.h" +#include "ipip.h" + +int +esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4; + struct rte_crypto_sym_op *sym_cop; + int32_t payload_len, ip_hdr_len; + + RTE_ASSERT(sa != NULL); + if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) + return 0; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(cop != NULL); + + ip4 = rte_pktmbuf_mtod(m, struct ip *); + if (likely(ip4->ip_v == IPVERSION)) + ip_hdr_len = ip4->ip_hl * 4; + else if (ip4->ip_v == IP6_VERSION) + /* XXX No option headers supported */ + ip_hdr_len = sizeof(struct ip6_hdr); + else { + RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n", + ip4->ip_v); + return -EINVAL; + } + + payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len - + sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len; + + if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) { + RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n", + payload_len, sa->block_size); + return -EINVAL; + } + + sym_cop = get_sym_cop(cop); + sym_cop->m_src = m; + + if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { + sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) + + sa->iv_len; + sym_cop->aead.data.length = payload_len; + + struct cnt_blk *icb; + uint8_t *aad; + uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr)); + + icb = get_cnt_blk(m); + icb->salt = sa->salt; + memcpy(&icb->iv, iv, 8); + icb->cnt = rte_cpu_to_be_32(1); + + aad = get_aad(m); + memcpy(aad, iv - sizeof(struct esp_hdr), 8); + sym_cop->aead.aad.data = aad; + sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m, + aad - rte_pktmbuf_mtod(m, uint8_t *)); + + sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + } else { + sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) + + sa->iv_len; + sym_cop->cipher.data.length = payload_len; + + struct cnt_blk *icb; + uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr)); + uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, + uint8_t *, IV_OFFSET); + + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_AES_CBC: + /* Copy IV at the end 
of crypto operation */ + rte_memcpy(iv_ptr, iv, sa->iv_len); + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + icb = get_cnt_blk(m); + icb->salt = sa->salt; + memcpy(&icb->iv, iv, 8); + icb->cnt = rte_cpu_to_be_32(1); + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + + switch (sa->auth_algo) { + case RTE_CRYPTO_AUTH_NULL: + case RTE_CRYPTO_AUTH_SHA1_HMAC: + case RTE_CRYPTO_AUTH_SHA256_HMAC: + sym_cop->auth.data.offset = ip_hdr_len; + sym_cop->auth.data.length = sizeof(struct esp_hdr) + + sa->iv_len + payload_len; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", + sa->auth_algo); + return -EINVAL; + } + + sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + } + + return 0; +} + +int +esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4, *ip; + struct ip6_hdr *ip6; + uint8_t *nexthdr, *pad_len; + uint8_t *padding; + uint16_t i; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + RTE_ASSERT(cop != NULL); + + if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) || + (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) { + if (m->ol_flags & PKT_RX_SEC_OFFLOAD) { + if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + else + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } else + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + } + + if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { + RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n"); + return -1; + } + + if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO && + sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) { + nexthdr = &m->inner_esp_next_proto; + } else { + nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*, + rte_pktmbuf_pkt_len(m) - sa->digest_len - 1); + pad_len = nexthdr - 1; + + padding = pad_len - *pad_len; + for (i = 0; i < *pad_len; i++) { + if (padding[i] != i + 1) { + RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n"); + return -EINVAL; + } + } + + if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) { + RTE_LOG(ERR, IPSEC_ESP, + "failed to remove pad_len + digest\n"); + return -EINVAL; + } + } + + if (unlikely(sa->flags == TRANSPORT)) { + ip = rte_pktmbuf_mtod(m, struct ip *); + ip4 = (struct ip *)rte_pktmbuf_adj(m, + sizeof(struct esp_hdr) + sa->iv_len); + if (likely(ip->ip_v == IPVERSION)) { + memmove(ip4, ip, ip->ip_hl * 4); + ip4->ip_p = *nexthdr; + ip4->ip_len = htons(rte_pktmbuf_data_len(m)); + } else { + ip6 = (struct ip6_hdr *)ip4; + /* XXX No option headers supported */ + memmove(ip6, ip, sizeof(struct ip6_hdr)); + ip6->ip6_nxt = *nexthdr; + ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) - + sizeof(struct ip6_hdr)); + } + } else + ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len); + + return 0; +} + +int +esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4; + struct ip6_hdr *ip6; + struct esp_hdr *esp = NULL; + uint8_t *padding = NULL, *new_ip, nlp; + struct rte_crypto_sym_op *sym_cop; + int32_t i; + uint16_t pad_payload_len, pad_len, ip_hdr_len; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + + ip_hdr_len = 0; + + ip4 = rte_pktmbuf_mtod(m, struct ip *); + if (likely(ip4->ip_v == IPVERSION)) { + if (unlikely(sa->flags == TRANSPORT)) { + ip_hdr_len = ip4->ip_hl * 4; + nlp = ip4->ip_p; + } else + nlp = 
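/*
 * Reference sketch (added for clarity, not upstream code): the ESP trailer
 * that esp_inbound_post() validates above is laid out per RFC 4303 as
 *
 *   payload | pad[0] .. pad[n-1] | pad_len | next_header | ICV
 *
 * where the default self-describing padding is the monotonic sequence
 * 1, 2, 3, ... - exactly what the loop over padding[i] checks before
 * rte_pktmbuf_trim() removes pad_len + 2 + digest_len trailing bytes.
 */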
IPPROTO_IPIP; + } else if (ip4->ip_v == IP6_VERSION) { + if (unlikely(sa->flags == TRANSPORT)) { + /* XXX No option headers supported */ + ip_hdr_len = sizeof(struct ip6_hdr); + ip6 = (struct ip6_hdr *)ip4; + nlp = ip6->ip6_nxt; + } else + nlp = IPPROTO_IPV6; + } else { + RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n", + ip4->ip_v); + return -EINVAL; + } + + /* Padded payload length */ + pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) - + ip_hdr_len + 2, sa->block_size); + pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m); + + RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL || + sa->flags == TRANSPORT); + + if (likely(sa->flags == IP4_TUNNEL)) + ip_hdr_len = sizeof(struct ip); + else if (sa->flags == IP6_TUNNEL) + ip_hdr_len = sizeof(struct ip6_hdr); + else if (sa->flags != TRANSPORT) { + RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n", + sa->flags); + return -EINVAL; + } + + /* Check maximum packet size */ + if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len + + pad_payload_len + sa->digest_len > IP_MAXPACKET)) { + RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n"); + return -EINVAL; + } + + /* Add trailer padding if it is not constructed by HW */ + if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || + (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO && + !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) { + padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + + sa->digest_len); + if (unlikely(padding == NULL)) { + RTE_LOG(ERR, IPSEC_ESP, + "not enough mbuf trailing space\n"); + return -ENOSPC; + } + rte_prefetch0(padding); + } + + switch (sa->flags) { + case IP4_TUNNEL: + ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len, + &sa->src, &sa->dst); + esp = (struct esp_hdr *)(ip4 + 1); + break; + case IP6_TUNNEL: + ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len, + &sa->src, &sa->dst); + esp = (struct esp_hdr *)(ip6 + 1); + break; + case TRANSPORT: + new_ip = (uint8_t *)rte_pktmbuf_prepend(m, + sizeof(struct esp_hdr) + sa->iv_len); + memmove(new_ip, ip4, ip_hdr_len); + esp = (struct esp_hdr *)(new_ip + ip_hdr_len); + ip4 = (struct ip *)new_ip; + if (likely(ip4->ip_v == IPVERSION)) { + ip4->ip_p = IPPROTO_ESP; + ip4->ip_len = htons(rte_pktmbuf_data_len(m)); + } else { + ip6 = (struct ip6_hdr *)new_ip; + ip6->ip6_nxt = IPPROTO_ESP; + ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) - + sizeof(struct ip6_hdr)); + } + } + + sa->seq++; + esp->spi = rte_cpu_to_be_32(sa->spi); + esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq); + + /* set iv */ + uint64_t *iv = (uint64_t *)(esp + 1); + if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { + *iv = rte_cpu_to_be_64(sa->seq); + } else { + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_AES_CBC: + memset(iv, 0, sa->iv_len); + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + *iv = rte_cpu_to_be_64(sa->seq); + break; + default: + RTE_LOG(ERR, IPSEC_ESP, + "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + } + + if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) { + /* Set the inner esp next protocol for HW trailer */ + m->inner_esp_next_proto = nlp; + m->packet_type |= RTE_PTYPE_TUNNEL_ESP; + } else { + padding[pad_len - 2] = pad_len - 2; + padding[pad_len - 1] = nlp; + } + goto done; + } + + RTE_ASSERT(cop != NULL); + sym_cop = get_sym_cop(cop); + sym_cop->m_src = m; + + if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { + uint8_t *aad; + + 
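/*
 * Worked example of the padding math above (numbers illustrative, not from
 * the patch): IPv4 transport mode, AES-CBC with block_size = 16, packet
 * length 120, ip_hdr_len 20. The ESP payload is 100 bytes; adding the two
 * trailer bytes (pad_len, next_header) gives 102, which RTE_ALIGN_CEIL
 * rounds up to pad_payload_len = 112. Hence pad_len = 112 + 20 - 120 = 12:
 * ten pad bytes (filled 1..10 by the loops below) plus the two trailer
 * bytes.
 */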
sym_cop->aead.data.offset = ip_hdr_len + + sizeof(struct esp_hdr) + sa->iv_len; + sym_cop->aead.data.length = pad_payload_len; + + /* Fill pad_len using default sequential scheme */ + for (i = 0; i < pad_len - 2; i++) + padding[i] = i + 1; + padding[pad_len - 2] = pad_len - 2; + padding[pad_len - 1] = nlp; + + struct cnt_blk *icb = get_cnt_blk(m); + icb->salt = sa->salt; + icb->iv = rte_cpu_to_be_64(sa->seq); + icb->cnt = rte_cpu_to_be_32(1); + + aad = get_aad(m); + memcpy(aad, esp, 8); + sym_cop->aead.aad.data = aad; + sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m, + aad - rte_pktmbuf_mtod(m, uint8_t *)); + + sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + } else { + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_AES_CBC: + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct esp_hdr); + sym_cop->cipher.data.length = pad_payload_len + sa->iv_len; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct esp_hdr) + sa->iv_len; + sym_cop->cipher.data.length = pad_payload_len; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + + /* Fill pad_len using default sequential scheme */ + for (i = 0; i < pad_len - 2; i++) + padding[i] = i + 1; + padding[pad_len - 2] = pad_len - 2; + padding[pad_len - 1] = nlp; + + struct cnt_blk *icb = get_cnt_blk(m); + icb->salt = sa->salt; + icb->iv = rte_cpu_to_be_64(sa->seq); + icb->cnt = rte_cpu_to_be_32(1); + + switch (sa->auth_algo) { + case RTE_CRYPTO_AUTH_NULL: + case RTE_CRYPTO_AUTH_SHA1_HMAC: + case RTE_CRYPTO_AUTH_SHA256_HMAC: + sym_cop->auth.data.offset = ip_hdr_len; + sym_cop->auth.data.length = sizeof(struct esp_hdr) + + sa->iv_len + pad_payload_len; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", + sa->auth_algo); + return -EINVAL; + } + + sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + } + +done: + return 0; +} + +int +esp_outbound_post(struct rte_mbuf *m, + struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + + if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) || + (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) { + m->ol_flags |= PKT_TX_SEC_OFFLOAD; + } else { + RTE_ASSERT(cop != NULL); + if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { + RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n"); + return -1; + } + } + + return 0; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/esp.h b/src/spdk/dpdk/examples/ipsec-secgw/esp.h new file mode 100644 index 00000000..792312cf --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/esp.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ +#ifndef __RTE_IPSEC_XFORM_ESP_H__ +#define __RTE_IPSEC_XFORM_ESP_H__ + +struct mbuf; + + +int +esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_outbound_post(struct rte_mbuf *m, struct 
ipsec_sa *sa, + struct rte_crypto_op *cop); + +#endif /* __RTE_IPSEC_XFORM_ESP_H__ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipip.h b/src/spdk/dpdk/examples/ipsec-secgw/ipip.h new file mode 100644 index 00000000..13b8455c --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipip.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +#ifndef __IPIP_H__ +#define __IPIP_H__ + +#include +#include +#include +#include + +#include + +static inline void * +ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6, + struct ip_addr *src, struct ip_addr *dst) +{ + struct ip *inip4, *outip4; + struct ip6_hdr *inip6, *outip6; + uint8_t ds_ecn; + + inip4 = rte_pktmbuf_mtod(m, struct ip *); + + RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION); + + if (inip4->ip_v == IPVERSION) { + /* XXX This should be done by the forwarding engine instead */ + inip4->ip_ttl -= 1; + if (inip4->ip_sum >= rte_cpu_to_be_16(0xffff - 0x100)) + inip4->ip_sum += rte_cpu_to_be_16(0x100) + 1; + else + inip4->ip_sum += rte_cpu_to_be_16(0x100); + ds_ecn = inip4->ip_tos; + } else { + inip6 = (struct ip6_hdr *)inip4; + /* XXX This should be done by the forwarding engine instead */ + inip6->ip6_hops -= 1; + ds_ecn = ntohl(inip6->ip6_flow) >> 20; + } + + if (is_ipv6) { + offset += sizeof(struct ip6_hdr); + outip6 = (struct ip6_hdr *)rte_pktmbuf_prepend(m, offset); + + RTE_ASSERT(outip6 != NULL); + + /* Per RFC4301 5.1.2.1 */ + outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20); + outip6->ip6_plen = htons(rte_pktmbuf_data_len(m) - + sizeof(struct ip6_hdr)); + + outip6->ip6_nxt = IPPROTO_ESP; + outip6->ip6_hops = IPDEFTTL; + + memcpy(&outip6->ip6_src.s6_addr, src, 16); + memcpy(&outip6->ip6_dst.s6_addr, dst, 16); + + return outip6; + } + + offset += sizeof(struct ip); + outip4 = (struct ip *)rte_pktmbuf_prepend(m, offset); + + RTE_ASSERT(outip4 != NULL); + + /* Per RFC4301 5.1.2.1 */ + outip4->ip_v = IPVERSION; + outip4->ip_hl = 5; + outip4->ip_tos = ds_ecn; + outip4->ip_len = htons(rte_pktmbuf_data_len(m)); + + outip4->ip_id = 0; + outip4->ip_off = 0; + + outip4->ip_ttl = IPDEFTTL; + outip4->ip_p = IPPROTO_ESP; + + outip4->ip_src.s_addr = src->ip.ip4; + outip4->ip_dst.s_addr = dst->ip.ip4; + m->packet_type &= ~RTE_PTYPE_L4_MASK; + return outip4; +} + +static inline struct ip * +ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, + struct ip_addr *src, struct ip_addr *dst) +{ + return ipip_outbound(m, offset, 0, src, dst); +} + +static inline struct ip6_hdr * +ip6ip_outbound(struct rte_mbuf *m, uint32_t offset, + struct ip_addr *src, struct ip_addr *dst) +{ + return ipip_outbound(m, offset, 1, src, dst); +} + +static inline void +ip4_ecn_setup(struct ip *ip4) +{ + if (ip4->ip_tos & IPTOS_ECN_MASK) { + unsigned long sum; + uint8_t old; + + old = ip4->ip_tos; + ip4->ip_tos |= IPTOS_ECN_CE; + sum = old + (~(*(uint8_t *)&ip4->ip_tos) & 0xff); + sum += rte_be_to_cpu_16(ip4->ip_sum); + sum = (sum & 0xffff) + (sum >> 16); + ip4->ip_sum = rte_cpu_to_be_16(sum + (sum >> 16)); + } +} + +static inline void +ip6_ecn_setup(struct ip6_hdr *ip6) +{ + if ((ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK) + ip6->ip6_flow = htonl(ntohl(ip6->ip6_flow) | + (IPTOS_ECN_CE << 20)); +} + +static inline void +ipip_inbound(struct rte_mbuf *m, uint32_t offset) +{ + struct ip *inip4, *outip4; + struct ip6_hdr *inip6, *outip6; + uint32_t ip_len, set_ecn; + + outip4 = rte_pktmbuf_mtod(m, struct ip*); + + RTE_ASSERT(outip4->ip_v == IPVERSION || outip4->ip_v == 
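/*
 * A note on the checksum arithmetic used in ipip_outbound() above and
 * ipip_inbound() below: decrementing the TTL lowers the 16-bit header word
 * holding (ttl << 8 | protocol) by 0x100, so the ones'-complement checksum
 * field must rise by 0x100, plus one extra when the 16-bit addition wraps
 * (incremental update in the spirit of RFC 1624). Example, in network byte
 * order: ip_sum = 0xfff0 >= 0xffff - 0x100, so the code adds 0x100 + 1 and
 * the truncated result 0x00f1 carries the end-around bit by hand.
 */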
IP6_VERSION); + + if (outip4->ip_v == IPVERSION) { + ip_len = sizeof(struct ip); + set_ecn = ((outip4->ip_tos & IPTOS_ECN_CE) == IPTOS_ECN_CE); + } else { + outip6 = (struct ip6_hdr *)outip4; + ip_len = sizeof(struct ip6_hdr); + set_ecn = ntohl(outip6->ip6_flow) >> 20; + set_ecn = ((set_ecn & IPTOS_ECN_CE) == IPTOS_ECN_CE); + } + + inip4 = (struct ip *)rte_pktmbuf_adj(m, offset + ip_len); + RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION); + + /* Check packet is still bigger than IP header (inner) */ + RTE_ASSERT(rte_pktmbuf_pkt_len(m) > ip_len); + + /* RFC4301 5.1.2.1 Note 6 */ + if (inip4->ip_v == IPVERSION) { + if (set_ecn) + ip4_ecn_setup(inip4); + /* XXX This should be done by the forwarding engine instead */ + inip4->ip_ttl -= 1; + if (inip4->ip_sum >= rte_cpu_to_be_16(0xffff - 0x100)) + inip4->ip_sum += rte_cpu_to_be_16(0x100) + 1; + else + inip4->ip_sum += rte_cpu_to_be_16(0x100); + m->packet_type &= ~RTE_PTYPE_L4_MASK; + if (inip4->ip_p == IPPROTO_UDP) + m->packet_type |= RTE_PTYPE_L4_UDP; + else if (inip4->ip_p == IPPROTO_TCP) + m->packet_type |= RTE_PTYPE_L4_TCP; + } else { + inip6 = (struct ip6_hdr *)inip4; + if (set_ecn) + ip6_ecn_setup(inip6); + /* XXX This should be done by the forwarding engine instead */ + inip6->ip6_hops -= 1; + } +} + +#endif /* __IPIP_H__ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.c new file mode 100644 index 00000000..b45b87bd --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.c @@ -0,0 +1,1828 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipsec.h" +#include "parser.h" + +#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 + +#define MAX_JUMBO_PKT_LEN 9600 + +#define MEMPOOL_CACHE_SIZE 256 + +#define NB_MBUF (32000) + +#define CDEV_QUEUE_DESC 2048 +#define CDEV_MAP_ENTRIES 1024 +#define CDEV_MP_NB_OBJS 2048 +#define CDEV_MP_CACHE_SZ 64 +#define MAX_QUEUE_PAIRS 1 + +#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ + +#define NB_SOCKETS 4 + +/* Configure how many packets ahead to prefetch, when reading packets */ +#define PREFETCH_OFFSET 3 + +#define MAX_RX_QUEUE_PER_LCORE 16 + +#define MAX_LCORE_PARAMS 1024 + +#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) + +/* + * Configurable number of RX/TX ring descriptors + */ +#define IPSEC_SECGW_RX_DESC_DEFAULT 1024 +#define IPSEC_SECGW_TX_DESC_DEFAULT 1024 +static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; +static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; + +#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((a) & 0xff) << 56) | \ + ((uint64_t)((b) & 0xff) << 48) | \ + ((uint64_t)((c) & 0xff) << 40) | \ + ((uint64_t)((d) & 0xff) << 32) | \ + ((uint64_t)((e) & 0xff) << 24) | \ + ((uint64_t)((f) & 0xff) << 16) | \ + ((uint64_t)((g) & 0xff) << 8) | \ + ((uint64_t)(h) & 0xff)) +#else +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((h) & 0xff) << 56) | \ + ((uint64_t)((g) & 0xff) << 48) | \ + ((uint64_t)((f) & 0xff) << 40) | \ + ((uint64_t)((e) & 0xff) << 32) | \ + 
((uint64_t)((d) & 0xff) << 24) | \ + ((uint64_t)((c) & 0xff) << 16) | \ + ((uint64_t)((b) & 0xff) << 8) | \ + ((uint64_t)(a) & 0xff)) +#endif +#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) + +#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ + addr.addr_bytes[0], addr.addr_bytes[1], \ + addr.addr_bytes[2], addr.addr_bytes[3], \ + addr.addr_bytes[4], addr.addr_bytes[5], \ + 0, 0) + +/* port/source ethernet addr and destination ethernet addr */ +struct ethaddr_info { + uint64_t src, dst; +}; + +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } +}; + +#define CMD_LINE_OPT_CONFIG "config" +#define CMD_LINE_OPT_SINGLE_SA "single-sa" +#define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask" + +enum { + /* long options mapped to a short option */ + + /* first long only option value must be >= 256, so that we won't + * conflict with short options + */ + CMD_LINE_OPT_MIN_NUM = 256, + CMD_LINE_OPT_CONFIG_NUM, + CMD_LINE_OPT_SINGLE_SA_NUM, + CMD_LINE_OPT_CRYPTODEV_MASK_NUM, +}; + +static const struct option lgopts[] = { + {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM}, + {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM}, + {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM}, + {NULL, 0, 0, 0} +}; + +/* mask of enabled ports */ +static uint32_t enabled_port_mask; +static uint64_t enabled_cryptodev_mask = UINT64_MAX; +static uint32_t unprotected_port_mask; +static int32_t promiscuous_on = 1; +static int32_t numa_on = 1; /**< NUMA is enabled by default. */ +static uint32_t nb_lcores; +static uint32_t single_sa; +static uint32_t single_sa_idx; +static uint32_t frame_size; + +struct lcore_rx_queue { + uint16_t port_id; + uint8_t queue_id; +} __rte_cache_aligned; + +struct lcore_params { + uint16_t port_id; + uint8_t queue_id; + uint8_t lcore_id; +} __rte_cache_aligned; + +static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; + +static struct lcore_params *lcore_params; +static uint16_t nb_lcore_params; + +static struct rte_hash *cdev_map_in; +static struct rte_hash *cdev_map_out; + +struct buffer { + uint16_t len; + struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); +}; + +struct lcore_conf { + uint16_t nb_rx_queue; + struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; + struct buffer tx_mbufs[RTE_MAX_ETHPORTS]; + struct ipsec_ctx inbound; + struct ipsec_ctx outbound; + struct rt_ctx *rt4_ctx; + struct rt_ctx *rt6_ctx; +} __rte_cache_aligned; + +static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + +static struct rte_eth_conf port_conf = { + .rxmode = { + .mq_mode = ETH_MQ_RX_RSS, + .max_rx_pkt_len = ETHER_MAX_LEN, + .split_hdr_size = 0, + .offloads = DEV_RX_OFFLOAD_CHECKSUM | + DEV_RX_OFFLOAD_CRC_STRIP, + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | + ETH_RSS_TCP | ETH_RSS_SCTP, + }, + }, + .txmode = { + .mq_mode = ETH_MQ_TX_NONE, + .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_MULTI_SEGS), + }, +}; + +static struct socket_ctx socket_ctx[NB_SOCKETS]; + +struct traffic_type { + const uint8_t *data[MAX_PKT_BURST * 2]; + struct rte_mbuf *pkts[MAX_PKT_BURST * 2]; + uint32_t res[MAX_PKT_BURST * 2]; + uint32_t num; +}; + +struct ipsec_traffic { + struct traffic_type ipsec; + struct 
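/*
 * The lcore_params table above is filled from the --config option, which
 * uses the same "(port,queue,lcore)" triple syntax as l3fwd. A plausible
 * invocation (core numbers and masks are made up for illustration):
 *
 *   ./build/ipsec-secgw -l 1,2 -- -p 0x3 -u 0x1 \
 *       --config="(0,0,1),(1,0,2)" -f ep0.cfg
 *
 * i.e. lcore 1 polls port 0 queue 0 and lcore 2 polls port 1 queue 0, with
 * port 0 (-u) as the unprotected side whose ingress is treated as IPsec
 * inbound. Note that UNPROTECTED_PORT() above expands to the caller's
 * `portid` variable rather than its own parameter, so it is only safe to
 * invoke as UNPROTECTED_PORT(portid).
 */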
traffic_type ip4; + struct traffic_type ip6; +}; + +static inline void +prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) +{ + uint8_t *nlp; + struct ether_hdr *eth; + + eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) { + nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN); + nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p)); + if (*nlp == IPPROTO_ESP) + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + else { + t->ip4.data[t->ip4.num] = nlp; + t->ip4.pkts[(t->ip4.num)++] = pkt; + } + } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) { + nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN); + nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt)); + if (*nlp == IPPROTO_ESP) + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + else { + t->ip6.data[t->ip6.num] = nlp; + t->ip6.pkts[(t->ip6.num)++] = pkt; + } + } else { + /* Unknown/Unsupported type, drop the packet */ + RTE_LOG(ERR, IPSEC, "Unsupported packet type\n"); + rte_pktmbuf_free(pkt); + } + + /* Check if the packet has been processed inline. For inline protocol + * processed packets, the metadata in the mbuf can be used to identify + * the security processing done on the packet. The metadata will be + * used to retrieve the application registered userdata associated + * with the security session. + */ + + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) { + struct ipsec_sa *sa; + struct ipsec_mbuf_metadata *priv; + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx( + pkt->port); + + /* Retrieve the userdata registered. Here, the userdata + * registered is the SA pointer. + */ + + sa = (struct ipsec_sa *) + rte_security_get_userdata(ctx, pkt->udata64); + + if (sa == NULL) { + /* userdata could not be retrieved */ + return; + } + + /* Save SA as priv member in mbuf. This will be used in the + * IPsec selector(SP-SA) check. 
+ */ + + priv = get_priv(pkt); + priv->sa = sa; + } +} + +static inline void +prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t, + uint16_t nb_pkts) +{ + int32_t i; + + t->ipsec.num = 0; + t->ip4.num = 0; + t->ip6.num = 0; + + for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) { + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET], + void *)); + prepare_one_packet(pkts[i], t); + } + /* Process left packets */ + for (; i < nb_pkts; i++) + prepare_one_packet(pkts[i], t); +} + +static inline void +prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port) +{ + struct ip *ip; + struct ether_hdr *ethhdr; + + ip = rte_pktmbuf_mtod(pkt, struct ip *); + + ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN); + + if (ip->ip_v == IPVERSION) { + pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4; + pkt->l3_len = sizeof(struct ip); + pkt->l2_len = ETHER_HDR_LEN; + + ip->ip_sum = 0; + ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + } else { + pkt->ol_flags |= PKT_TX_IPV6; + pkt->l3_len = sizeof(struct ip6_hdr); + pkt->l2_len = ETHER_HDR_LEN; + + ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6); + } + + memcpy(ðhdr->s_addr, ðaddr_tbl[port].src, + sizeof(struct ether_addr)); + memcpy(ðhdr->d_addr, ðaddr_tbl[port].dst, + sizeof(struct ether_addr)); +} + +static inline void +prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port) +{ + int32_t i; + const int32_t prefetch_offset = 2; + + for (i = 0; i < (nb_pkts - prefetch_offset); i++) { + rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]); + prepare_tx_pkt(pkts[i], port); + } + /* Process left packets */ + for (; i < nb_pkts; i++) + prepare_tx_pkt(pkts[i], port); +} + +/* Send burst of packets on an output interface */ +static inline int32_t +send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) +{ + struct rte_mbuf **m_table; + int32_t ret; + uint16_t queueid; + + queueid = qconf->tx_queue_id[port]; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + prepare_tx_burst(m_table, n, port); + + ret = rte_eth_tx_burst(port, queueid, m_table, n); + if (unlikely(ret < n)) { + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +/* Enqueue a single packet, and send burst if queue is filled */ +static inline int32_t +send_single_packet(struct rte_mbuf *m, uint16_t port) +{ + uint32_t lcore_id; + uint16_t len; + struct lcore_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +static inline void +inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip, + uint16_t lim) +{ + struct rte_mbuf *m; + uint32_t i, j, res, sa_idx; + + if (ip->num == 0 || sp == NULL) + return; + + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, + ip->num, DEFAULT_MAX_CATEGORIES); + + j = 0; + for (i = 0; i < ip->num; i++) { + m = ip->pkts[i]; + res = ip->res[i]; + if (res & BYPASS) { + ip->pkts[j++] = m; + continue; + } + if (res & DISCARD) { + rte_pktmbuf_free(m); + continue; + } + + /* Only check SPI match for processed IPSec packets */ + if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) { + rte_pktmbuf_free(m); + continue; + } + + sa_idx = ip->res[i] & PROTECT_MASK; + if (sa_idx >= IPSEC_SA_MAX_ENTRIES || + !inbound_sa_check(sa, m, sa_idx)) { 
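/*
 * The ACL verdict in ip->res[i] packs the whole policy decision: BYPASS
 * and DISCARD are flag bits, while for "protect" entries the low bits
 * (PROTECT_MASK) hold the SA index. `res & PROTECT_MASK` therefore names
 * the SA that must have carried the packet, and inbound_sa_check()
 * confirms the packet really was decrypted under that SA before it is
 * forwarded.
 */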
+ rte_pktmbuf_free(m); + continue; + } + ip->pkts[j++] = m; + } + ip->num = j; +} + +static inline void +process_pkts_inbound(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6; + + nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.num, MAX_PKT_BURST); + + n_ip4 = traffic->ip4.num; + n_ip6 = traffic->ip6.num; + + /* SP/ACL Inbound check ipsec and ip4 */ + for (i = 0; i < nb_pkts_in; i++) { + m = traffic->ipsec.pkts[i]; + struct ip *ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + idx = traffic->ip4.num++; + traffic->ip4.pkts[idx] = m; + traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m, + uint8_t *, offsetof(struct ip, ip_p)); + } else if (ip->ip_v == IP6_VERSION) { + idx = traffic->ip6.num++; + traffic->ip6.pkts[idx] = m; + traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m, + uint8_t *, + offsetof(struct ip6_hdr, ip6_nxt)); + } else + rte_pktmbuf_free(m); + } + + inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4, + n_ip4); + + inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6, + n_ip6); +} + +static inline void +outbound_sp(struct sp_ctx *sp, struct traffic_type *ip, + struct traffic_type *ipsec) +{ + struct rte_mbuf *m; + uint32_t i, j, sa_idx; + + if (ip->num == 0 || sp == NULL) + return; + + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, + ip->num, DEFAULT_MAX_CATEGORIES); + + j = 0; + for (i = 0; i < ip->num; i++) { + m = ip->pkts[i]; + sa_idx = ip->res[i] & PROTECT_MASK; + if (ip->res[i] & DISCARD) + rte_pktmbuf_free(m); + else if (ip->res[i] & BYPASS) + ip->pkts[j++] = m; + else if (sa_idx < IPSEC_SA_MAX_ENTRIES) { + ipsec->res[ipsec->num] = sa_idx; + ipsec->pkts[ipsec->num++] = m; + } else /* invalid SA idx */ + rte_pktmbuf_free(m); + } + ip->num = j; +} + +static inline void +process_pkts_outbound(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint16_t idx, nb_pkts_out, i; + + /* Drop any IPsec traffic from protected ports */ + for (i = 0; i < traffic->ipsec.num; i++) + rte_pktmbuf_free(traffic->ipsec.pkts[i]); + + traffic->ipsec.num = 0; + + outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec); + + outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec); + + nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.res, traffic->ipsec.num, + MAX_PKT_BURST); + + for (i = 0; i < nb_pkts_out; i++) { + m = traffic->ipsec.pkts[i]; + struct ip *ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + idx = traffic->ip4.num++; + traffic->ip4.pkts[idx] = m; + } else { + idx = traffic->ip6.num++; + traffic->ip6.pkts[idx] = m; + } + } +} + +static inline void +process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint32_t nb_pkts_in, i, idx; + + /* Drop any IPv4 traffic from unprotected ports */ + for (i = 0; i < traffic->ip4.num; i++) + rte_pktmbuf_free(traffic->ip4.pkts[i]); + + traffic->ip4.num = 0; + + /* Drop any IPv6 traffic from unprotected ports */ + for (i = 0; i < traffic->ip6.num; i++) + rte_pktmbuf_free(traffic->ip6.pkts[i]); + + traffic->ip6.num = 0; + + nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.num, MAX_PKT_BURST); + + for (i = 0; i < nb_pkts_in; i++) { + m = traffic->ipsec.pkts[i]; + struct ip *ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + idx = traffic->ip4.num++; + 
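/*
 * Two-pass inbound structure: ESP packets are first decrypted via
 * ipsec_inbound(), then appended behind the n_ip4/n_ip6 plaintext packets
 * and re-run through inbound_sp_sa(), so the SP lookup sees the inner
 * header while `lim` separates original plaintext entries (index < lim)
 * from post-decryption ones.
 */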
traffic->ip4.pkts[idx] = m; + } else { + idx = traffic->ip6.num++; + traffic->ip6.pkts[idx] = m; + } + } +} + +static inline void +process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint32_t nb_pkts_out, i; + struct ip *ip; + + /* Drop any IPsec traffic from protected ports */ + for (i = 0; i < traffic->ipsec.num; i++) + rte_pktmbuf_free(traffic->ipsec.pkts[i]); + + traffic->ipsec.num = 0; + + for (i = 0; i < traffic->ip4.num; i++) + traffic->ip4.res[i] = single_sa_idx; + + for (i = 0; i < traffic->ip6.num; i++) + traffic->ip6.res[i] = single_sa_idx; + + nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts, + traffic->ip4.res, traffic->ip4.num, + MAX_PKT_BURST); + + /* They all use the same SA (ip4 or ip6 tunnel) */ + m = traffic->ipsec.pkts[i]; + ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) + traffic->ip4.num = nb_pkts_out; + else + traffic->ip6.num = nb_pkts_out; +} + +static inline int32_t +get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6) +{ + struct ipsec_mbuf_metadata *priv; + struct ipsec_sa *sa; + + priv = get_priv(pkt); + + sa = priv->sa; + if (unlikely(sa == NULL)) { + RTE_LOG(ERR, IPSEC, "SA not saved in private data\n"); + goto fail; + } + + if (is_ipv6) + return sa->portid; + + /* else */ + return (sa->portid | RTE_LPM_LOOKUP_SUCCESS); + +fail: + if (is_ipv6) + return -1; + + /* else */ + return 0; +} + +static inline void +route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +{ + uint32_t hop[MAX_PKT_BURST * 2]; + uint32_t dst_ip[MAX_PKT_BURST * 2]; + int32_t pkt_hop = 0; + uint16_t i, offset; + uint16_t lpm_pkts = 0; + + if (nb_pkts == 0) + return; + + /* Need to do an LPM lookup for non-inline packets. Inline packets will + * have port ID in the SA + */ + + for (i = 0; i < nb_pkts; i++) { + if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) { + /* Security offload not enabled. So an LPM lookup is + * required to get the hop + */ + offset = offsetof(struct ip, ip_dst); + dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i], + uint32_t *, offset); + dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]); + lpm_pkts++; + } + } + + rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts); + + lpm_pkts = 0; + + for (i = 0; i < nb_pkts; i++) { + if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) { + /* Read hop from the SA */ + pkt_hop = get_hop_for_offload_pkt(pkts[i], 0); + } else { + /* Need to use hop returned by lookup */ + pkt_hop = hop[lpm_pkts++]; + } + + if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) { + rte_pktmbuf_free(pkts[i]); + continue; + } + send_single_packet(pkts[i], pkt_hop & 0xff); + } +} + +static inline void +route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +{ + int32_t hop[MAX_PKT_BURST * 2]; + uint8_t dst_ip[MAX_PKT_BURST * 2][16]; + uint8_t *ip6_dst; + int32_t pkt_hop = 0; + uint16_t i, offset; + uint16_t lpm_pkts = 0; + + if (nb_pkts == 0) + return; + + /* Need to do an LPM lookup for non-inline packets. Inline packets will + * have port ID in the SA + */ + + for (i = 0; i < nb_pkts; i++) { + if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) { + /* Security offload not enabled.
So an LPM lookup is + * required to get the hop + */ + offset = offsetof(struct ip6_hdr, ip6_dst); + ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, + offset); + memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16); + lpm_pkts++; + } + } + + rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop, + lpm_pkts); + + lpm_pkts = 0; + + for (i = 0; i < nb_pkts; i++) { + if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) { + /* Read hop from the SA */ + pkt_hop = get_hop_for_offload_pkt(pkts[i], 1); + } else { + /* Need to use hop returned by lookup */ + pkt_hop = hop[lpm_pkts++]; + } + + if (pkt_hop == -1) { + rte_pktmbuf_free(pkts[i]); + continue; + } + send_single_packet(pkts[i], pkt_hop & 0xff); + } +} + +static inline void +process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, + uint8_t nb_pkts, uint16_t portid) +{ + struct ipsec_traffic traffic; + + prepare_traffic(pkts, &traffic, nb_pkts); + + if (unlikely(single_sa)) { + if (UNPROTECTED_PORT(portid)) + process_pkts_inbound_nosp(&qconf->inbound, &traffic); + else + process_pkts_outbound_nosp(&qconf->outbound, &traffic); + } else { + if (UNPROTECTED_PORT(portid)) + process_pkts_inbound(&qconf->inbound, &traffic); + else + process_pkts_outbound(&qconf->outbound, &traffic); + } + + route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num); + route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num); +} + +static inline void +drain_buffers(struct lcore_conf *qconf) +{ + struct buffer *buf; + uint32_t portid; + + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { + buf = &qconf->tx_mbufs[portid]; + if (buf->len == 0) + continue; + send_burst(qconf, buf->len, portid); + buf->len = 0; + } +} + +/* main processing loop */ +static int32_t +main_loop(__attribute__((unused)) void *dummy) +{ + struct rte_mbuf *pkts[MAX_PKT_BURST]; + uint32_t lcore_id; + uint64_t prev_tsc, diff_tsc, cur_tsc; + int32_t i, nb_rx; + uint16_t portid; + uint8_t queueid; + struct lcore_conf *qconf; + int32_t socket_id; + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) + / US_PER_S * BURST_TX_DRAIN_US; + struct lcore_rx_queue *rxql; + + prev_tsc = 0; + lcore_id = rte_lcore_id(); + qconf = &lcore_conf[lcore_id]; + rxql = qconf->rx_queue_list; + socket_id = rte_lcore_to_socket_id(lcore_id); + + qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4; + qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6; + qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in; + qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in; + qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in; + qconf->inbound.cdev_map = cdev_map_in; + qconf->inbound.session_pool = socket_ctx[socket_id].session_pool; + qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out; + qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out; + qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out; + qconf->outbound.cdev_map = cdev_map_out; + qconf->outbound.session_pool = socket_ctx[socket_id].session_pool; + + if (qconf->nb_rx_queue == 0) { + RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id); + return 0; + } + + RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->nb_rx_queue; i++) { + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + RTE_LOG(INFO, IPSEC, + " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", + lcore_id, portid, queueid); + } + + while (1) { + cur_tsc = rte_rdtsc(); + + /* TX queue buffer drain */ + diff_tsc = cur_tsc - prev_tsc; + + if (unlikely(diff_tsc > drain_tsc)) { + drain_buffers(qconf); + prev_tsc = cur_tsc; + } + + 
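+ /* (Editor's example, assuming a 2 GHz TSC and the usual
+  * BURST_TX_DRAIN_US of 100: drain_tsc is about
+  * 2e9 / 1e6 * 100 = 200000 cycles, so partially filled TX
+  * buffers are flushed at least every ~100 us.) */
+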
/* Read packet from RX queues */ + for (i = 0; i < qconf->nb_rx_queue; ++i) { + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + nb_rx = rte_eth_rx_burst(portid, queueid, + pkts, MAX_PKT_BURST); + + if (nb_rx > 0) + process_pkts(qconf, pkts, nb_rx, portid); + } + } +} + +static int32_t +check_params(void) +{ + uint8_t lcore; + uint16_t portid; + uint16_t i; + int32_t socket_id; + + if (lcore_params == NULL) { + printf("Error: No port/queue/core mappings\n"); + return -1; + } + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { + printf("error: lcore %hhu is not enabled in " + "lcore mask\n", lcore); + return -1; + } + socket_id = rte_lcore_to_socket_id(lcore); + if (socket_id != 0 && numa_on == 0) { + printf("warning: lcore %hhu is on socket %d " + "with numa off\n", + lcore, socket_id); + } + portid = lcore_params[i].port_id; + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("port %u is not enabled in port mask\n", portid); + return -1; + } + if (!rte_eth_dev_is_valid_port(portid)) { + printf("port %u is not present on the board\n", portid); + return -1; + } + } + return 0; +} + +static uint8_t +get_port_nb_rx_queues(const uint16_t port) +{ + int32_t queue = -1; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + if (lcore_params[i].port_id == port && + lcore_params[i].queue_id > queue) + queue = lcore_params[i].queue_id; + } + return (uint8_t)(++queue); +} + +static int32_t +init_lcore_rx_queues(void) +{ + uint16_t i, nb_rx_queue; + uint8_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + nb_rx_queue = lcore_conf[lcore].nb_rx_queue; + if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { + printf("error: too many queues (%u) for lcore: %u\n", + nb_rx_queue + 1, lcore); + return -1; + } + lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = + lcore_params[i].port_id; + lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id = + lcore_params[i].queue_id; + lcore_conf[lcore].nb_rx_queue++; + } + return 0; +} + +/* display usage */ +static void +print_usage(const char *prgname) +{ + fprintf(stderr, "%s [EAL options] --" + " -p PORTMASK" + " [-P]" + " [-u PORTMASK]" + " [-j FRAMESIZE]" + " -f CONFIG_FILE" + " --config (port,queue,lcore)[,(port,queue,lcore)]" + " [--single-sa SAIDX]" + " [--cryptodev_mask MASK]" + "\n\n" + " -p PORTMASK: Hexadecimal bitmask of ports to configure\n" + " -P : Enable promiscuous mode\n" + " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n" + " -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n" + " packet size\n" + " -f CONFIG_FILE: Configuration file\n" + " --config (port,queue,lcore): Rx queue configuration\n" + " --single-sa SAIDX: Use single SA index for outbound traffic,\n" + " bypassing the SP\n" + " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n" + " devices to configure\n" + "\n", + prgname); +} + +static int32_t +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + if ((pm == 0) && errno) + return -1; + + return pm; +} + +static int32_t +parse_decimal(const char *str) +{ + char *end = NULL; + unsigned long num; + + num = strtoul(str, &end, 10); + if ((str[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + return num; +} + +static int32_t +parse_config(const char *q_arg) +{ + char s[256]; + const char *p, *p0 = 
q_arg; + char *end; + enum fieldnames { + FLD_PORT = 0, + FLD_QUEUE, + FLD_LCORE, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + int32_t i; + uint32_t size; + + nb_lcore_params = 0; + + while ((p = strchr(p0, '(')) != NULL) { + ++p; + p0 = strchr(p, ')'); + if (p0 == NULL) + return -1; + + size = p0 - p; + if (size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != + _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + if (nb_lcore_params >= MAX_LCORE_PARAMS) { + printf("exceeded max number of lcore params: %hu\n", + nb_lcore_params); + return -1; + } + lcore_params_array[nb_lcore_params].port_id = + (uint8_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = + (uint8_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = + (uint8_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; + return 0; +} + +static int32_t +parse_args(int32_t argc, char **argv) +{ + int32_t opt, ret; + char **argvopt; + int32_t option_index; + char *prgname = argv[0]; + int32_t f_present = 0; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask == 0) { + printf("invalid portmask\n"); + print_usage(prgname); + return -1; + } + break; + case 'P': + printf("Promiscuous mode selected\n"); + promiscuous_on = 1; + break; + case 'u': + unprotected_port_mask = parse_portmask(optarg); + if (unprotected_port_mask == 0) { + printf("invalid unprotected portmask\n"); + print_usage(prgname); + return -1; + } + break; + case 'f': + if (f_present == 1) { + printf("\"-f\" option present more than " + "once!\n"); + print_usage(prgname); + return -1; + } + if (parse_cfg_file(optarg) < 0) { + printf("parsing file \"%s\" failed\n", + optarg); + print_usage(prgname); + return -1; + } + f_present = 1; + break; + case 'j': + { + int32_t size = parse_decimal(optarg); + if (size <= 1518) { + printf("Invalid jumbo frame size\n"); + if (size < 0) { + print_usage(prgname); + return -1; + } + printf("Using default value 9000\n"); + frame_size = 9000; + } else { + frame_size = size; + } + } + printf("Enabled jumbo frames size %u\n", frame_size); + break; + case CMD_LINE_OPT_CONFIG_NUM: + ret = parse_config(optarg); + if (ret) { + printf("Invalid config\n"); + print_usage(prgname); + return -1; + } + break; + case CMD_LINE_OPT_SINGLE_SA_NUM: + ret = parse_decimal(optarg); + if (ret == -1) { + printf("Invalid argument[sa_idx]\n"); + print_usage(prgname); + return -1; + } + + /* else */ + single_sa = 1; + single_sa_idx = ret; + printf("Configured with single SA index %u\n", + single_sa_idx); + break; + case CMD_LINE_OPT_CRYPTODEV_MASK_NUM: + ret = parse_portmask(optarg); + if (ret == -1) { + printf("Invalid argument[portmask]\n"); + print_usage(prgname); + return -1; + } + + /* else */ + enabled_cryptodev_mask = ret; + break; + default: + print_usage(prgname); + return -1; + } + } + + if (f_present == 0) { + printf("Mandatory option \"-f\" not present\n"); + return -1; + } + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 1; /* reset getopt lib */ + return ret; +} + +static void +print_ethaddr(const char *name, const struct ether_addr *eth_addr) 
+{
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ printf("%s%s", name, buf);
+}
+
+/* Check the link status of all ports in up to 9s, and print the result */
+static void
+check_all_ports_link_status(uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(portid, &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf(
+ "Port %d Link Up - speed %u Mbps - %s\n",
+ portid, link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex"));
+ else
+ printf("Port %d Link Down\n", portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+static int32_t
+add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
+ uint16_t qp, struct lcore_params *params,
+ struct ipsec_ctx *ipsec_ctx,
+ const struct rte_cryptodev_capabilities *cipher,
+ const struct rte_cryptodev_capabilities *auth,
+ const struct rte_cryptodev_capabilities *aead)
+{
+ int32_t ret = 0;
+ unsigned long i;
+ struct cdev_key key = { 0 };
+
+ key.lcore_id = params->lcore_id;
+ if (cipher)
+ key.cipher_algo = cipher->sym.cipher.algo;
+ if (auth)
+ key.auth_algo = auth->sym.auth.algo;
+ if (aead)
+ key.aead_algo = aead->sym.aead.algo;
+
+ ret = rte_hash_lookup(map, &key);
+ if (ret != -ENOENT)
+ return 0;
+
+ for (i = 0; i < ipsec_ctx->nb_qps; i++)
+ if (ipsec_ctx->tbl[i].id == cdev_id)
+ break;
+
+ if (i == ipsec_ctx->nb_qps) {
+ if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
+ printf("Maximum number of crypto devices assigned to "
+ "a core, increase MAX_QP_PER_LCORE value\n");
+ return 0;
+ }
+ ipsec_ctx->tbl[i].id = cdev_id;
+ ipsec_ctx->tbl[i].qp = qp;
+ ipsec_ctx->nb_qps++;
+ printf("%s cdev mapping: lcore %u using cdev %u qp %u "
+ "(cdev_id_qp %lu)\n", str, key.lcore_id,
+ cdev_id, qp, i);
+ }
+
+ ret = rte_hash_add_key_data(map, &key, (void *)i);
+ if (ret < 0) {
+ printf("Failed to insert cdev mapping for (lcore %u, "
+ "cdev %u, qp %u), errno %d\n",
+ key.lcore_id, ipsec_ctx->tbl[i].id,
+ ipsec_ctx->tbl[i].qp, ret);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int32_t
+add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
+ uint16_t qp, struct lcore_params *params)
+{
+ int32_t ret = 0;
+ const struct rte_cryptodev_capabilities *i, *j;
+ struct rte_hash *map;
+ struct lcore_conf *qconf;
+ struct ipsec_ctx *ipsec_ctx;
+ const char *str;
+
+ qconf = &lcore_conf[params->lcore_id];
+
+ if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
+ map = cdev_map_out;
+ ipsec_ctx = &qconf->outbound;
+ str = "Outbound";
+ } else {
+ map = cdev_map_in;
+ ipsec_ctx = &qconf->inbound;
+ str = "Inbound";
+ }
+
+ /* 
Required cryptodevs with operation chaining */
+ if (!(dev_info->feature_flags &
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
+ return ret;
+
+ for (i = dev_info->capabilities;
+ i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
+ if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret |= add_mapping(map, str, cdev_id, qp, params,
+ ipsec_ctx, NULL, NULL, i);
+ continue;
+ }
+
+ if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ for (j = dev_info->capabilities;
+ j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
+ if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ ret |= add_mapping(map, str, cdev_id, qp, params,
+ ipsec_ctx, i, j, NULL);
+ }
+ }
+
+ return ret;
+}
+
+/* Check if the device is enabled by cryptodev_mask */
+static int
+check_cryptodev_mask(uint8_t cdev_id)
+{
+ if (enabled_cryptodev_mask & (1 << cdev_id))
+ return 0;
+
+ return -1;
+}
+
+static int32_t
+cryptodevs_init(void)
+{
+ struct rte_cryptodev_config dev_conf;
+ struct rte_cryptodev_qp_conf qp_conf;
+ uint16_t idx, max_nb_qps, qp, i;
+ int16_t cdev_id, port_id;
+ struct rte_hash_parameters params = { 0 };
+
+ params.entries = CDEV_MAP_ENTRIES;
+ params.key_len = sizeof(struct cdev_key);
+ params.hash_func = rte_jhash;
+ params.hash_func_init_val = 0;
+ params.socket_id = rte_socket_id();
+
+ params.name = "cdev_map_in";
+ cdev_map_in = rte_hash_create(&params);
+ if (cdev_map_in == NULL)
+ rte_panic("Failed to create cdev_map hash table, errno = %d\n",
+ rte_errno);
+
+ params.name = "cdev_map_out";
+ cdev_map_out = rte_hash_create(&params);
+ if (cdev_map_out == NULL)
+ rte_panic("Failed to create cdev_map hash table, errno = %d\n",
+ rte_errno);
+
+ printf("lcore/cryptodev/qp mappings:\n");
+
+ uint32_t max_sess_sz = 0, sess_sz;
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
+ RTE_ETH_FOREACH_DEV(port_id) {
+ void *sec_ctx;
+
+ if ((enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ sess_sz = rte_security_session_get_size(sec_ctx);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
+
+ idx = 0;
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ struct rte_cryptodev_info cdev_info;
+
+ if (check_cryptodev_mask((uint8_t)cdev_id))
+ continue;
+
+ rte_cryptodev_info_get(cdev_id, &cdev_info);
+
+ if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
+ max_nb_qps = cdev_info.max_nb_queue_pairs;
+ else
+ max_nb_qps = nb_lcore_params;
+
+ qp = 0;
+ i = 0;
+ while (qp < max_nb_qps && i < nb_lcore_params) {
+ if (add_cdev_mapping(&cdev_info, cdev_id, qp,
+ &lcore_params[idx]))
+ qp++;
+ idx++;
+ idx = idx % nb_lcore_params;
+ i++;
+ }
+
+ if (qp == 0)
+ continue;
+
+ dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
+ dev_conf.nb_queue_pairs = qp;
+
+ uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
+ if (dev_max_sess != 0 && dev_max_sess < (CDEV_MP_NB_OBJS / 2))
+ rte_exit(EXIT_FAILURE,
+ "Device does not support at least %u "
+ "sessions", CDEV_MP_NB_OBJS / 2);
+
+ if (!socket_ctx[dev_conf.socket_id].session_pool) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", dev_conf.socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ 
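+ /* (Editor's note on the element-size argument that follows:
+  * max_sess_sz was computed above as the largest session size
+  * over all cryptodevs and inline-security ethdev contexts.) */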
max_sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, dev_conf.socket_id,
+ 0);
+ if (sess_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create session pool on socket %d\n",
+ dev_conf.socket_id);
+ else
+ printf("Allocated session pool on socket %d\n",
+ dev_conf.socket_id);
+ socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
+ }
+
+ if (rte_cryptodev_configure(cdev_id, &dev_conf))
+ rte_panic("Failed to initialize cryptodev %u\n",
+ cdev_id);
+
+ qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
+ for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
+ if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
+ &qp_conf, dev_conf.socket_id,
+ socket_ctx[dev_conf.socket_id].session_pool))
+ rte_panic("Failed to setup queue %u for "
+ "cdev_id %u\n", qp, cdev_id);
+
+ if (rte_cryptodev_start(cdev_id))
+ rte_panic("Failed to start cryptodev %u\n",
+ cdev_id);
+ }
+
+ /* create session pools for eth devices that implement security */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((enabled_port_mask & (1 << port_id)) &&
+ rte_eth_dev_get_sec_ctx(port_id)) {
+ int socket_id = rte_eth_dev_socket_id(port_id);
+
+ if (!socket_ctx[socket_id].session_pool) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ max_sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+ if (sess_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create session pool "
+ "on socket %d\n", socket_id);
+ else
+ printf("Allocated session pool "
+ "on socket %d\n", socket_id);
+ socket_ctx[socket_id].session_pool = sess_mp;
+ }
+ }
+ }
+
+ printf("\n");
+
+ return 0;
+}
+
+static void
+port_init(uint16_t portid)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
+ uint16_t nb_tx_queue, nb_rx_queue;
+ uint16_t tx_queueid, rx_queueid, queue, lcore_id;
+ int32_t ret, socket_id;
+ struct lcore_conf *qconf;
+ struct ether_addr ethaddr;
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ rte_eth_dev_info_get(portid, &dev_info);
+
+ printf("Configuring device port %u:\n", portid);
+
+ rte_eth_macaddr_get(portid, &ethaddr);
+ ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
+ print_ethaddr("Address: ", &ethaddr);
+ printf("\n");
+
+ nb_rx_queue = get_port_nb_rx_queues(portid);
+ nb_tx_queue = nb_lcores;
+
+ if (nb_rx_queue > dev_info.max_rx_queues)
+ rte_exit(EXIT_FAILURE, "Error: queue %u not available "
+ "(max rx queue is %u)\n",
+ nb_rx_queue, dev_info.max_rx_queues);
+
+ if (nb_tx_queue > dev_info.max_tx_queues)
+ rte_exit(EXIT_FAILURE, "Error: queue %u not available "
+ "(max tx queue is %u)\n",
+ nb_tx_queue, dev_info.max_tx_queues);
+
+ printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
+ nb_rx_queue, nb_tx_queue);
+
+ if (frame_size) {
+ local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+ local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+ local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+ local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS 
hash function based on hardware support, "
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
+ ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
+ &local_port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot configure device: "
+ "err=%d, port=%d\n", ret, portid);
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
+ "err=%d, port=%d\n", ret, portid);
+
+ /* init one TX queue per lcore */
+ tx_queueid = 0;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ if (numa_on)
+ socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
+ else
+ socket_id = 0;
+
+ /* init TX queue */
+ printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
+
+ txconf = &dev_info.default_txconf;
+ txconf->offloads = local_port_conf.txmode.offloads;
+
+ ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
+ socket_id, txconf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
+ "err=%d, port=%d\n", ret, portid);
+
+ qconf = &lcore_conf[lcore_id];
+ qconf->tx_queue_id[portid] = tx_queueid;
+ tx_queueid++;
+
+ /* init RX queues */
+ for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
+ struct rte_eth_rxconf rxq_conf;
+
+ if (portid != qconf->rx_queue_list[queue].port_id)
+ continue;
+
+ rx_queueid = qconf->rx_queue_list[queue].queue_id;
+
+ printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
+ socket_id);
+
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ ret = rte_eth_rx_queue_setup(portid, rx_queueid,
+ nb_rxd, socket_id, &rxq_conf,
+ socket_ctx[socket_id].mbuf_pool);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_rx_queue_setup: err=%d, "
+ "port=%d\n", ret, portid);
+ }
+ }
+ printf("\n");
+}
+
+static void
+pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
+{
+ char s[64];
+ uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
+ RTE_MBUF_DEFAULT_BUF_SIZE;
+
+ snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
+ ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
+ MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
+ buff_size,
+ socket_id);
+ if (ctx->mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
+ socket_id);
+ else
+ printf("Allocated mbuf pool on socket %d\n", socket_id);
+}
+
+static inline int
+inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
+{
+ struct ipsec_sa *sa;
+
+ /* For inline protocol processing, the metadata in the event will
+ * uniquely identify the security session which raised the event.
+ * Application would then need the userdata it had registered with the
+ * security session to process the event.
+ */
+
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
+
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return -1;
+ }
+
+ /* Sequence number overflow. The SA needs to be re-established */
+ RTE_SET_USED(sa);
+ return 0;
+}
+
+static int
+inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ void *param, void *ret_param)
+{
+ uint64_t md;
+ struct rte_eth_event_ipsec_desc *event_desc = NULL;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(port_id);
+
+ RTE_SET_USED(param);
+
+ if (type != RTE_ETH_EVENT_IPSEC)
+ return -1;
+
+ event_desc = ret_param;
+ if (event_desc == NULL) {
+ printf("Event descriptor not set\n");
+ return -1;
+ }
+
+ md = event_desc->metadata;
+
+ if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
+ return inline_ipsec_event_esn_overflow(ctx, md);
+ else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
+ printf("Invalid IPsec event reported\n");
+ return -1;
+ }
+
+ return -1;
+}
+
+int32_t
+main(int32_t argc, char **argv)
+{
+ int32_t ret;
+ uint32_t lcore_id;
+ uint8_t socket_id;
+ uint16_t portid;
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid parameters\n");
+
+ if ((unprotected_port_mask & enabled_port_mask) !=
+ unprotected_port_mask)
+ rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
+ unprotected_port_mask);
+
+ if (check_params() < 0)
+ rte_exit(EXIT_FAILURE, "check_params failed\n");
+
+ ret = init_lcore_rx_queues();
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
+
+ nb_lcores = rte_lcore_count();
+
+ /* Replicate each context per socket */
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ if (numa_on)
+ socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
+ else
+ socket_id = 0;
+
+ if (socket_ctx[socket_id].mbuf_pool)
+ continue;
+
+ sa_init(&socket_ctx[socket_id], socket_id);
+
+ sp4_init(&socket_ctx[socket_id], socket_id);
+
+ sp6_init(&socket_ctx[socket_id], socket_id);
+
+ rt_init(&socket_ctx[socket_id], socket_id);
+
+ pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
+ }
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ port_init(portid);
+ }
+
+ cryptodevs_init();
+
+ /* start ports */
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ /* Start device */
+ ret = rte_eth_dev_start(portid);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
+ "err=%d, port=%d\n", ret, portid);
+ /*
+ * If enabled, put device in promiscuous mode.
+ * This allows IO forwarding mode to forward packets
+ * to itself through 2 cross-connected ports of the
+ * target machine. 
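+ *
+ * (Editor's example: with ports 0 and 1 of the target cabled to
+ * each other, frames sent out of port 0 re-enter on port 1; their
+ * destination MAC is not port 1's own address, so without
+ * promiscuous mode the NIC would silently drop them.)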
+ */ + if (promiscuous_on) + rte_eth_promiscuous_enable(portid); + + rte_eth_dev_callback_register(portid, + RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); + } + + check_all_ports_link_status(enabled_port_mask); + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec.c b/src/spdk/dpdk/examples/ipsec-secgw/ipsec.c new file mode 100644 index 00000000..3d415f1a --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec.c @@ -0,0 +1,562 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipsec.h" +#include "esp.h" + +static inline void +set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec) +{ + if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) { + struct rte_security_ipsec_tunnel_param *tunnel = + &ipsec->tunnel; + if (sa->flags == IP4_TUNNEL) { + tunnel->type = + RTE_SECURITY_IPSEC_TUNNEL_IPV4; + tunnel->ipv4.ttl = IPDEFTTL; + + memcpy((uint8_t *)&tunnel->ipv4.src_ip, + (uint8_t *)&sa->src.ip.ip4, 4); + + memcpy((uint8_t *)&tunnel->ipv4.dst_ip, + (uint8_t *)&sa->dst.ip.ip4, 4); + } + /* TODO support for Transport and IPV6 tunnel */ + } + ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT; +} + +static inline int +create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa) +{ + struct rte_cryptodev_info cdev_info; + unsigned long cdev_id_qp = 0; + int32_t ret = 0; + struct cdev_key key = { 0 }; + + key.lcore_id = (uint8_t)rte_lcore_id(); + + key.cipher_algo = (uint8_t)sa->cipher_algo; + key.auth_algo = (uint8_t)sa->auth_algo; + key.aead_algo = (uint8_t)sa->aead_algo; + + if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) { + ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key, + (void **)&cdev_id_qp); + if (ret < 0) { + RTE_LOG(ERR, IPSEC, + "No cryptodev: core %u, cipher_algo %u, " + "auth_algo %u, aead_algo %u\n", + key.lcore_id, + key.cipher_algo, + key.auth_algo, + key.aead_algo); + return -1; + } + } + + RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev " + "%u qp %u\n", sa->spi, + ipsec_ctx->tbl[cdev_id_qp].id, + ipsec_ctx->tbl[cdev_id_qp].qp); + + if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) { + struct rte_security_session_conf sess_conf = { + .action_type = sa->type, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .spi = sa->spi, + .salt = sa->salt, + .options = { 0 }, + .direction = sa->direction, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = (sa->flags == IP4_TUNNEL || + sa->flags == IP6_TUNNEL) ? 
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL : + RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + } }, + .crypto_xform = sa->xforms, + .userdata = NULL, + + }; + + if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) { + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_cryptodev_get_sec_ctx( + ipsec_ctx->tbl[cdev_id_qp].id); + + /* Set IPsec parameters in conf */ + set_ipsec_conf(sa, &(sess_conf.ipsec)); + + sa->sec_session = rte_security_session_create(ctx, + &sess_conf, ipsec_ctx->session_pool); + if (sa->sec_session == NULL) { + RTE_LOG(ERR, IPSEC, + "SEC Session init failed: err: %d\n", ret); + return -1; + } + } else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + struct rte_flow_error err; + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx( + sa->portid); + const struct rte_security_capability *sec_cap; + int ret = 0; + + sa->sec_session = rte_security_session_create(ctx, + &sess_conf, ipsec_ctx->session_pool); + if (sa->sec_session == NULL) { + RTE_LOG(ERR, IPSEC, + "SEC Session init failed: err: %d\n", ret); + return -1; + } + + sec_cap = rte_security_capabilities_get(ctx); + + /* iterate until ESP tunnel*/ + while (sec_cap->action != + RTE_SECURITY_ACTION_TYPE_NONE) { + + if (sec_cap->action == sa->type && + sec_cap->protocol == + RTE_SECURITY_PROTOCOL_IPSEC && + sec_cap->ipsec.mode == + RTE_SECURITY_IPSEC_SA_MODE_TUNNEL && + sec_cap->ipsec.direction == sa->direction) + break; + sec_cap++; + } + + if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) { + RTE_LOG(ERR, IPSEC, + "No suitable security capability found\n"); + return -1; + } + + sa->ol_flags = sec_cap->ol_flags; + sa->security_ctx = ctx; + sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; + + sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4; + sa->pattern[1].mask = &rte_flow_item_ipv4_mask; + if (sa->flags & IP6_TUNNEL) { + sa->pattern[1].spec = &sa->ipv6_spec; + memcpy(sa->ipv6_spec.hdr.dst_addr, + sa->dst.ip.ip6.ip6_b, 16); + memcpy(sa->ipv6_spec.hdr.src_addr, + sa->src.ip.ip6.ip6_b, 16); + } else { + sa->pattern[1].spec = &sa->ipv4_spec; + sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4; + sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4; + } + + sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP; + sa->pattern[2].spec = &sa->esp_spec; + sa->pattern[2].mask = &rte_flow_item_esp_mask; + sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi); + + sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END; + + sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY; + sa->action[0].conf = sa->sec_session; + + sa->action[1].type = RTE_FLOW_ACTION_TYPE_END; + + sa->attr.egress = (sa->direction == + RTE_SECURITY_IPSEC_SA_DIR_EGRESS); + sa->attr.ingress = (sa->direction == + RTE_SECURITY_IPSEC_SA_DIR_INGRESS); + if (sa->attr.ingress) { + uint8_t rss_key[40]; + struct rte_eth_rss_conf rss_conf = { + .rss_key = rss_key, + .rss_key_len = 40, + }; + struct rte_eth_dev *eth_dev; + uint16_t queue[RTE_MAX_QUEUES_PER_PORT]; + struct rte_flow_action_rss action_rss; + unsigned int i; + unsigned int j; + + sa->action[2].type = RTE_FLOW_ACTION_TYPE_END; + /* Try RSS. 
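(Editor's note: the code below
+ * first validates an RSS action list; on failure it falls back to
+ * a single-queue action and finally to a bare END action before
+ * the flow is created.)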
*/ + sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS; + sa->action[1].conf = &action_rss; + eth_dev = ctx->device; + rte_eth_dev_rss_hash_conf_get(sa->portid, + &rss_conf); + for (i = 0, j = 0; + i < eth_dev->data->nb_rx_queues; ++i) + if (eth_dev->data->rx_queues[i]) + queue[j++] = i; + action_rss = (struct rte_flow_action_rss){ + .types = rss_conf.rss_hf, + .key_len = rss_conf.rss_key_len, + .queue_num = j, + .key = rss_key, + .queue = queue, + }; + ret = rte_flow_validate(sa->portid, &sa->attr, + sa->pattern, sa->action, + &err); + if (!ret) + goto flow_create; + /* Try Queue. */ + sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE; + sa->action[1].conf = + &(struct rte_flow_action_queue){ + .index = 0, + }; + ret = rte_flow_validate(sa->portid, &sa->attr, + sa->pattern, sa->action, + &err); + /* Try End. */ + sa->action[1].type = RTE_FLOW_ACTION_TYPE_END; + sa->action[1].conf = NULL; + ret = rte_flow_validate(sa->portid, &sa->attr, + sa->pattern, sa->action, + &err); + if (ret) + goto flow_create_failure; + } else if (sa->attr.egress && + (sa->ol_flags & + RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) { + sa->action[1].type = + RTE_FLOW_ACTION_TYPE_PASSTHRU; + sa->action[2].type = + RTE_FLOW_ACTION_TYPE_END; + } +flow_create: + sa->flow = rte_flow_create(sa->portid, + &sa->attr, sa->pattern, sa->action, &err); + if (sa->flow == NULL) { +flow_create_failure: + RTE_LOG(ERR, IPSEC, + "Failed to create ipsec flow msg: %s\n", + err.message); + return -1; + } + } else if (sa->type == + RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) { + struct rte_security_ctx *ctx = + (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx(sa->portid); + const struct rte_security_capability *sec_cap; + + if (ctx == NULL) { + RTE_LOG(ERR, IPSEC, + "Ethernet device doesn't have security features registered\n"); + return -1; + } + + /* Set IPsec parameters in conf */ + set_ipsec_conf(sa, &(sess_conf.ipsec)); + + /* Save SA as userdata for the security session. When + * the packet is received, this userdata will be + * retrieved using the metadata from the packet. + * + * The PMD is expected to set similar metadata for other + * operations, like rte_eth_event, which are tied to + * security session. In such cases, the userdata could + * be obtained to uniquely identify the security + * parameters denoted. 
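+ *
+ * (Editor's sketch, using only calls already present in this app:
+ * given the metadata md delivered with such an event,
+ * sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
+ * recovers the registered SA, exactly as
+ * inline_ipsec_event_esn_overflow() in ipsec-secgw.c does.)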
+ */ + + sess_conf.userdata = (void *) sa; + + sa->sec_session = rte_security_session_create(ctx, + &sess_conf, ipsec_ctx->session_pool); + if (sa->sec_session == NULL) { + RTE_LOG(ERR, IPSEC, + "SEC Session init failed: err: %d\n", ret); + return -1; + } + + sec_cap = rte_security_capabilities_get(ctx); + + if (sec_cap == NULL) { + RTE_LOG(ERR, IPSEC, + "No capabilities registered\n"); + return -1; + } + + /* iterate until ESP tunnel*/ + while (sec_cap->action != + RTE_SECURITY_ACTION_TYPE_NONE) { + + if (sec_cap->action == sa->type && + sec_cap->protocol == + RTE_SECURITY_PROTOCOL_IPSEC && + sec_cap->ipsec.mode == + RTE_SECURITY_IPSEC_SA_MODE_TUNNEL && + sec_cap->ipsec.direction == sa->direction) + break; + sec_cap++; + } + + if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) { + RTE_LOG(ERR, IPSEC, + "No suitable security capability found\n"); + return -1; + } + + sa->ol_flags = sec_cap->ol_flags; + sa->security_ctx = ctx; + } + } else { + sa->crypto_session = rte_cryptodev_sym_session_create( + ipsec_ctx->session_pool); + rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id, + sa->crypto_session, sa->xforms, + ipsec_ctx->session_pool); + + rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, + &cdev_info); + } + sa->cdev_id_qp = cdev_id_qp; + + return 0; +} + +static inline void +enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop) +{ + int32_t ret = 0, i; + + cqp->buf[cqp->len++] = cop; + + if (cqp->len == MAX_PKT_BURST) { + int enq_size = cqp->len; + if ((cqp->in_flight + enq_size) > MAX_INFLIGHT) + enq_size -= + (int)((cqp->in_flight + enq_size) - MAX_INFLIGHT); + + if (enq_size > 0) + ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, + cqp->buf, enq_size); + if (ret < cqp->len) { + RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:" + " enqueued %u crypto ops out of %u\n", + cqp->id, cqp->qp, + ret, cqp->len); + for (i = ret; i < cqp->len; i++) + rte_pktmbuf_free(cqp->buf[i]->sym->m_src); + } + cqp->in_flight += ret; + cqp->len = 0; + } +} + +static inline void +ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, + struct rte_mbuf *pkts[], struct ipsec_sa *sas[], + uint16_t nb_pkts) +{ + int32_t ret = 0, i; + struct ipsec_mbuf_metadata *priv; + struct rte_crypto_sym_op *sym_cop; + struct ipsec_sa *sa; + + for (i = 0; i < nb_pkts; i++) { + if (unlikely(sas[i] == NULL)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + rte_prefetch0(sas[i]); + rte_prefetch0(pkts[i]); + + priv = get_priv(pkts[i]); + sa = sas[i]; + priv->sa = sa; + + switch (sa->type) { + case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL: + priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + rte_prefetch0(&priv->sym_cop); + + if ((unlikely(sa->sec_session == NULL)) && + create_session(ipsec_ctx, sa)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + sym_cop = get_sym_cop(&priv->cop); + sym_cop->m_src = pkts[i]; + + rte_security_attach_session(&priv->cop, + sa->sec_session); + break; + case RTE_SECURITY_ACTION_TYPE_NONE: + + priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + rte_prefetch0(&priv->sym_cop); + + if ((unlikely(sa->crypto_session == NULL)) && + create_session(ipsec_ctx, sa)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + rte_crypto_op_attach_sym_session(&priv->cop, + sa->crypto_session); + + ret = xform_func(pkts[i], sa, &priv->cop); + if (unlikely(ret)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + break; + case 
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + if ((unlikely(sa->sec_session == NULL)) && + create_session(ipsec_ctx, sa)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i]; + if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA) + rte_security_set_pkt_metadata( + sa->security_ctx, + sa->sec_session, pkts[i], NULL); + continue; + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + rte_prefetch0(&priv->sym_cop); + + if ((unlikely(sa->sec_session == NULL)) && + create_session(ipsec_ctx, sa)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + rte_security_attach_session(&priv->cop, + sa->sec_session); + + ret = xform_func(pkts[i], sa, &priv->cop); + if (unlikely(ret)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i]; + if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA) + rte_security_set_pkt_metadata( + sa->security_ctx, + sa->sec_session, pkts[i], NULL); + continue; + } + + RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps); + enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop); + } +} + +static inline int +ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, + struct rte_mbuf *pkts[], uint16_t max_pkts) +{ + int32_t nb_pkts = 0, ret = 0, i, j, nb_cops; + struct ipsec_mbuf_metadata *priv; + struct rte_crypto_op *cops[max_pkts]; + struct ipsec_sa *sa; + struct rte_mbuf *pkt; + + for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) { + struct cdev_qp *cqp; + + cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++]; + if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps) + ipsec_ctx->last_qp %= ipsec_ctx->nb_qps; + + while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) { + pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt]; + rte_prefetch0(pkt); + priv = get_priv(pkt); + sa = priv->sa; + ret = xform_func(pkt, sa, &priv->cop); + if (unlikely(ret)) { + rte_pktmbuf_free(pkt); + continue; + } + pkts[nb_pkts++] = pkt; + } + + if (cqp->in_flight == 0) + continue; + + nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, + cops, max_pkts - nb_pkts); + + cqp->in_flight -= nb_cops; + + for (j = 0; j < nb_cops; j++) { + pkt = cops[j]->sym->m_src; + rte_prefetch0(pkt); + + priv = get_priv(pkt); + sa = priv->sa; + + RTE_ASSERT(sa != NULL); + + if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) { + ret = xform_func(pkt, sa, cops[j]); + if (unlikely(ret)) { + rte_pktmbuf_free(pkt); + continue; + } + } + pkts[nb_pkts++] = pkt; + } + } + + /* return packets */ + return nb_pkts; +} + +uint16_t +ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t nb_pkts, uint16_t len) +{ + struct ipsec_sa *sas[nb_pkts]; + + inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts); + + ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts); + + return ipsec_dequeue(esp_inbound_post, ctx, pkts, len); +} + +uint16_t +ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len) +{ + struct ipsec_sa *sas[nb_pkts]; + + outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts); + + ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts); + + return ipsec_dequeue(esp_outbound_post, ctx, pkts, len); +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec.h b/src/spdk/dpdk/examples/ipsec-secgw/ipsec.h new file mode 100644 index 00000000..c998c807 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: 
BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#ifndef __IPSEC_H__ +#define __IPSEC_H__ + +#include + +#include +#include +#include +#include + +#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 +#define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2 +#define RTE_LOGTYPE_IPSEC_IPIP RTE_LOGTYPE_USER3 + +#define MAX_PKT_BURST 32 +#define MAX_INFLIGHT 128 +#define MAX_QP_PER_LCORE 256 + +#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */ + +#define IPSEC_OFFLOAD_ESN_SOFTLIMIT 0xffffff00 + +#define IV_OFFSET (sizeof(struct rte_crypto_op) + \ + sizeof(struct rte_crypto_sym_op)) + +#define uint32_t_to_char(ip, a, b, c, d) do {\ + *a = (uint8_t)(ip >> 24 & 0xff);\ + *b = (uint8_t)(ip >> 16 & 0xff);\ + *c = (uint8_t)(ip >> 8 & 0xff);\ + *d = (uint8_t)(ip & 0xff);\ + } while (0) + +#define DEFAULT_MAX_CATEGORIES 1 + +#define IPSEC_SA_MAX_ENTRIES (128) /* must be power of 2, max 2 power 30 */ +#define SPI2IDX(spi) (spi & (IPSEC_SA_MAX_ENTRIES - 1)) +#define INVALID_SPI (0) + +#define DISCARD (0x80000000) +#define BYPASS (0x40000000) +#define PROTECT_MASK (0x3fffffff) +#define PROTECT(sa_idx) (SPI2IDX(sa_idx) & PROTECT_MASK) /* SA idx 30 bits */ + +#define IPSEC_XFORM_MAX 2 + +#define IP6_VERSION (6) + +struct rte_crypto_xform; +struct ipsec_xform; +struct rte_mbuf; + +struct ipsec_sa; + +typedef int32_t (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +struct ip_addr { + union { + uint32_t ip4; + union { + uint64_t ip6[2]; + uint8_t ip6_b[16]; + } ip6; + } ip; +}; + +#define MAX_KEY_SIZE 32 + +struct ipsec_sa { + uint32_t spi; + uint32_t cdev_id_qp; + uint64_t seq; + uint32_t salt; + union { + struct rte_cryptodev_sym_session *crypto_session; + struct rte_security_session *sec_session; + }; + enum rte_crypto_cipher_algorithm cipher_algo; + enum rte_crypto_auth_algorithm auth_algo; + enum rte_crypto_aead_algorithm aead_algo; + uint16_t digest_len; + uint16_t iv_len; + uint16_t block_size; + uint16_t flags; +#define IP4_TUNNEL (1 << 0) +#define IP6_TUNNEL (1 << 1) +#define TRANSPORT (1 << 2) + struct ip_addr src; + struct ip_addr dst; + uint8_t cipher_key[MAX_KEY_SIZE]; + uint16_t cipher_key_len; + uint8_t auth_key[MAX_KEY_SIZE]; + uint16_t auth_key_len; + uint16_t aad_len; + union { + struct rte_crypto_sym_xform *xforms; + struct rte_security_ipsec_xform *sec_xform; + }; + enum rte_security_session_action_type type; + enum rte_security_ipsec_sa_direction direction; + uint16_t portid; + struct rte_security_ctx *security_ctx; + uint32_t ol_flags; + +#define MAX_RTE_FLOW_PATTERN (4) +#define MAX_RTE_FLOW_ACTIONS (3) + struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN]; + struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS]; + struct rte_flow_attr attr; + union { + struct rte_flow_item_ipv4 ipv4_spec; + struct rte_flow_item_ipv6 ipv6_spec; + }; + struct rte_flow_item_esp esp_spec; + struct rte_flow *flow; + struct rte_security_session_conf sess_conf; +} __rte_cache_aligned; + +struct ipsec_mbuf_metadata { + struct ipsec_sa *sa; + struct rte_crypto_op cop; + struct rte_crypto_sym_op sym_cop; + uint8_t buf[32]; +} __rte_cache_aligned; + +struct cdev_qp { + uint16_t id; + uint16_t qp; + uint16_t in_flight; + uint16_t len; + struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); +}; + +struct ipsec_ctx { + struct rte_hash *cdev_map; + struct sp_ctx *sp4_ctx; + struct sp_ctx *sp6_ctx; + struct sa_ctx *sa_ctx; + uint16_t nb_qps; + uint16_t last_qp; + struct cdev_qp tbl[MAX_QP_PER_LCORE]; + struct rte_mempool *session_pool; + struct rte_mbuf 
*ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); + uint16_t ol_pkts_cnt; +}; + +struct cdev_key { + uint16_t lcore_id; + uint8_t cipher_algo; + uint8_t auth_algo; + uint8_t aead_algo; +}; + +struct socket_ctx { + struct sa_ctx *sa_in; + struct sa_ctx *sa_out; + struct sp_ctx *sp_ip4_in; + struct sp_ctx *sp_ip4_out; + struct sp_ctx *sp_ip6_in; + struct sp_ctx *sp_ip6_out; + struct rt_ctx *rt_ip4; + struct rt_ctx *rt_ip6; + struct rte_mempool *mbuf_pool; + struct rte_mempool *session_pool; +}; + +struct cnt_blk { + uint32_t salt; + uint64_t iv; + uint32_t cnt; +} __attribute__((packed)); + +uint16_t +ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t nb_pkts, uint16_t len); + +uint16_t +ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len); + +static inline uint16_t +ipsec_metadata_size(void) +{ + return sizeof(struct ipsec_mbuf_metadata); +} + +static inline struct ipsec_mbuf_metadata * +get_priv(struct rte_mbuf *m) +{ + return rte_mbuf_to_priv(m); +} + +static inline void * +get_cnt_blk(struct rte_mbuf *m) +{ + struct ipsec_mbuf_metadata *priv = get_priv(m); + + return &priv->buf[0]; +} + +static inline void * +get_aad(struct rte_mbuf *m) +{ + struct ipsec_mbuf_metadata *priv = get_priv(m); + + return &priv->buf[16]; +} + +static inline void * +get_sym_cop(struct rte_crypto_op *cop) +{ + return (cop + 1); +} + +int +inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx); + +void +inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[], + struct ipsec_sa *sa[], uint16_t nb_pkts); + +void +outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], + struct ipsec_sa *sa[], uint16_t nb_pkts); + +void +sp4_init(struct socket_ctx *ctx, int32_t socket_id); + +void +sp6_init(struct socket_ctx *ctx, int32_t socket_id); + +void +sa_init(struct socket_ctx *ctx, int32_t socket_id); + +void +rt_init(struct socket_ctx *ctx, int32_t socket_id); + +#endif /* __IPSEC_H__ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/meson.build b/src/spdk/dpdk/examples/ipsec-secgw/meson.build new file mode 100644 index 00000000..77d8b298 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +# meson file, for building this example as part of a main DPDK build. 
+# +# To build this example as a standalone application with an already-installed +# DPDK instance, use 'make' + +deps += ['security', 'lpm', 'acl', 'hash'] +allow_experimental_apis = true +sources = files( + 'esp.c', 'ipsec.c', 'ipsec-secgw.c', 'parser.c', + 'rt.c', 'sa.c', 'sp4.c', 'sp6.c' +) diff --git a/src/spdk/dpdk/examples/ipsec-secgw/parser.c b/src/spdk/dpdk/examples/ipsec-secgw/parser.c new file mode 100644 index 00000000..91282ca9 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/parser.c @@ -0,0 +1,563 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ipsec.h" +#include "parser.h" + +#define PARSE_DELIMITER " \f\n\r\t\v" +static int +parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens) +{ + uint32_t i; + + if ((string == NULL) || + (tokens == NULL) || + (*n_tokens < 1)) + return -EINVAL; + + for (i = 0; i < *n_tokens; i++) { + tokens[i] = strtok_r(string, PARSE_DELIMITER, &string); + if (tokens[i] == NULL) + break; + } + + if ((i == *n_tokens) && + (NULL != strtok_r(string, PARSE_DELIMITER, &string))) + return -E2BIG; + + *n_tokens = i; + return 0; +} + +#define INADDRSZ 4 +#define IN6ADDRSZ 16 + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr(digits, ch); + if (pch != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return 0; + if (!saw_digit) { + if (++octets > 4) + return 0; + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } else + return 0; + } + if (octets < 4) + return 0; + + memcpy(dst, tmp, INADDRSZ); + return 1; +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; + const char *xdigits = 0, *curtok = 0; + int ch = 0, saw_xdigit = 0, count_xdigit = 0; + unsigned int val = 0; + unsigned dbloct_count = 0; + + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. 
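(Editor's example: for "::1" the
+ * parser records the position of the "::" in colonp; after the main
+ * loop the bytes parsed after it are shifted to the end of the
+ * buffer and the gap is zero-filled, yielding fifteen 0x00 bytes
+ * followed by 0x01.)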
*/ + if (*src == ':') + if (*++src != ':') + return 0; + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr((xdigits = xdigits_l), ch); + if (pch == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return 0; + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return 0; + colonp = tp; + continue; + } else if (*src == '\0') { + return 0; + } + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + dbloct_count++; + continue; + } + if (ch == '.' && ((tp + INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + dbloct_count += 2; + break; /* '\0' was seen by inet_pton4(). */ + } + return 0; + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + dbloct_count++; + } + if (colonp != NULL) { + /* if we already have 8 double octets, having a colon + * means error */ + if (dbloct_count == 8) + return 0; + + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. + */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + memcpy(dst, tmp, IN6ADDRSZ); + return 1; +} + +int +parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask) +{ + char ip_str[INET_ADDRSTRLEN] = {0}; + char *pch; + + pch = strchr(token, '/'); + if (pch != NULL) { + strlcpy(ip_str, token, + RTE_MIN((unsigned int long)(pch - token + 1), + sizeof(ip_str))); + pch += 1; + if (is_str_num(pch) != 0) + return -EINVAL; + if (mask) + *mask = atoi(pch); + } else { + strlcpy(ip_str, token, sizeof(ip_str)); + if (mask) + *mask = 0; + } + if (strlen(ip_str) >= INET_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton4(ip_str, (unsigned char *)ipv4) != 1) + return -EINVAL; + + return 0; +} + +int +parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask) +{ + char ip_str[256] = {0}; + char *pch; + + pch = strchr(token, '/'); + if (pch != NULL) { + strlcpy(ip_str, token, + RTE_MIN((unsigned int long)(pch - token + 1), + sizeof(ip_str))); + pch += 1; + if (is_str_num(pch) != 0) + return -EINVAL; + if (mask) + *mask = atoi(pch); + } else { + strlcpy(ip_str, token, sizeof(ip_str)); + if (mask) + *mask = 0; + } + + if (strlen(ip_str) >= INET6_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton6(ip_str, (unsigned char *)ipv6) != 1) + return -EINVAL; + + return 0; +} + +int +parse_range(const char *token, uint16_t *low, uint16_t *high) +{ + char ch; + char num_str[20]; + uint32_t pos; + int range_low = -1; + int range_high = -1; + + if (!low || !high) + return -1; + + memset(num_str, 0, 20); + pos = 0; + + while ((ch = *token++) != '\0') { + if (isdigit(ch)) { + if (pos >= 19) + return -1; + num_str[pos++] = ch; + } else if (ch == ':') { + if (range_low != -1) + return -1; + range_low = atoi(num_str); + memset(num_str, 0, 20); + pos = 0; + } + } + + if (strlen(num_str) == 0) + return -1; + + range_high = atoi(num_str); + + *low = (uint16_t)range_low; + *high = (uint16_t)range_high; + + return 0; +} + +/** sp add parse */ +struct 
cfg_sp_add_cfg_item { + cmdline_fixed_string_t sp_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_sp_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_sp_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK((parse_tokenize_string(params->multi_string, tokens, + &n_tokens) == 0), status, "too many arguments"); + + if (status->status < 0) + return; + + if (strcmp(tokens[0], "ipv4") == 0) { + parse_sp4_tokens(tokens, n_tokens, status); + if (status->status < 0) + return; + } else if (strcmp(tokens[0], "ipv6") == 0) { + parse_sp6_tokens(tokens, n_tokens, status); + if (status->status < 0) + return; + } else { + APP_CHECK(0, status, "unrecognizable input %s\n", + tokens[0]); + return; + } +} + +static cmdline_parse_token_string_t cfg_sp_add_sp_str = + TOKEN_STRING_INITIALIZER(struct cfg_sp_add_cfg_item, + sp_keyword, "sp"); + +static cmdline_parse_token_string_t cfg_sp_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_sp_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_sp_add_rule = { + .f = cfg_sp_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_sp_add_sp_str, + (void *) &cfg_sp_add_multi_str, + NULL, + }, +}; + +/* sa add parse */ +struct cfg_sa_add_cfg_item { + cmdline_fixed_string_t sa_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_sa_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_sa_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK(parse_tokenize_string(params->multi_string, tokens, + &n_tokens) == 0, status, "too many arguments\n"); + + parse_sa_tokens(tokens, n_tokens, status); +} + +static cmdline_parse_token_string_t cfg_sa_add_sa_str = + TOKEN_STRING_INITIALIZER(struct cfg_sa_add_cfg_item, + sa_keyword, "sa"); + +static cmdline_parse_token_string_t cfg_sa_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_sa_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_sa_add_rule = { + .f = cfg_sa_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_sa_add_sa_str, + (void *) &cfg_sa_add_multi_str, + NULL, + }, +}; + +/* rt add parse */ +struct cfg_rt_add_cfg_item { + cmdline_fixed_string_t rt_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_rt_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_rt_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK(parse_tokenize_string( + params->multi_string, tokens, &n_tokens) == 0, + status, "too many arguments\n"); + if (status->status < 0) + return; + + parse_rt_tokens(tokens, n_tokens, status); +} + +static cmdline_parse_token_string_t cfg_rt_add_rt_str = + TOKEN_STRING_INITIALIZER(struct cfg_rt_add_cfg_item, + rt_keyword, "rt"); + +static cmdline_parse_token_string_t cfg_rt_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_rt_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_rt_add_rule = { + .f = cfg_rt_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_rt_add_rt_str, + 
(void *) &cfg_rt_add_multi_str,
+		NULL,
+	},
+};
+
+/** set of cfg items */
+cmdline_parse_ctx_t ipsec_ctx[] = {
+	(cmdline_parse_inst_t *)&cfg_sp_add_rule,
+	(cmdline_parse_inst_t *)&cfg_sa_add_rule,
+	(cmdline_parse_inst_t *)&cfg_rt_add_rule,
+	NULL,
+};
+
+int
+parse_cfg_file(const char *cfg_filename)
+{
+	struct cmdline *cl = cmdline_stdin_new(ipsec_ctx, "");
+	FILE *f = fopen(cfg_filename, "r");
+	char str[1024] = {0}, *get_s = NULL;
+	uint32_t line_num = 0;
+	struct parse_status status = {0};
+
+	if (f == NULL) {
+		rte_panic("Error: cannot open file %s\n", cfg_filename);
+		goto error_exit;
+	}
+
+	if (cl == NULL) {
+		rte_panic("Error: cannot create cmdline instance\n");
+		goto error_exit;
+	}
+
+	cfg_sp_add_rule.data = &status;
+	cfg_sa_add_rule.data = &status;
+	cfg_rt_add_rule.data = &status;
+
+	do {
+		char oneline[1024];
+		char *pos;
+		get_s = fgets(oneline, 1024, f);
+
+		if (!get_s)
+			break;
+
+		line_num++;
+
+		if (strlen(oneline) > 1022) {
+			rte_panic("%s:%u: error: "
+				"the line contains more characters than the parser can handle\n",
+				cfg_filename, line_num);
+			goto error_exit;
+		}
+
+		/* process comment char '#' */
+		if (oneline[0] == '#')
+			continue;
+
+		pos = strchr(oneline, '#');
+		if (pos != NULL)
+			*pos = '\0';
+
+		/* process line concatenator '\' */
+		pos = strchr(oneline, '\\');
+		if (pos != NULL) {
+			if (pos != oneline + strlen(oneline) - 2) {
+				rte_panic("%s:%u: error: "
+					"no characters are allowed after '\\'\n",
+					cfg_filename, line_num);
+				goto error_exit;
+			}
+
+			*pos = '\0';
+
+			if (strlen(oneline) + strlen(str) > 1022) {
+				rte_panic("%s:%u: error: "
+					"the concatenated line contains more characters than the parser can handle\n",
+					cfg_filename, line_num);
+				goto error_exit;
+			}
+
+			strcpy(str + strlen(str), oneline);
+			continue;
+		}
+
+		/* copy the line to str and process */
+		if (strlen(oneline) + strlen(str) > 1022) {
+			rte_panic("%s:%u: error: "
+				"the line contains more characters than the parser can handle\n",
+				cfg_filename, line_num);
+			goto error_exit;
+		}
+		strcpy(str + strlen(str), oneline);
+
+		str[strlen(str)] = '\n';
+		if (cmdline_parse(cl, str) < 0) {
+			rte_panic("%s:%u: error: parsing \"%s\" failed\n",
+				cfg_filename, line_num, str);
+			goto error_exit;
+		}
+
+		if (status.status < 0) {
+			rte_panic("%s:%u: error: %s", cfg_filename,
+				line_num, status.parse_msg);
+			goto error_exit;
+		}
+
+		memset(str, 0, 1024);
+	} while (1);
+
+	cmdline_stdin_exit(cl);
+	fclose(f);
+
+	return 0;
+
+error_exit:
+	if (cl)
+		cmdline_stdin_exit(cl);
+	if (f)
+		fclose(f);
+
+	return -1;
+}
diff --git a/src/spdk/dpdk/examples/ipsec-secgw/parser.h b/src/spdk/dpdk/examples/ipsec-secgw/parser.h
new file mode 100644
index 00000000..be02537c
--- /dev/null
+++ b/src/spdk/dpdk/examples/ipsec-secgw/parser.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include
+#include
+#include
+
+#ifndef __PARSER_H
+#define __PARSER_H
+
+struct parse_status {
+	int status;
+	char parse_msg[256];
+};
+
+#define APP_CHECK(exp, status, fmt, ...)			\
+do {								\
+	if (!(exp)) {						\
+		snprintf(status->parse_msg,			\
+			sizeof(status->parse_msg), fmt "\n",	\
+			## __VA_ARGS__);			\
+		status->status = -1;				\
+	} else							\
+		status->status = 0;				\
+} while (0)
+
+#define APP_CHECK_PRESENCE(val, str, status)		\
+	APP_CHECK(val == 0, status,			\
+		"item \"%s\" already present", str)
+
+#define APP_CHECK_TOKEN_EQUAL(tokens, index, ref, status)	\
+	APP_CHECK(strcmp(tokens[index], ref) == 0, status,	\
+		"unrecognized input \"%s\": expect \"%s\"",	\
+		tokens[index], ref)
+
+static inline int
+is_str_num(const char *str)
+{
+	uint32_t i;
+
+	for (i = 0; i < strlen(str); i++)
+		if (!isdigit((unsigned char)str[i]))
+			return -1;
+
+	return 0;
+}
+
+#define APP_CHECK_TOKEN_IS_NUM(tokens, index, status)		\
+	APP_CHECK(is_str_num(tokens[index]) == 0, status,	\
+		"input \"%s\" is not a valid number string",	\
+		tokens[index])
+
+
+#define INCREMENT_TOKEN_INDEX(index, max_num, status)			\
+do {									\
+	APP_CHECK(index + 1 < max_num, status, "reaching the end of "	\
+		"the token array");					\
+	index++;							\
+} while (0)
+
+int
+parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask);
+
+int
+parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask);
+
+int
+parse_range(const char *token, uint16_t *low, uint16_t *high);
+
+void
+parse_sp4_tokens(char **tokens, uint32_t n_tokens,
+	struct parse_status *status);
+
+void
+parse_sp6_tokens(char **tokens, uint32_t n_tokens,
+	struct parse_status *status);
+
+void
+parse_sa_tokens(char **tokens, uint32_t n_tokens,
+	struct parse_status *status);
+
+void
+parse_rt_tokens(char **tokens, uint32_t n_tokens,
+	struct parse_status *status);
+
+int
+parse_cfg_file(const char *cfg_filename);
+
+#endif
diff --git a/src/spdk/dpdk/examples/ipsec-secgw/rt.c b/src/spdk/dpdk/examples/ipsec-secgw/rt.c
new file mode 100644
index 00000000..ec3a375f
--- /dev/null
+++ b/src/spdk/dpdk/examples/ipsec-secgw/rt.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+/*
+ * Routing Table (RT)
+ */
+#include
+#include
+#include
+#include
+#include
+
+#include "ipsec.h"
+#include "parser.h"
+
+#define RT_IPV4_MAX_RULES	1024
+#define RT_IPV6_MAX_RULES	1024
+
+struct ip4_route {
+	uint32_t ip;
+	uint8_t depth;
+	uint8_t if_out;
+};
+
+struct ip6_route {
+	uint8_t ip[16];
+	uint8_t depth;
+	uint8_t if_out;
+};
+
+struct ip4_route rt_ip4[RT_IPV4_MAX_RULES];
+uint32_t nb_rt_ip4;
+
+struct ip6_route rt_ip6[RT_IPV6_MAX_RULES];
+uint32_t nb_rt_ip6;
+
+void
+parse_rt_tokens(char **tokens, uint32_t n_tokens,
+	struct parse_status *status)
+{
+	uint32_t ti;
+	uint32_t *n_rts = NULL;
+	struct ip4_route *route_ipv4 = NULL;
+	struct ip6_route *route_ipv6 = NULL;
+
+	if (strcmp(tokens[0], "ipv4") == 0) {
+		n_rts = &nb_rt_ip4;
+		route_ipv4 = &rt_ip4[*n_rts];
+
+		APP_CHECK(*n_rts <= RT_IPV4_MAX_RULES - 1, status,
+			"too many rt rules, abort insertion\n");
+		if (status->status < 0)
+			return;
+
+	} else if (strcmp(tokens[0], "ipv6") == 0) {
+		n_rts = &nb_rt_ip6;
+		route_ipv6 = &rt_ip6[*n_rts];
+
+		APP_CHECK(*n_rts <= RT_IPV6_MAX_RULES - 1, status,
+			"too many rt rules, abort insertion\n");
+		if (status->status < 0)
+			return;
+	} else {
+		APP_CHECK(0, status, "unrecognized input \"%s\"",
+			tokens[0]);
+		return;
+	}
+
+	for (ti = 1; ti < n_tokens; ti++) {
+		if (strcmp(tokens[ti], "dst") == 0) {
+			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+			if (status->status < 0)
+				return;
+
+			if (route_ipv4 != NULL) {
+				struct in_addr ip;
+				uint32_t depth = 0;
+
+				APP_CHECK(parse_ipv4_addr(tokens[ti],
+					&ip, &depth) == 0, status,
+					"unrecognized input \"%s\", "
"expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + route_ipv4->ip = rte_bswap32( + (uint32_t)ip.s_addr); + route_ipv4->depth = (uint8_t)depth; + } else { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK(parse_ipv6_addr(tokens[ti], + &ip, &depth) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 address", + tokens[ti]); + if (status->status < 0) + return; + memcpy(route_ipv6->ip, ip.s6_addr, 16); + route_ipv6->depth = (uint8_t)depth; + } + } + + if (strcmp(tokens[ti], "port") == 0) { + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + if (route_ipv4 != NULL) + route_ipv4->if_out = atoi(tokens[ti]); + else + route_ipv6->if_out = atoi(tokens[ti]); + } + } + + *n_rts = *n_rts + 1; +} + +void +rt_init(struct socket_ctx *ctx, int32_t socket_id) +{ + char name[PATH_MAX]; + uint32_t i; + int32_t ret; + struct rte_lpm *lpm; + struct rte_lpm6 *lpm6; + char a, b, c, d; + struct rte_lpm_config conf = { 0 }; + struct rte_lpm6_config conf6 = { 0 }; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->rt_ip4 != NULL) + rte_exit(EXIT_FAILURE, "IPv4 Routing Table for socket %u " + "already initialized\n", socket_id); + + if (ctx->rt_ip6 != NULL) + rte_exit(EXIT_FAILURE, "IPv6 Routing Table for socket %u " + "already initialized\n", socket_id); + + if (nb_rt_ip4 == 0 && nb_rt_ip6 == 0) + RTE_LOG(WARNING, IPSEC, "No Routing rule specified\n"); + + printf("Creating IPv4 Routing Table (RT) context with %u max routes\n", + RT_IPV4_MAX_RULES); + + /* create the LPM table */ + snprintf(name, sizeof(name), "%s_%u", "rt_ip4", socket_id); + conf.max_rules = RT_IPV4_MAX_RULES; + conf.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES; + lpm = rte_lpm_create(name, socket_id, &conf); + if (lpm == NULL) + rte_exit(EXIT_FAILURE, "Unable to create %s LPM table " + "on socket %d\n", name, socket_id); + + /* populate the LPM table */ + for (i = 0; i < nb_rt_ip4; i++) { + ret = rte_lpm_add(lpm, rt_ip4[i].ip, rt_ip4[i].depth, + rt_ip4[i].if_out); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s " + "LPM table on socket %d\n", i, name, socket_id); + + uint32_t_to_char(rt_ip4[i].ip, &a, &b, &c, &d); + printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n", + a, b, c, d, rt_ip4[i].depth, + rt_ip4[i].if_out); + } + + snprintf(name, sizeof(name), "%s_%u", "rt_ip6", socket_id); + conf6.max_rules = RT_IPV6_MAX_RULES; + conf6.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES; + lpm6 = rte_lpm6_create(name, socket_id, &conf6); + if (lpm6 == NULL) + rte_exit(EXIT_FAILURE, "Unable to create %s LPM table " + "on socket %d\n", name, socket_id); + + /* populate the LPM table */ + for (i = 0; i < nb_rt_ip6; i++) { + ret = rte_lpm6_add(lpm6, rt_ip6[i].ip, rt_ip6[i].depth, + rt_ip6[i].if_out); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s " + "LPM table on socket %d\n", i, name, socket_id); + + printf("LPM6: Adding route " + " %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n", + (uint16_t)((rt_ip6[i].ip[0] << 8) | rt_ip6[i].ip[1]), + (uint16_t)((rt_ip6[i].ip[2] << 8) | rt_ip6[i].ip[3]), + (uint16_t)((rt_ip6[i].ip[4] << 8) | rt_ip6[i].ip[5]), + (uint16_t)((rt_ip6[i].ip[6] << 8) | rt_ip6[i].ip[7]), + (uint16_t)((rt_ip6[i].ip[8] << 8) | rt_ip6[i].ip[9]), + (uint16_t)((rt_ip6[i].ip[10] << 8) | rt_ip6[i].ip[11]), + (uint16_t)((rt_ip6[i].ip[12] << 8) | rt_ip6[i].ip[13]), + (uint16_t)((rt_ip6[i].ip[14] << 8) | rt_ip6[i].ip[15]), + 
rt_ip6[i].depth, rt_ip6[i].if_out); + } + + ctx->rt_ip4 = (struct rt_ctx *)lpm; + ctx->rt_ip6 = (struct rt_ctx *)lpm6; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/sa.c b/src/spdk/dpdk/examples/ipsec-secgw/sa.c new file mode 100644 index 00000000..4ab8e098 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/sa.c @@ -0,0 +1,1010 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +/* + * Security Associations + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipsec.h" +#include "esp.h" +#include "parser.h" + +#define IPDEFTTL 64 + +struct supported_cipher_algo { + const char *keyword; + enum rte_crypto_cipher_algorithm algo; + uint16_t iv_len; + uint16_t block_size; + uint16_t key_len; +}; + +struct supported_auth_algo { + const char *keyword; + enum rte_crypto_auth_algorithm algo; + uint16_t digest_len; + uint16_t key_len; + uint8_t key_not_req; +}; + +struct supported_aead_algo { + const char *keyword; + enum rte_crypto_aead_algorithm algo; + uint16_t iv_len; + uint16_t block_size; + uint16_t digest_len; + uint16_t key_len; + uint8_t aad_len; +}; + + +const struct supported_cipher_algo cipher_algos[] = { + { + .keyword = "null", + .algo = RTE_CRYPTO_CIPHER_NULL, + .iv_len = 0, + .block_size = 4, + .key_len = 0 + }, + { + .keyword = "aes-128-cbc", + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .iv_len = 16, + .block_size = 16, + .key_len = 16 + }, + { + .keyword = "aes-256-cbc", + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .iv_len = 16, + .block_size = 16, + .key_len = 32 + }, + { + .keyword = "aes-128-ctr", + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .iv_len = 8, + .block_size = 16, /* XXX AESNI MB limition, should be 4 */ + .key_len = 20 + } +}; + +const struct supported_auth_algo auth_algos[] = { + { + .keyword = "null", + .algo = RTE_CRYPTO_AUTH_NULL, + .digest_len = 0, + .key_len = 0, + .key_not_req = 1 + }, + { + .keyword = "sha1-hmac", + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .digest_len = 12, + .key_len = 20 + }, + { + .keyword = "sha256-hmac", + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, + .digest_len = 12, + .key_len = 32 + } +}; + +const struct supported_aead_algo aead_algos[] = { + { + .keyword = "aes-128-gcm", + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .iv_len = 8, + .block_size = 4, + .key_len = 20, + .digest_len = 16, + .aad_len = 8, + } +}; + +struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES]; +uint32_t nb_sa_out; + +struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES]; +uint32_t nb_sa_in; + +static const struct supported_cipher_algo * +find_match_cipher_algo(const char *cipher_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(cipher_algos); i++) { + const struct supported_cipher_algo *algo = + &cipher_algos[i]; + + if (strcmp(cipher_keyword, algo->keyword) == 0) + return algo; + } + + return NULL; +} + +static const struct supported_auth_algo * +find_match_auth_algo(const char *auth_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(auth_algos); i++) { + const struct supported_auth_algo *algo = + &auth_algos[i]; + + if (strcmp(auth_keyword, algo->keyword) == 0) + return algo; + } + + return NULL; +} + +static const struct supported_aead_algo * +find_match_aead_algo(const char *aead_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(aead_algos); i++) { + const struct supported_aead_algo *algo = + &aead_algos[i]; + + if (strcmp(aead_keyword, algo->keyword) == 0) + return algo; + } + + return NULL; +} + +/** parse_key_string + * parse x:x:x:x.... 
hex number key string into uint8_t *key + * return: + * > 0: number of bytes parsed + * 0: failed + */ +static uint32_t +parse_key_string(const char *key_str, uint8_t *key) +{ + const char *pt_start = key_str, *pt_end = key_str; + uint32_t nb_bytes = 0; + + while (pt_end != NULL) { + char sub_str[3] = {0}; + + pt_end = strchr(pt_start, ':'); + + if (pt_end == NULL) { + if (strlen(pt_start) > 2) + return 0; + strncpy(sub_str, pt_start, 2); + } else { + if (pt_end - pt_start > 2) + return 0; + + strncpy(sub_str, pt_start, pt_end - pt_start); + pt_start = pt_end + 1; + } + + key[nb_bytes++] = strtol(sub_str, NULL, 16); + } + + return nb_bytes; +} + +void +parse_sa_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct ipsec_sa *rule = NULL; + uint32_t ti; /*token index*/ + uint32_t *ri /*rule index*/; + uint32_t cipher_algo_p = 0; + uint32_t auth_algo_p = 0; + uint32_t aead_algo_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t mode_p = 0; + uint32_t type_p = 0; + uint32_t portid_p = 0; + + if (strcmp(tokens[0], "in") == 0) { + ri = &nb_sa_in; + + APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, + "too many sa rules, abort insertion\n"); + if (status->status < 0) + return; + + rule = &sa_in[*ri]; + } else { + ri = &nb_sa_out; + + APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, + "too many sa rules, abort insertion\n"); + if (status->status < 0) + return; + + rule = &sa_out[*ri]; + } + + /* spi number */ + APP_CHECK_TOKEN_IS_NUM(tokens, 1, status); + if (status->status < 0) + return; + if (atoi(tokens[1]) == INVALID_SPI) + return; + rule->spi = atoi(tokens[1]); + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "mode") == 0) { + APP_CHECK_PRESENCE(mode_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (strcmp(tokens[ti], "ipv4-tunnel") == 0) + rule->flags = IP4_TUNNEL; + else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) + rule->flags = IP6_TUNNEL; + else if (strcmp(tokens[ti], "transport") == 0) + rule->flags = TRANSPORT; + else { + APP_CHECK(0, status, "unrecognized " + "input \"%s\"", tokens[ti]); + return; + } + + mode_p = 1; + continue; + } + + if (strcmp(tokens[ti], "cipher_algo") == 0) { + const struct supported_cipher_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_cipher_algo(tokens[ti]); + + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + rule->cipher_algo = algo->algo; + rule->block_size = algo->block_size; + rule->iv_len = algo->iv_len; + rule->cipher_key_len = algo->key_len; + + /* for NULL algorithm, no cipher key required */ + if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { + cipher_algo_p = 1; + continue; + } + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"cipher_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->cipher_key); + APP_CHECK(key_len == rule->cipher_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC) + rule->salt = 
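+				/* CBC key strings carry no salt bytes, so generate a random salt */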
(uint32_t)rte_rand(); + + if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) { + key_len -= 4; + rule->cipher_key_len = key_len; + memcpy(&rule->salt, + &rule->cipher_key[key_len], 4); + } + + cipher_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "auth_algo") == 0) { + const struct supported_auth_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(auth_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_auth_algo(tokens[ti]); + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + rule->auth_algo = algo->algo; + rule->auth_key_len = algo->key_len; + rule->digest_len = algo->digest_len; + + /* NULL algorithm and combined algos do not + * require auth key + */ + if (algo->key_not_req) { + auth_algo_p = 1; + continue; + } + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "auth_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"auth_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->auth_key); + APP_CHECK(key_len == rule->auth_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + auth_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "aead_algo") == 0) { + const struct supported_aead_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(aead_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_aead_algo(tokens[ti]); + + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + rule->aead_algo = algo->algo; + rule->cipher_key_len = algo->key_len; + rule->digest_len = algo->digest_len; + rule->aad_len = algo->aad_len; + rule->block_size = algo->block_size; + rule->iv_len = algo->iv_len; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "aead_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"aead_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->cipher_key); + APP_CHECK(key_len == rule->cipher_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + key_len -= 4; + rule->cipher_key_len = key_len; + memcpy(&rule->salt, + &rule->cipher_key[key_len], 4); + + aead_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (rule->flags == IP4_TUNNEL) { + struct in_addr ip; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + rule->src.ip.ip4 = rte_bswap32( + (uint32_t)ip.s_addr); + } else if (rule->flags == IP6_TUNNEL) { + struct in6_addr ip; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 addr", + tokens[ti]); + if (status->status < 0) + return; + memcpy(rule->src.ip.ip6.ip6_b, + 
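+					/* full 16-byte IPv6 tunnel source */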
ip.s6_addr, 16); + } else if (rule->flags == TRANSPORT) { + APP_CHECK(0, status, "unrecognized input " + "\"%s\"", tokens[ti]); + return; + } + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (rule->flags == IP4_TUNNEL) { + struct in_addr ip; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + rule->dst.ip.ip4 = rte_bswap32( + (uint32_t)ip.s_addr); + } else if (rule->flags == IP6_TUNNEL) { + struct in6_addr ip; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 addr", + tokens[ti]); + if (status->status < 0) + return; + memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16); + } else if (rule->flags == TRANSPORT) { + APP_CHECK(0, status, "unrecognized " + "input \"%s\"", tokens[ti]); + return; + } + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "type") == 0) { + APP_CHECK_PRESENCE(type_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (strcmp(tokens[ti], "inline-crypto-offload") == 0) + rule->type = + RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO; + else if (strcmp(tokens[ti], + "inline-protocol-offload") == 0) + rule->type = + RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL; + else if (strcmp(tokens[ti], + "lookaside-protocol-offload") == 0) + rule->type = + RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL; + else if (strcmp(tokens[ti], "no-offload") == 0) + rule->type = RTE_SECURITY_ACTION_TYPE_NONE; + else { + APP_CHECK(0, status, "Invalid input \"%s\"", + tokens[ti]); + return; + } + + type_p = 1; + continue; + } + + if (strcmp(tokens[ti], "port_id") == 0) { + APP_CHECK_PRESENCE(portid_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + rule->portid = atoi(tokens[ti]); + if (status->status < 0) + return; + portid_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; + } + + if (aead_algo_p) { + APP_CHECK(cipher_algo_p == 0, status, + "AEAD used, no need for cipher options"); + if (status->status < 0) + return; + + APP_CHECK(auth_algo_p == 0, status, + "AEAD used, no need for auth options"); + if (status->status < 0) + return; + } else { + APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options"); + if (status->status < 0) + return; + + APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options"); + if (status->status < 0) + return; + } + + APP_CHECK(mode_p == 1, status, "missing mode option"); + if (status->status < 0) + return; + + if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0)) + printf("Missing portid option, falling back to non-offload\n"); + + if (!type_p || !portid_p) { + rule->type = RTE_SECURITY_ACTION_TYPE_NONE; + rule->portid = -1; + } + + *ri = *ri + 1; +} + +static inline void +print_one_sa_rule(const struct ipsec_sa *sa, int inbound) +{ + uint32_t i; + uint8_t a, b, c, d; + + printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi); + + for (i = 0; i < RTE_DIM(cipher_algos); i++) { + if (cipher_algos[i].algo == sa->cipher_algo && + cipher_algos[i].key_len == sa->cipher_key_len) { + 
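+			/* key_len disambiguates aes-128-cbc from aes-256-cbc, which share one algo id */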
printf("%s ", cipher_algos[i].keyword); + break; + } + } + + for (i = 0; i < RTE_DIM(auth_algos); i++) { + if (auth_algos[i].algo == sa->auth_algo) { + printf("%s ", auth_algos[i].keyword); + break; + } + } + + for (i = 0; i < RTE_DIM(aead_algos); i++) { + if (aead_algos[i].algo == sa->aead_algo) { + printf("%s ", aead_algos[i].keyword); + break; + } + } + + printf("mode:"); + + switch (sa->flags) { + case IP4_TUNNEL: + printf("IP4Tunnel "); + uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a); + uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a); + break; + case IP6_TUNNEL: + printf("IP6Tunnel "); + for (i = 0; i < 16; i++) { + if (i % 2 && i != 15) + printf("%.2x:", sa->src.ip.ip6.ip6_b[i]); + else + printf("%.2x", sa->src.ip.ip6.ip6_b[i]); + } + printf(" "); + for (i = 0; i < 16; i++) { + if (i % 2 && i != 15) + printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]); + else + printf("%.2x", sa->dst.ip.ip6.ip6_b[i]); + } + break; + case TRANSPORT: + printf("Transport"); + break; + } + printf("\n"); +} + +struct sa_ctx { + struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; + union { + struct { + struct rte_crypto_sym_xform a; + struct rte_crypto_sym_xform b; + }; + } xf[IPSEC_SA_MAX_ENTRIES]; +}; + +static struct sa_ctx * +sa_create(const char *name, int32_t socket_id) +{ + char s[PATH_MAX]; + struct sa_ctx *sa_ctx; + uint32_t mz_size; + const struct rte_memzone *mz; + + snprintf(s, sizeof(s), "%s_%u", name, socket_id); + + /* Create SA array table */ + printf("Creating SA context with %u maximum entries\n", + IPSEC_SA_MAX_ENTRIES); + + mz_size = sizeof(struct sa_ctx); + mz = rte_memzone_reserve(s, mz_size, socket_id, + RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) { + printf("Failed to allocate SA DB memory\n"); + rte_errno = -ENOMEM; + return NULL; + } + + sa_ctx = (struct sa_ctx *)mz->addr; + + return sa_ctx; +} + +static int +check_eth_dev_caps(uint16_t portid, uint32_t inbound) +{ + struct rte_eth_dev_info dev_info; + + rte_eth_dev_info_get(portid, &dev_info); + + if (inbound) { + if ((dev_info.rx_offload_capa & + DEV_RX_OFFLOAD_SECURITY) == 0) { + RTE_LOG(WARNING, PORT, + "hardware RX IPSec offload is not supported\n"); + return -EINVAL; + } + + } else { /* outbound */ + if ((dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_SECURITY) == 0) { + RTE_LOG(WARNING, PORT, + "hardware TX IPSec offload is not supported\n"); + return -EINVAL; + } + } + return 0; +} + + +static int +sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries, uint32_t inbound) +{ + struct ipsec_sa *sa; + uint32_t i, idx; + uint16_t iv_length; + + for (i = 0; i < nb_entries; i++) { + idx = SPI2IDX(entries[i].spi); + sa = &sa_ctx->sa[idx]; + if (sa->spi != 0) { + printf("Index %u already in use by SPI %u\n", + idx, sa->spi); + return -EINVAL; + } + *sa = entries[i]; + sa->seq = 0; + + if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL || + sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + if (check_eth_dev_caps(sa->portid, inbound)) + return -EINVAL; + } + + sa->direction = (inbound == 1) ? 
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS : + RTE_SECURITY_IPSEC_SA_DIR_EGRESS; + + switch (sa->flags) { + case IP4_TUNNEL: + sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4); + sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4); + } + + if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { + iv_length = 16; + + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD; + sa_ctx->xf[idx].a.aead.algo = sa->aead_algo; + sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key; + sa_ctx->xf[idx].a.aead.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].a.aead.op = (inbound == 1) ? + RTE_CRYPTO_AEAD_OP_DECRYPT : + RTE_CRYPTO_AEAD_OP_ENCRYPT; + sa_ctx->xf[idx].a.next = NULL; + sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET; + sa_ctx->xf[idx].a.aead.iv.length = iv_length; + sa_ctx->xf[idx].a.aead.aad_length = + sa->aad_len; + sa_ctx->xf[idx].a.aead.digest_length = + sa->digest_len; + + sa->xforms = &sa_ctx->xf[idx].a; + + print_one_sa_rule(sa, inbound); + } else { + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_AES_CBC: + iv_length = sa->iv_len; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + iv_length = 16; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, + "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + + if (inbound) { + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo; + sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key; + sa_ctx->xf[idx].b.cipher.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].b.cipher.op = + RTE_CRYPTO_CIPHER_OP_DECRYPT; + sa_ctx->xf[idx].b.next = NULL; + sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET; + sa_ctx->xf[idx].b.cipher.iv.length = iv_length; + + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH; + sa_ctx->xf[idx].a.auth.algo = sa->auth_algo; + sa_ctx->xf[idx].a.auth.key.data = sa->auth_key; + sa_ctx->xf[idx].a.auth.key.length = + sa->auth_key_len; + sa_ctx->xf[idx].a.auth.digest_length = + sa->digest_len; + sa_ctx->xf[idx].a.auth.op = + RTE_CRYPTO_AUTH_OP_VERIFY; + } else { /* outbound */ + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo; + sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key; + sa_ctx->xf[idx].a.cipher.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].a.cipher.op = + RTE_CRYPTO_CIPHER_OP_ENCRYPT; + sa_ctx->xf[idx].a.next = NULL; + sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET; + sa_ctx->xf[idx].a.cipher.iv.length = iv_length; + + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH; + sa_ctx->xf[idx].b.auth.algo = sa->auth_algo; + sa_ctx->xf[idx].b.auth.key.data = sa->auth_key; + sa_ctx->xf[idx].b.auth.key.length = + sa->auth_key_len; + sa_ctx->xf[idx].b.auth.digest_length = + sa->digest_len; + sa_ctx->xf[idx].b.auth.op = + RTE_CRYPTO_AUTH_OP_GENERATE; + } + + sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b; + sa_ctx->xf[idx].b.next = NULL; + sa->xforms = &sa_ctx->xf[idx].a; + + print_one_sa_rule(sa, inbound); + } + } + + return 0; +} + +static inline int +sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries) +{ + return sa_add_rules(sa_ctx, entries, nb_entries, 0); +} + +static inline int +sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries) +{ + return sa_add_rules(sa_ctx, entries, nb_entries, 1); +} + +void +sa_init(struct socket_ctx *ctx, int32_t socket_id) +{ + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sa_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound SA DB 
for socket %u already " + "initialized\n", socket_id); + + if (ctx->sa_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already " + "initialized\n", socket_id); + + if (nb_sa_in > 0) { + name = "sa_in"; + ctx->sa_in = sa_create(name, socket_id); + if (ctx->sa_in == NULL) + rte_exit(EXIT_FAILURE, "Error [%d] creating SA " + "context %s in socket %d\n", rte_errno, + name, socket_id); + + sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in); + } else + RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n"); + + if (nb_sa_out > 0) { + name = "sa_out"; + ctx->sa_out = sa_create(name, socket_id); + if (ctx->sa_out == NULL) + rte_exit(EXIT_FAILURE, "Error [%d] creating SA " + "context %s in socket %d\n", rte_errno, + name, socket_id); + + sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out); + } else + RTE_LOG(WARNING, IPSEC, "No SA Outbound rule " + "specified\n"); +} + +int +inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx) +{ + struct ipsec_mbuf_metadata *priv; + + priv = get_priv(m); + + return (sa_ctx->sa[sa_idx].spi == priv->sa->spi); +} + +static inline void +single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt, + struct ipsec_sa **sa_ret) +{ + struct esp_hdr *esp; + struct ip *ip; + uint32_t *src4_addr; + uint8_t *src6_addr; + struct ipsec_sa *sa; + + *sa_ret = NULL; + + ip = rte_pktmbuf_mtod(pkt, struct ip *); + if (ip->ip_v == IPVERSION) + esp = (struct esp_hdr *)(ip + 1); + else + esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1); + + if (esp->spi == INVALID_SPI) + return; + + sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))]; + if (rte_be_to_cpu_32(esp->spi) != sa->spi) + return; + + switch (sa->flags) { + case IP4_TUNNEL: + src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src)); + if ((ip->ip_v == IPVERSION) && + (sa->src.ip.ip4 == *src4_addr) && + (sa->dst.ip.ip4 == *(src4_addr + 1))) + *sa_ret = sa; + break; + case IP6_TUNNEL: + src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src)); + if ((ip->ip_v == IP6_VERSION) && + !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) && + !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16)) + *sa_ret = sa; + break; + case TRANSPORT: + *sa_ret = sa; + } +} + +void +inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[], + struct ipsec_sa *sa[], uint16_t nb_pkts) +{ + uint32_t i; + + for (i = 0; i < nb_pkts; i++) + single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]); +} + +void +outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], + struct ipsec_sa *sa[], uint16_t nb_pkts) +{ + uint32_t i; + + for (i = 0; i < nb_pkts; i++) + sa[i] = &sa_ctx->sa[sa_idx[i]]; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/sp4.c b/src/spdk/dpdk/examples/ipsec-secgw/sp4.c new file mode 100644 index 00000000..8d3d3d8e --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/sp4.c @@ -0,0 +1,506 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +/* + * Security Policies + */ +#include +#include +#include + +#include +#include + +#include "ipsec.h" +#include "parser.h" + +#define MAX_ACL_RULE_NUM 1024 + +/* + * Rule and trace formats definitions. 
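+ * Each field below is one ACL match input, in this order: protocol
+ * byte, source address, destination address, then the source and
+ * destination ports.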
+ */ +enum { + PROTO_FIELD_IPV4, + SRC_FIELD_IPV4, + DST_FIELD_IPV4, + SRCP_FIELD_IPV4, + DSTP_FIELD_IPV4, + NUM_FIELDS_IPV4 +}; + +/* + * That effectively defines order of IPV4 classifications: + * - PROTO + * - SRC IP ADDRESS + * - DST IP ADDRESS + * - PORTS (SRC and DST) + */ +enum { + RTE_ACL_IPV4_PROTO, + RTE_ACL_IPV4_SRC, + RTE_ACL_IPV4_DST, + RTE_ACL_IPV4_PORTS, + RTE_ACL_IPV4_NUM +}; + +struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = { + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint8_t), + .field_index = PROTO_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PROTO, + .offset = 0, + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = SRC_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_SRC, + .offset = offsetof(struct ip, ip_src) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = DST_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_DST, + .offset = offsetof(struct ip, ip_dst) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = SRCP_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PORTS, + .offset = sizeof(struct ip) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = DSTP_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PORTS, + .offset = sizeof(struct ip) - offsetof(struct ip, ip_p) + + sizeof(uint16_t) + }, +}; + +RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ip4_defs)); + +struct acl4_rules acl4_rules_out[MAX_ACL_RULE_NUM]; +uint32_t nb_acl4_rules_out; + +struct acl4_rules acl4_rules_in[MAX_ACL_RULE_NUM]; +uint32_t nb_acl4_rules_in; + +void +parse_sp4_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct acl4_rules *rule_ipv4 = NULL; + + uint32_t *ri = NULL; /* rule index */ + uint32_t ti = 0; /* token index */ + + uint32_t esp_p = 0; + uint32_t protect_p = 0; + uint32_t bypass_p = 0; + uint32_t discard_p = 0; + uint32_t pri_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t proto_p = 0; + uint32_t sport_p = 0; + uint32_t dport_p = 0; + + if (strcmp(tokens[1], "in") == 0) { + ri = &nb_acl4_rules_in; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, + "too many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv4 = &acl4_rules_in[*ri]; + + } else if (strcmp(tokens[1], "out") == 0) { + ri = &nb_acl4_rules_out; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, + "too many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv4 = &acl4_rules_out[*ri]; + } else { + APP_CHECK(0, status, "unrecognized input \"%s\", expect" + " \"in\" or \"out\"\n", tokens[ti]); + return; + } + + rule_ipv4->data.category_mask = 1; + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "esp") == 0) { + /* currently do nothing */ + APP_CHECK_PRESENCE(esp_p, tokens[ti], status); + if (status->status < 0) + return; + esp_p = 1; + continue; + } + + if (strcmp(tokens[ti], "protect") == 0) { + APP_CHECK_PRESENCE(protect_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "bypass"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if 
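+			/* the numeric argument is the SPI of the SA that protects this policy */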
(status->status < 0) + return; + + rule_ipv4->data.userdata = + PROTECT(atoi(tokens[ti])); + + protect_p = 1; + continue; + } + + if (strcmp(tokens[ti], "bypass") == 0) { + APP_CHECK_PRESENCE(bypass_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = BYPASS; + + bypass_p = 1; + continue; + } + + if (strcmp(tokens[ti], "discard") == 0) { + APP_CHECK_PRESENCE(discard_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = DISCARD; + + discard_p = 1; + continue; + } + + if (strcmp(tokens[ti], "pri") == 0) { + APP_CHECK_PRESENCE(pri_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv4->data.priority = atoi(tokens[ti]); + + pri_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + struct in_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv4_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[1].value.u32 = + rte_bswap32(ip.s_addr); + rule_ipv4->field[1].mask_range.u32 = + depth; + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + struct in_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK(parse_ipv4_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[2].value.u32 = + rte_bswap32(ip.s_addr); + rule_ipv4->field[2].mask_range.u32 = + depth; + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "proto") == 0) { + uint16_t low, high; + + APP_CHECK_PRESENCE(proto_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &low, &high) + == 0, status, "unrecognized input \"%s\"" + ", expect \"from:to\"", tokens[ti]); + if (status->status < 0) + return; + APP_CHECK(low <= 0xff, status, "proto low " + "over-limit"); + if (status->status < 0) + return; + APP_CHECK(high <= 0xff, status, "proto high " + "over-limit"); + if (status->status < 0) + return; + + rule_ipv4->field[0].value.u8 = (uint8_t)low; + rule_ipv4->field[0].mask_range.u8 = (uint8_t)high; + + proto_p = 1; + continue; + } + + if (strcmp(tokens[ti], "sport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(sport_p, tokens[ti], status); + if (status->status < 0) + return; + 
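+			/* advance to the "low:high" source port range */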
INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[3].value.u16 = port_low; + rule_ipv4->field[3].mask_range.u16 = port_high; + + sport_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(dport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[4].value.u16 = port_low; + rule_ipv4->field[4].mask_range.u16 = port_high; + + dport_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; + } + + /* check if argument(s) are missing */ + APP_CHECK(esp_p == 1, status, "missing argument \"esp\""); + if (status->status < 0) + return; + + APP_CHECK(protect_p | bypass_p | discard_p, status, "missing " + "argument \"protect\", \"bypass\", or \"discard\""); + if (status->status < 0) + return; + + *ri = *ri + 1; +} + +static void +print_one_ip4_rule(const struct acl4_rules *rule, int32_t extra) +{ + uint8_t a, b, c, d; + + uint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32, + &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu/%u ", a, b, c, d, + rule->field[SRC_FIELD_IPV4].mask_range.u32); + uint32_t_to_char(rule->field[DST_FIELD_IPV4].value.u32, + &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu/%u ", a, b, c, d, + rule->field[DST_FIELD_IPV4].mask_range.u32); + printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ", + rule->field[SRCP_FIELD_IPV4].value.u16, + rule->field[SRCP_FIELD_IPV4].mask_range.u16, + rule->field[DSTP_FIELD_IPV4].value.u16, + rule->field[DSTP_FIELD_IPV4].mask_range.u16, + rule->field[PROTO_FIELD_IPV4].value.u8, + rule->field[PROTO_FIELD_IPV4].mask_range.u8); + if (extra) + printf("0x%x-0x%x-0x%x ", + rule->data.category_mask, + rule->data.priority, + rule->data.userdata); +} + +static inline void +dump_ip4_rules(const struct acl4_rules *rule, int32_t num, int32_t extra) +{ + int32_t i; + + for (i = 0; i < num; i++, rule++) { + printf("\t%d:", i + 1); + print_one_ip4_rule(rule, extra); + printf("\n"); + } +} + +static struct rte_acl_ctx * +acl4_init(const char *name, int32_t socketid, const struct acl4_rules *rules, + uint32_t rules_nb) +{ + char s[PATH_MAX]; + struct rte_acl_param acl_param; + struct rte_acl_config acl_build_param; + struct rte_acl_ctx *ctx; + + printf("Creating SP context with %u max rules\n", MAX_ACL_RULE_NUM); + + memset(&acl_param, 0, sizeof(acl_param)); + + /* Create ACL contexts */ + snprintf(s, sizeof(s), "%s_%d", name, socketid); + + printf("IPv4 %s entries [%u]:\n", s, rules_nb); + dump_ip4_rules(rules, rules_nb, 1); + + acl_param.name = s; + acl_param.socket_id = socketid; + acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip4_defs)); + acl_param.max_rule_num = MAX_ACL_RULE_NUM; + + ctx = rte_acl_create(&acl_param); + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "Failed to create ACL context\n"); + + if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules, + rules_nb) < 0) + rte_exit(EXIT_FAILURE, "add rules failed\n"); + + /* Perform builds */ + memset(&acl_build_param, 0, 
sizeof(acl_build_param)); + + acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES; + acl_build_param.num_fields = RTE_DIM(ip4_defs); + memcpy(&acl_build_param.defs, ip4_defs, sizeof(ip4_defs)); + + if (rte_acl_build(ctx, &acl_build_param) != 0) + rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n"); + + rte_acl_dump(ctx); + + return ctx; +} + +void +sp4_init(struct socket_ctx *ctx, int32_t socket_id) +{ + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sp_ip4_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound SP DB for socket %u already " + "initialized\n", socket_id); + + if (ctx->sp_ip4_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already " + "initialized\n", socket_id); + + if (nb_acl4_rules_in > 0) { + name = "sp_ip4_in"; + ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name, + socket_id, acl4_rules_in, nb_acl4_rules_in); + } else + RTE_LOG(WARNING, IPSEC, "No IPv4 SP Inbound rule " + "specified\n"); + + if (nb_acl4_rules_out > 0) { + name = "sp_ip4_out"; + ctx->sp_ip4_out = (struct sp_ctx *)acl4_init(name, + socket_id, acl4_rules_out, nb_acl4_rules_out); + } else + RTE_LOG(WARNING, IPSEC, "No IPv4 SP Outbound rule " + "specified\n"); +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/sp6.c b/src/spdk/dpdk/examples/ipsec-secgw/sp6.c new file mode 100644 index 00000000..6002afef --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/sp6.c @@ -0,0 +1,620 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +/* + * Security Policies + */ +#include +#include +#include + +#include +#include + +#include "ipsec.h" +#include "parser.h" + +#define MAX_ACL_RULE_NUM 1024 + +enum { + IP6_PROTO, + IP6_SRC0, + IP6_SRC1, + IP6_SRC2, + IP6_SRC3, + IP6_DST0, + IP6_DST1, + IP6_DST2, + IP6_DST3, + IP6_SRCP, + IP6_DSTP, + IP6_NUM +}; + +#define IP6_ADDR_SIZE 16 + +struct rte_acl_field_def ip6_defs[IP6_NUM] = { + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint8_t), + .field_index = IP6_PROTO, + .input_index = IP6_PROTO, + .offset = 0, + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC0, + .input_index = IP6_SRC0, + .offset = 2 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC1, + .input_index = IP6_SRC1, + .offset = 6 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC2, + .input_index = IP6_SRC2, + .offset = 10 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC3, + .input_index = IP6_SRC3, + .offset = 14 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST0, + .input_index = IP6_DST0, + .offset = 18 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST1, + .input_index = IP6_DST1, + .offset = 22 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST2, + .input_index = IP6_DST2, + .offset = 26 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST3, + .input_index = IP6_DST3, + .offset = 30 + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = IP6_SRCP, + .input_index = IP6_SRCP, + .offset = 34 + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = IP6_DSTP, + .input_index = IP6_SRCP, + .offset = 36 + } +}; + +RTE_ACL_RULE_DEF(acl6_rules, RTE_DIM(ip6_defs)); + +struct acl6_rules acl6_rules_out[MAX_ACL_RULE_NUM]; +uint32_t nb_acl6_rules_out; + +struct acl6_rules 
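+/* inbound IPv6 rules get their own fixed-size table */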
acl6_rules_in[MAX_ACL_RULE_NUM]; +uint32_t nb_acl6_rules_in; + +void +parse_sp6_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct acl6_rules *rule_ipv6 = NULL; + + uint32_t *ri = NULL; /* rule index */ + uint32_t ti = 0; /* token index */ + + uint32_t esp_p = 0; + uint32_t protect_p = 0; + uint32_t bypass_p = 0; + uint32_t discard_p = 0; + uint32_t pri_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t proto_p = 0; + uint32_t sport_p = 0; + uint32_t dport_p = 0; + + if (strcmp(tokens[1], "in") == 0) { + ri = &nb_acl6_rules_in; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, "too " + "many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv6 = &acl6_rules_in[*ri]; + + } else if (strcmp(tokens[1], "out") == 0) { + ri = &nb_acl6_rules_out; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, "too " + "many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv6 = &acl6_rules_out[*ri]; + + } else { + APP_CHECK(0, status, "unrecognized input \"%s\", expect" + " \"in\" or \"out\"\n", tokens[ti]); + return; + } + + rule_ipv6->data.category_mask = 1; + + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "esp") == 0) { + /* currently do nothing */ + APP_CHECK_PRESENCE(esp_p, tokens[ti], status); + if (status->status < 0) + return; + esp_p = 1; + continue; + } + + if (strcmp(tokens[ti], "protect") == 0) { + APP_CHECK_PRESENCE(protect_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "bypass"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = + PROTECT(atoi(tokens[ti])); + + protect_p = 1; + continue; + } + + if (strcmp(tokens[ti], "bypass") == 0) { + APP_CHECK_PRESENCE(bypass_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = BYPASS; + + bypass_p = 1; + continue; + } + + if (strcmp(tokens[ti], "discard") == 0) { + APP_CHECK_PRESENCE(discard_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = DISCARD; + + discard_p = 1; + continue; + } + + if (strcmp(tokens[ti], "pri") == 0) { + APP_CHECK_PRESENCE(pri_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv6->data.priority = atoi(tokens[ti]); + + pri_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(src_p, 
tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv6 " + "addr", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[1].value.u32 = + (uint32_t)ip.s6_addr[0] << 24 | + (uint32_t)ip.s6_addr[1] << 16 | + (uint32_t)ip.s6_addr[2] << 8 | + (uint32_t)ip.s6_addr[3]; + rule_ipv6->field[1].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[2].value.u32 = + (uint32_t)ip.s6_addr[4] << 24 | + (uint32_t)ip.s6_addr[5] << 16 | + (uint32_t)ip.s6_addr[6] << 8 | + (uint32_t)ip.s6_addr[7]; + rule_ipv6->field[2].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[3].value.u32 = + (uint32_t)ip.s6_addr[8] << 24 | + (uint32_t)ip.s6_addr[9] << 16 | + (uint32_t)ip.s6_addr[10] << 8 | + (uint32_t)ip.s6_addr[11]; + rule_ipv6->field[3].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[4].value.u32 = + (uint32_t)ip.s6_addr[12] << 24 | + (uint32_t)ip.s6_addr[13] << 16 | + (uint32_t)ip.s6_addr[14] << 8 | + (uint32_t)ip.s6_addr[15]; + rule_ipv6->field[4].mask_range.u32 = + (depth > 32) ? 32 : depth; + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv6 " + "addr", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[5].value.u32 = + (uint32_t)ip.s6_addr[0] << 24 | + (uint32_t)ip.s6_addr[1] << 16 | + (uint32_t)ip.s6_addr[2] << 8 | + (uint32_t)ip.s6_addr[3]; + rule_ipv6->field[5].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[6].value.u32 = + (uint32_t)ip.s6_addr[4] << 24 | + (uint32_t)ip.s6_addr[5] << 16 | + (uint32_t)ip.s6_addr[6] << 8 | + (uint32_t)ip.s6_addr[7]; + rule_ipv6->field[6].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[7].value.u32 = + (uint32_t)ip.s6_addr[8] << 24 | + (uint32_t)ip.s6_addr[9] << 16 | + (uint32_t)ip.s6_addr[10] << 8 | + (uint32_t)ip.s6_addr[11]; + rule_ipv6->field[7].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[8].value.u32 = + (uint32_t)ip.s6_addr[12] << 24 | + (uint32_t)ip.s6_addr[13] << 16 | + (uint32_t)ip.s6_addr[14] << 8 | + (uint32_t)ip.s6_addr[15]; + rule_ipv6->field[8].mask_range.u32 = + (depth > 32) ? 
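+				/* prefix bits left for the last 32-bit chunk */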
32 : depth; + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "proto") == 0) { + uint16_t low, high; + + APP_CHECK_PRESENCE(proto_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &low, &high) + == 0, status, "unrecognized input \"%s\"" + ", expect \"from:to\"", tokens[ti]); + if (status->status < 0) + return; + APP_CHECK(low <= 0xff, status, "proto low " + "over-limit"); + if (status->status < 0) + return; + APP_CHECK(high <= 0xff, status, "proto high " + "over-limit"); + if (status->status < 0) + return; + + rule_ipv6->field[0].value.u8 = (uint8_t)low; + rule_ipv6->field[0].mask_range.u8 = (uint8_t)high; + + proto_p = 1; + continue; + } + + if (strcmp(tokens[ti], "sport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(sport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[9].value.u16 = port_low; + rule_ipv6->field[9].mask_range.u16 = port_high; + + sport_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(dport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[10].value.u16 = port_low; + rule_ipv6->field[10].mask_range.u16 = port_high; + + dport_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; + } + + /* check if argument(s) are missing */ + APP_CHECK(esp_p == 1, status, "missing argument \"esp\""); + if (status->status < 0) + return; + + APP_CHECK(protect_p | bypass_p | discard_p, status, "missing " + "argument \"protect\", \"bypass\", or \"discard\""); + if (status->status < 0) + return; + + *ri = *ri + 1; +} + +static inline void +print_one_ip6_rule(const struct acl6_rules *rule, int32_t extra) +{ + uint8_t a, b, c, d; + + uint32_t_to_char(rule->field[IP6_SRC0].value.u32, + &a, &b, &c, &d); + printf("%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC1].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC2].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC3].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d, + rule->field[IP6_SRC0].mask_range.u32 + + rule->field[IP6_SRC1].mask_range.u32 + + rule->field[IP6_SRC2].mask_range.u32 + + rule->field[IP6_SRC3].mask_range.u32); + + uint32_t_to_char(rule->field[IP6_DST0].value.u32, + &a, &b, &c, &d); + printf("%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST1].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST2].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST3].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x/%u 
", a, b, c, d, + rule->field[IP6_DST0].mask_range.u32 + + rule->field[IP6_DST1].mask_range.u32 + + rule->field[IP6_DST2].mask_range.u32 + + rule->field[IP6_DST3].mask_range.u32); + + printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ", + rule->field[IP6_SRCP].value.u16, + rule->field[IP6_SRCP].mask_range.u16, + rule->field[IP6_DSTP].value.u16, + rule->field[IP6_DSTP].mask_range.u16, + rule->field[IP6_PROTO].value.u8, + rule->field[IP6_PROTO].mask_range.u8); + if (extra) + printf("0x%x-0x%x-0x%x ", + rule->data.category_mask, + rule->data.priority, + rule->data.userdata); +} + +static inline void +dump_ip6_rules(const struct acl6_rules *rule, int32_t num, int32_t extra) +{ + int32_t i; + + for (i = 0; i < num; i++, rule++) { + printf("\t%d:", i + 1); + print_one_ip6_rule(rule, extra); + printf("\n"); + } +} + +static struct rte_acl_ctx * +acl6_init(const char *name, int32_t socketid, const struct acl6_rules *rules, + uint32_t rules_nb) +{ + char s[PATH_MAX]; + struct rte_acl_param acl_param; + struct rte_acl_config acl_build_param; + struct rte_acl_ctx *ctx; + + printf("Creating SP context with %u max rules\n", MAX_ACL_RULE_NUM); + + memset(&acl_param, 0, sizeof(acl_param)); + + /* Create ACL contexts */ + snprintf(s, sizeof(s), "%s_%d", name, socketid); + + printf("IPv4 %s entries [%u]:\n", s, rules_nb); + dump_ip6_rules(rules, rules_nb, 1); + + acl_param.name = s; + acl_param.socket_id = socketid; + acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip6_defs)); + acl_param.max_rule_num = MAX_ACL_RULE_NUM; + + ctx = rte_acl_create(&acl_param); + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "Failed to create ACL context\n"); + + if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules, + rules_nb) < 0) + rte_exit(EXIT_FAILURE, "add rules failed\n"); + + /* Perform builds */ + memset(&acl_build_param, 0, sizeof(acl_build_param)); + + acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES; + acl_build_param.num_fields = RTE_DIM(ip6_defs); + memcpy(&acl_build_param.defs, ip6_defs, sizeof(ip6_defs)); + + if (rte_acl_build(ctx, &acl_build_param) != 0) + rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n"); + + rte_acl_dump(ctx); + + return ctx; +} + +void +sp6_init(struct socket_ctx *ctx, int32_t socket_id) +{ + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sp_ip6_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound IPv6 SP DB for socket %u " + "already initialized\n", socket_id); + + if (ctx->sp_ip6_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound IPv6 SP DB for socket %u " + "already initialized\n", socket_id); + + if (nb_acl6_rules_in > 0) { + name = "sp_ip6_in"; + ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name, + socket_id, acl6_rules_in, nb_acl6_rules_in); + } else + RTE_LOG(WARNING, IPSEC, "No IPv6 SP Inbound rule " + "specified\n"); + + if (nb_acl6_rules_out > 0) { + name = "sp_ip6_out"; + ctx->sp_ip6_out = (struct sp_ctx *)acl6_init(name, + socket_id, acl6_rules_out, nb_acl6_rules_out); + } else + RTE_LOG(WARNING, IPSEC, "No IPv6 SP Outbound rule " + "specified\n"); +} -- cgit v1.2.3