Diffstat (limited to 'src/spdk/dpdk/examples/ipsec-secgw')
51 files changed, 16345 insertions, 0 deletions
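Orientation before the per-file diffs: the Makefile below builds a build/ipsec-secgw binary, and the two endpoint configuration files (ep0.cfg and ep1.cfg) are meant to be passed with the -f option on the two back-to-back systems. An illustrative invocation for endpoint 0 is sketched here; the EAL core list, memory channels, port mask and (port,queue,lcore) mapping are placeholders for this sketch and are not taken from this tree:

# build using the pkg-config path from the Makefile, then run endpoint 0
make
./build/ipsec-secgw -l 1,2 -n 4 -- -p 0x3 --config "(0,0,1),(1,0,2)" -f ./ep0.cfg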
diff --git a/src/spdk/dpdk/examples/ipsec-secgw/Makefile b/src/spdk/dpdk/examples/ipsec-secgw/Makefile new file mode 100644 index 000000000..c4a272a30 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/Makefile @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2016 Intel Corporation + +APP = ipsec-secgw + +# +# all source are stored in SRCS-y +# +SRCS-y += parser.c +SRCS-y += ipsec.c +SRCS-y += esp.c +SRCS-y += sp4.c +SRCS-y += sp6.c +SRCS-y += sa.c +SRCS-y += sad.c +SRCS-y += rt.c +SRCS-y += ipsec_process.c +SRCS-y += ipsec-secgw.c +SRCS-y += ipsec_worker.c +SRCS-y += event_helper.c + +CFLAGS += -gdwarf-2 + +# Build using pkg-config variables if possible +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) + +all: shared +.PHONY: shared static +shared: build/$(APP)-shared + ln -sf $(APP)-shared build/$(APP) +static: build/$(APP)-static + ln -sf $(APP)-static build/$(APP) + +PKGCONF ?= pkg-config + +PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) +CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) +LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) +LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk) + +CFLAGS += -DALLOW_EXPERIMENTAL_API + +build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) + +build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC) + +build: + @mkdir -p $@ + +.PHONY: clean +clean: + rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared + test -d build && rmdir -p build || true + +else + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, detect a build directory, by looking for a path with a .config +RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config))))) + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(CONFIG_RTE_LIBRTE_IPSEC),y) +$(error "RTE_LIBRTE_IPSEC is required to build ipsec-secgw") +endif +endif + +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += -O3 -gdwarf-2 +CFLAGS += $(WERROR_FLAGS) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS_sa.o += -diag-disable=vec +endif + +ifeq ($(DEBUG),1) +CFLAGS += -DIPSEC_DEBUG -fstack-protector-all -O0 +endif + +include $(RTE_SDK)/mk/rte.extapp.mk + +endif diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ep0.cfg b/src/spdk/dpdk/examples/ipsec-secgw/ep0.cfg new file mode 100644 index 000000000..6f8d5aa53 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ep0.cfg @@ -0,0 +1,171 @@ +########################################################################### +# IPSEC-SECGW Endpoint sample configuration +# +# The main purpose of this file is to show how to configure two systems +# back-to-back that would forward traffic through an IPsec tunnel. This +# file is the Endpoint 0 configuration. 
To use this configuration file, +# add the following command-line option: +# +# -f ./ep0.cfg +# +########################################################################### + +#SP IPv4 rules +sp ipv4 out esp protect 5 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 6 pri 1 dst 192.168.106.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 10 pri 1 dst 192.168.175.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 11 pri 1 dst 192.168.176.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.240.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.241.0/24 sport 0:65535 dport 0:65535 + +sp ipv4 in esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 110 pri 1 dst 192.168.185.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 111 pri 1 dst 192.168.186.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 116 pri 1 dst 192.168.211.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 117 pri 1 dst 192.168.212.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 126 pri 1 dst 192.168.66.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.245.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.246.0/24 sport 0:65535 dport 0:65535 + +#SP IPv6 rules +sp ipv6 out esp protect 5 pri 1 dst 0000:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 6 pri 1 dst 0000:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 10 pri 1 dst 0000:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 11 pri 1 dst 0000:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 25 pri 1 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +sp ipv6 in esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 115 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 116 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 127 pri 1 dst ffff:0000:0000:0000:cccc:dddd:0000:0000/96 \ +sport 0:65535 dport 0:65535 + 
+#SA rules +sa out 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 + +sa out 6 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.1.6 dst 172.16.2.6 + +sa out 10 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa out 11 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa out 15 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.5 \ +dst 172.16.2.5 + +sa out 16 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.6 \ +dst 172.16.2.6 + +sa out 25 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:5555 \ +dst 2222:2222:2222:2222:2222:2222:2222:5555 + +sa out 26 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:6666 \ +dst 2222:2222:2222:2222:2222:2222:2222:6666 + +sa in 105 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 + +sa in 106 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa in 110 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa in 111 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa in 115 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.5 \ +dst 172.16.1.5 + +sa in 116 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa in 117 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.7 \ +dst 172.16.1.7 flow-direction 0 2 + +sa in 125 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:5555 \ +dst 1111:1111:1111:1111:1111:1111:1111:5555 + +sa in 126 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:6666 \ +dst 1111:1111:1111:1111:1111:1111:1111:6666 + +sa in 127 cipher_algo null auth_algo null mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:7777 \ +dst 
1111:1111:1111:1111:1111:1111:1111:7777 \ +flow-direction 0 3 + +#Routing rules +rt ipv4 dst 172.16.2.5/32 port 0 +rt ipv4 dst 172.16.2.6/32 port 1 +rt ipv4 dst 192.168.175.0/24 port 0 +rt ipv4 dst 192.168.176.0/24 port 1 +rt ipv4 dst 192.168.240.0/24 port 0 +rt ipv4 dst 192.168.241.0/24 port 1 +rt ipv4 dst 192.168.115.0/24 port 2 +rt ipv4 dst 192.168.116.0/24 port 3 +rt ipv4 dst 192.168.65.0/24 port 2 +rt ipv4 dst 192.168.66.0/24 port 3 +rt ipv4 dst 192.168.185.0/24 port 2 +rt ipv4 dst 192.168.186.0/24 port 3 +rt ipv4 dst 192.168.210.0/24 port 2 +rt ipv4 dst 192.168.211.0/24 port 3 +rt ipv4 dst 192.168.245.0/24 port 2 +rt ipv4 dst 192.168.246.0/24 port 3 + +rt ipv6 dst 2222:2222:2222:2222:2222:2222:2222:5555/116 port 0 +rt ipv6 dst 2222:2222:2222:2222:2222:2222:2222:6666/116 port 1 +rt ipv6 dst 0000:0000:1111:1111:0000:0000:0000:0000/116 port 0 +rt ipv6 dst 0000:0000:1111:1111:1111:1111:0000:0000/116 port 1 +rt ipv6 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/116 port 3 +rt ipv6 dst ffff:0000:0000:0000:5555:5555:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:0000:0000:6666:6666:0000:0000/116 port 3 +rt ipv6 dst ffff:0000:1111:1111:0000:0000:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:1111:1111:1111:1111:0000:0000/116 port 3 diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ep1.cfg b/src/spdk/dpdk/examples/ipsec-secgw/ep1.cfg new file mode 100644 index 000000000..19bdc68ea --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ep1.cfg @@ -0,0 +1,160 @@ +########################################################################### +# IPSEC-SECGW Endpoint1 sample configuration +# +# The main purpose of this file is to show how to configure two systems +# back-to-back that would forward traffic through an IPsec tunnel. This +# file is the Endpoint1 configuration. 
To use this configuration file, +# add the following command-line option: +# +# -f ./ep1.cfg +# +########################################################################### + +#SP IPv4 rules +sp ipv4 in esp protect 5 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 6 pri 1 dst 192.168.106.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 10 pri 1 dst 192.168.175.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 11 pri 1 dst 192.168.176.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.240.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.241.0/24 sport 0:65535 dport 0:65535 + +sp ipv4 out esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 110 pri 1 dst 192.168.185.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 111 pri 1 dst 192.168.186.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 116 pri 1 dst 192.168.211.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 126 pri 1 dst 192.168.66.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.245.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.246.0/24 sport 0:65535 dport 0:65535 + +#SP IPv6 rules +sp ipv6 in esp protect 5 pri 1 dst 0000:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 6 pri 1 dst 0000:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 10 pri 1 dst 0000:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 11 pri 1 dst 0000:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 25 pri 1 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +sp ipv6 out esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 115 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 116 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +#SA rules +sa in 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.1.5 
dst 172.16.2.5 + +sa in 6 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.1.6 dst 172.16.2.6 + +sa in 10 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa in 11 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa in 15 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.5 \ +dst 172.16.2.5 + +sa in 16 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.6 \ +dst 172.16.2.6 + +sa in 25 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:5555 \ +dst 2222:2222:2222:2222:2222:2222:2222:5555 + +sa in 26 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:6666 \ +dst 2222:2222:2222:2222:2222:2222:2222:6666 + +sa out 105 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 + +sa out 106 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa out 110 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa out 111 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa out 115 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.5 \ +dst 172.16.1.5 + +sa out 116 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa out 125 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:5555 \ +dst 1111:1111:1111:1111:1111:1111:1111:5555 + +sa out 126 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:6666 \ +dst 1111:1111:1111:1111:1111:1111:1111:6666 + +#Routing rules +rt ipv4 dst 172.16.1.5/32 port 0 +rt ipv4 dst 172.16.1.6/32 port 1 +rt ipv4 dst 192.168.185.0/24 port 0 +rt ipv4 dst 192.168.186.0/24 port 1 +rt ipv4 dst 192.168.245.0/24 port 0 +rt ipv4 dst 192.168.246.0/24 port 1 +rt ipv4 dst 192.168.105.0/24 port 2 +rt ipv4 dst 192.168.106.0/24 port 3 +rt ipv4 dst 192.168.55.0/24 port 2 +rt ipv4 dst 192.168.56.0/24 port 3 +rt ipv4 dst 192.168.175.0/24 port 2 +rt ipv4 dst 192.168.176.0/24 port 
3 +rt ipv4 dst 192.168.200.0/24 port 2 +rt ipv4 dst 192.168.201.0/24 port 3 +rt ipv4 dst 192.168.240.0/24 port 2 +rt ipv4 dst 192.168.241.0/24 port 3 + +rt ipv6 dst 1111:1111:1111:1111:1111:1111:1111:5555/116 port 0 +rt ipv6 dst 1111:1111:1111:1111:1111:1111:1111:6666/116 port 1 +rt ipv6 dst ffff:0000:1111:1111:0000:0000:0000:0000/116 port 0 +rt ipv6 dst ffff:0000:1111:1111:1111:1111:0000:0000/116 port 1 +rt ipv6 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/116 port 3 +rt ipv6 dst 0000:0000:0000:0000:5555:5555:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:0000:0000:6666:6666:0000:0000/116 port 3 +rt ipv6 dst 0000:0000:1111:1111:0000:0000:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:1111:1111:1111:1111:0000:0000/116 port 3 diff --git a/src/spdk/dpdk/examples/ipsec-secgw/esp.c b/src/spdk/dpdk/examples/ipsec-secgw/esp.c new file mode 100644 index 000000000..bfa7ff721 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/esp.c @@ -0,0 +1,478 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#include <stdint.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> +#include <fcntl.h> +#include <unistd.h> + +#include <rte_common.h> +#include <rte_crypto.h> +#include <rte_cryptodev.h> +#include <rte_random.h> + +#include "ipsec.h" +#include "esp.h" +#include "ipip.h" + +int +esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4; + struct rte_crypto_sym_op *sym_cop; + int32_t payload_len, ip_hdr_len; + + RTE_ASSERT(sa != NULL); + if (ipsec_get_action_type(sa) == + RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) + return 0; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(cop != NULL); + + ip4 = rte_pktmbuf_mtod(m, struct ip *); + if (likely(ip4->ip_v == IPVERSION)) + ip_hdr_len = ip4->ip_hl * 4; + else if (ip4->ip_v == IP6_VERSION) + /* XXX No option headers supported */ + ip_hdr_len = sizeof(struct ip6_hdr); + else { + RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n", + ip4->ip_v); + return -EINVAL; + } + + payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len - + sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len; + + if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) { + RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n", + payload_len, sa->block_size); + return -EINVAL; + } + + sym_cop = get_sym_cop(cop); + sym_cop->m_src = m; + + if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { + sym_cop->aead.data.offset = + ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len; + sym_cop->aead.data.length = payload_len; + + struct cnt_blk *icb; + uint8_t *aad; + uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + + sizeof(struct rte_esp_hdr)); + + icb = get_cnt_blk(m); + icb->salt = sa->salt; + memcpy(&icb->iv, iv, 8); + icb->cnt = rte_cpu_to_be_32(1); + + aad = get_aad(m); + memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8); + sym_cop->aead.aad.data = aad; + sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m, + aad - rte_pktmbuf_mtod(m, uint8_t *)); + + sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + } else { + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct rte_esp_hdr) + + sa->iv_len; + sym_cop->cipher.data.length = payload_len; + + struct cnt_blk *icb; + uint8_t *iv = 
RTE_PTR_ADD(ip4, ip_hdr_len + + sizeof(struct rte_esp_hdr)); + uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, + uint8_t *, IV_OFFSET); + + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_3DES_CBC: + case RTE_CRYPTO_CIPHER_AES_CBC: + /* Copy IV at the end of crypto operation */ + rte_memcpy(iv_ptr, iv, sa->iv_len); + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + icb = get_cnt_blk(m); + icb->salt = sa->salt; + memcpy(&icb->iv, iv, 8); + icb->cnt = rte_cpu_to_be_32(1); + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + + switch (sa->auth_algo) { + case RTE_CRYPTO_AUTH_NULL: + case RTE_CRYPTO_AUTH_SHA1_HMAC: + case RTE_CRYPTO_AUTH_SHA256_HMAC: + sym_cop->auth.data.offset = ip_hdr_len; + sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) + + sa->iv_len + payload_len; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", + sa->auth_algo); + return -EINVAL; + } + + sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + } + + return 0; +} + +int +esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4, *ip; + struct ip6_hdr *ip6; + uint8_t *nexthdr, *pad_len; + uint8_t *padding; + uint16_t i; + struct rte_ipsec_session *ips; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + RTE_ASSERT(cop != NULL); + + ips = ipsec_get_primary_session(sa); + + if ((ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) || + (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) { + if (m->ol_flags & PKT_RX_SEC_OFFLOAD) { + if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + else + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } else + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + } + + if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { + RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__); + return -1; + } + + if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO && + ips->security.ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) { + nexthdr = &m->inner_esp_next_proto; + } else { + nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*, + rte_pktmbuf_pkt_len(m) - sa->digest_len - 1); + pad_len = nexthdr - 1; + + padding = pad_len - *pad_len; + for (i = 0; i < *pad_len; i++) { + if (padding[i] != i + 1) { + RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n"); + return -EINVAL; + } + } + + if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) { + RTE_LOG(ERR, IPSEC_ESP, + "failed to remove pad_len + digest\n"); + return -EINVAL; + } + } + + if (unlikely(IS_TRANSPORT(sa->flags))) { + ip = rte_pktmbuf_mtod(m, struct ip *); + ip4 = (struct ip *)rte_pktmbuf_adj(m, + sizeof(struct rte_esp_hdr) + sa->iv_len); + if (likely(ip->ip_v == IPVERSION)) { + memmove(ip4, ip, ip->ip_hl * 4); + ip4->ip_p = *nexthdr; + ip4->ip_len = htons(rte_pktmbuf_data_len(m)); + } else { + ip6 = (struct ip6_hdr *)ip4; + /* XXX No option headers supported */ + memmove(ip6, ip, sizeof(struct ip6_hdr)); + ip6->ip6_nxt = *nexthdr; + ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) - + sizeof(struct ip6_hdr)); + } + } else + ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len); + + return 0; +} + +int +esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4; + struct ip6_hdr *ip6; + struct rte_esp_hdr *esp = NULL; + 
uint8_t *padding = NULL, *new_ip, nlp; + struct rte_crypto_sym_op *sym_cop; + int32_t i; + uint16_t pad_payload_len, pad_len, ip_hdr_len; + struct rte_ipsec_session *ips; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + + ips = ipsec_get_primary_session(sa); + ip_hdr_len = 0; + + ip4 = rte_pktmbuf_mtod(m, struct ip *); + if (likely(ip4->ip_v == IPVERSION)) { + if (unlikely(IS_TRANSPORT(sa->flags))) { + ip_hdr_len = ip4->ip_hl * 4; + nlp = ip4->ip_p; + } else + nlp = IPPROTO_IPIP; + } else if (ip4->ip_v == IP6_VERSION) { + if (unlikely(IS_TRANSPORT(sa->flags))) { + /* XXX No option headers supported */ + ip_hdr_len = sizeof(struct ip6_hdr); + ip6 = (struct ip6_hdr *)ip4; + nlp = ip6->ip6_nxt; + } else + nlp = IPPROTO_IPV6; + } else { + RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n", + ip4->ip_v); + return -EINVAL; + } + + /* Padded payload length */ + pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) - + ip_hdr_len + 2, sa->block_size); + pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m); + + RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags)); + + if (likely(IS_IP4_TUNNEL(sa->flags))) + ip_hdr_len = sizeof(struct ip); + else if (IS_IP6_TUNNEL(sa->flags)) + ip_hdr_len = sizeof(struct ip6_hdr); + else if (!IS_TRANSPORT(sa->flags)) { + RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n", + sa->flags); + return -EINVAL; + } + + /* Check maximum packet size */ + if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len + + pad_payload_len + sa->digest_len > IP_MAXPACKET)) { + RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n"); + return -EINVAL; + } + + /* Add trailer padding if it is not constructed by HW */ + if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || + (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO && + !(ips->security.ol_flags & + RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) { + padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + + sa->digest_len); + if (unlikely(padding == NULL)) { + RTE_LOG(ERR, IPSEC_ESP, + "not enough mbuf trailing space\n"); + return -ENOSPC; + } + rte_prefetch0(padding); + } + + switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { + case IP4_TUNNEL: + ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len, + &sa->src, &sa->dst); + esp = (struct rte_esp_hdr *)(ip4 + 1); + break; + case IP6_TUNNEL: + ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len, + &sa->src, &sa->dst); + esp = (struct rte_esp_hdr *)(ip6 + 1); + break; + case TRANSPORT: + new_ip = (uint8_t *)rte_pktmbuf_prepend(m, + sizeof(struct rte_esp_hdr) + sa->iv_len); + memmove(new_ip, ip4, ip_hdr_len); + esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len); + ip4 = (struct ip *)new_ip; + if (likely(ip4->ip_v == IPVERSION)) { + ip4->ip_p = IPPROTO_ESP; + ip4->ip_len = htons(rte_pktmbuf_data_len(m)); + } else { + ip6 = (struct ip6_hdr *)new_ip; + ip6->ip6_nxt = IPPROTO_ESP; + ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) - + sizeof(struct ip6_hdr)); + } + } + + sa->seq++; + esp->spi = rte_cpu_to_be_32(sa->spi); + esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq); + + /* set iv */ + uint64_t *iv = (uint64_t *)(esp + 1); + if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { + *iv = rte_cpu_to_be_64(sa->seq); + } else { + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_3DES_CBC: + case RTE_CRYPTO_CIPHER_AES_CBC: + memset(iv, 0, sa->iv_len); + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + *iv = rte_cpu_to_be_64(sa->seq); + break; + default: + RTE_LOG(ERR, IPSEC_ESP, + "unsupported cipher algorithm %u\n", + 
sa->cipher_algo); + return -EINVAL; + } + } + + if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + if (ips->security.ol_flags & + RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) { + /* Set the inner esp next protocol for HW trailer */ + m->inner_esp_next_proto = nlp; + m->packet_type |= RTE_PTYPE_TUNNEL_ESP; + } else { + padding[pad_len - 2] = pad_len - 2; + padding[pad_len - 1] = nlp; + } + goto done; + } + + RTE_ASSERT(cop != NULL); + sym_cop = get_sym_cop(cop); + sym_cop->m_src = m; + + if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { + uint8_t *aad; + + sym_cop->aead.data.offset = ip_hdr_len + + sizeof(struct rte_esp_hdr) + sa->iv_len; + sym_cop->aead.data.length = pad_payload_len; + + /* Fill pad_len using default sequential scheme */ + for (i = 0; i < pad_len - 2; i++) + padding[i] = i + 1; + padding[pad_len - 2] = pad_len - 2; + padding[pad_len - 1] = nlp; + + struct cnt_blk *icb = get_cnt_blk(m); + icb->salt = sa->salt; + icb->iv = rte_cpu_to_be_64(sa->seq); + icb->cnt = rte_cpu_to_be_32(1); + + aad = get_aad(m); + memcpy(aad, esp, 8); + sym_cop->aead.aad.data = aad; + sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m, + aad - rte_pktmbuf_mtod(m, uint8_t *)); + + sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + } else { + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_3DES_CBC: + case RTE_CRYPTO_CIPHER_AES_CBC: + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct rte_esp_hdr); + sym_cop->cipher.data.length = pad_payload_len + sa->iv_len; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct rte_esp_hdr) + sa->iv_len; + sym_cop->cipher.data.length = pad_payload_len; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + + /* Fill pad_len using default sequential scheme */ + for (i = 0; i < pad_len - 2; i++) + padding[i] = i + 1; + padding[pad_len - 2] = pad_len - 2; + padding[pad_len - 1] = nlp; + + struct cnt_blk *icb = get_cnt_blk(m); + icb->salt = sa->salt; + icb->iv = rte_cpu_to_be_64(sa->seq); + icb->cnt = rte_cpu_to_be_32(1); + + switch (sa->auth_algo) { + case RTE_CRYPTO_AUTH_NULL: + case RTE_CRYPTO_AUTH_SHA1_HMAC: + case RTE_CRYPTO_AUTH_SHA256_HMAC: + sym_cop->auth.data.offset = ip_hdr_len; + sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) + + sa->iv_len + pad_payload_len; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", + sa->auth_algo); + return -EINVAL; + } + + sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + } + +done: + return 0; +} + +int +esp_outbound_post(struct rte_mbuf *m, + struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + enum rte_security_session_action_type type; + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + + type = ipsec_get_action_type(sa); + + if ((type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) || + (type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) { + m->ol_flags |= PKT_TX_SEC_OFFLOAD; + } else { + RTE_ASSERT(cop != NULL); + if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { + RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", + __func__); + return -1; + } + } + + return 0; +} diff --git 
a/src/spdk/dpdk/examples/ipsec-secgw/esp.h b/src/spdk/dpdk/examples/ipsec-secgw/esp.h new file mode 100644 index 000000000..792312cf7 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/esp.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ +#ifndef __RTE_IPSEC_XFORM_ESP_H__ +#define __RTE_IPSEC_XFORM_ESP_H__ + +struct mbuf; + + +int +esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_outbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +#endif /* __RTE_IPSEC_XFORM_ESP_H__ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/event_helper.c b/src/spdk/dpdk/examples/ipsec-secgw/event_helper.c new file mode 100644 index 000000000..865dc911b --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/event_helper.c @@ -0,0 +1,1830 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2020 Marvell International Ltd. + */ +#include <rte_bitmap.h> +#include <rte_ethdev.h> +#include <rte_eventdev.h> +#include <rte_event_eth_rx_adapter.h> +#include <rte_event_eth_tx_adapter.h> +#include <rte_malloc.h> +#include <stdbool.h> + +#include "event_helper.h" + +static volatile bool eth_core_running; + +static int +eh_get_enabled_cores(struct rte_bitmap *eth_core_mask) +{ + int i, count = 0; + + RTE_LCORE_FOREACH(i) { + /* Check if this core is enabled in core mask*/ + if (rte_bitmap_get(eth_core_mask, i)) { + /* Found enabled core */ + count++; + } + } + return count; +} + +static inline unsigned int +eh_get_next_eth_core(struct eventmode_conf *em_conf) +{ + static unsigned int prev_core = -1; + unsigned int next_core; + + /* + * Make sure we have at least one eth core running, else the following + * logic would lead to an infinite loop. 
+ */ + if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) { + EH_LOG_ERR("No enabled eth core found"); + return RTE_MAX_LCORE; + } + + /* Only some cores are marked as eth cores, skip others */ + do { + /* Get the next core */ + next_core = rte_get_next_lcore(prev_core, 0, 1); + + /* Check if we have reached max lcores */ + if (next_core == RTE_MAX_LCORE) + return next_core; + + /* Update prev_core */ + prev_core = next_core; + } while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core))); + + return next_core; +} + +static inline unsigned int +eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core) +{ + unsigned int next_core; + + /* Get next active core skipping cores reserved as eth cores */ + do { + /* Get the next core */ + next_core = rte_get_next_lcore(prev_core, 0, 0); + + /* Check if we have reached max lcores */ + if (next_core == RTE_MAX_LCORE) + return next_core; + + prev_core = next_core; + } while (rte_bitmap_get(em_conf->eth_core_mask, next_core)); + + return next_core; +} + +static struct eventdev_params * +eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id) +{ + int i; + + for (i = 0; i < em_conf->nb_eventdev; i++) { + if (em_conf->eventdev_config[i].eventdev_id == eventdev_id) + break; + } + + /* No match */ + if (i == em_conf->nb_eventdev) + return NULL; + + return &(em_conf->eventdev_config[i]); +} + +static inline bool +eh_dev_has_rx_internal_port(uint8_t eventdev_id) +{ + bool flag = true; + int j, ret; + + RTE_ETH_FOREACH_DEV(j) { + uint32_t caps = 0; + + ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps); + if (ret < 0) + return false; + + if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) + flag = false; + } + return flag; +} + +static inline bool +eh_dev_has_tx_internal_port(uint8_t eventdev_id) +{ + bool flag = true; + int j, ret; + + RTE_ETH_FOREACH_DEV(j) { + uint32_t caps = 0; + + ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps); + if (ret < 0) + return false; + + if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) + flag = false; + } + return flag; +} + +static inline bool +eh_dev_has_burst_mode(uint8_t dev_id) +{ + struct rte_event_dev_info dev_info; + + rte_event_dev_info_get(dev_id, &dev_info); + return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ? + true : false; +} + +static int +eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) +{ + int lcore_count, nb_eventdev, nb_eth_dev, ret; + struct eventdev_params *eventdev_config; + struct rte_event_dev_info dev_info; + + /* Get the number of event devices */ + nb_eventdev = rte_event_dev_count(); + if (nb_eventdev == 0) { + EH_LOG_ERR("No event devices detected"); + return -EINVAL; + } + + if (nb_eventdev != 1) { + EH_LOG_ERR("Event mode does not support multiple event devices. 
" + "Please provide only one event device."); + return -EINVAL; + } + + /* Get the number of eth devs */ + nb_eth_dev = rte_eth_dev_count_avail(); + if (nb_eth_dev == 0) { + EH_LOG_ERR("No eth devices detected"); + return -EINVAL; + } + + /* Get the number of lcores */ + lcore_count = rte_lcore_count(); + + /* Read event device info */ + ret = rte_event_dev_info_get(0, &dev_info); + if (ret < 0) { + EH_LOG_ERR("Failed to read event device info %d", ret); + return ret; + } + + /* Check if enough ports are available */ + if (dev_info.max_event_ports < 2) { + EH_LOG_ERR("Not enough event ports available"); + return -EINVAL; + } + + /* Get the first event dev conf */ + eventdev_config = &(em_conf->eventdev_config[0]); + + /* Save number of queues & ports available */ + eventdev_config->eventdev_id = 0; + eventdev_config->nb_eventqueue = dev_info.max_event_queues; + eventdev_config->nb_eventport = dev_info.max_event_ports; + eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES; + + /* Check if there are more queues than required */ + if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) { + /* One queue is reserved for Tx */ + eventdev_config->nb_eventqueue = nb_eth_dev + 1; + } + + /* Check if there are more ports than required */ + if (eventdev_config->nb_eventport > lcore_count) { + /* One port per lcore is enough */ + eventdev_config->nb_eventport = lcore_count; + } + + /* Update the number of event devices */ + em_conf->nb_eventdev++; + + return 0; +} + +static void +eh_do_capability_check(struct eventmode_conf *em_conf) +{ + struct eventdev_params *eventdev_config; + int all_internal_ports = 1; + uint32_t eventdev_id; + int i; + + for (i = 0; i < em_conf->nb_eventdev; i++) { + + /* Get the event dev conf */ + eventdev_config = &(em_conf->eventdev_config[i]); + eventdev_id = eventdev_config->eventdev_id; + + /* Check if event device has internal port for Rx & Tx */ + if (eh_dev_has_rx_internal_port(eventdev_id) && + eh_dev_has_tx_internal_port(eventdev_id)) { + eventdev_config->all_internal_ports = 1; + } else { + all_internal_ports = 0; + } + } + + /* + * If Rx & Tx internal ports are supported by all event devices then + * eth cores won't be required. Override the eth core mask requested + * and decrement number of event queues by one as it won't be needed + * for Tx. + */ + if (all_internal_ports) { + rte_bitmap_reset(em_conf->eth_core_mask); + for (i = 0; i < em_conf->nb_eventdev; i++) + em_conf->eventdev_config[i].nb_eventqueue--; + } +} + +static int +eh_set_default_conf_link(struct eventmode_conf *em_conf) +{ + struct eventdev_params *eventdev_config; + struct eh_event_link_info *link; + unsigned int lcore_id = -1; + int i, link_index; + + /* + * Create a 1:1 mapping from event ports to cores. If the number + * of event ports is lesser than the cores, some cores won't + * execute worker. If there are more event ports, then some ports + * won't be used. + * + */ + + /* + * The event queue-port mapping is done according to the link. Since + * we are falling back to the default link config, enabling + * "all_ev_queue_to_ev_port" mode flag. This will map all queues + * to the port. 
+ */ + em_conf->ext_params.all_ev_queue_to_ev_port = 1; + + /* Get first event dev conf */ + eventdev_config = &(em_conf->eventdev_config[0]); + + /* Loop through the ports */ + for (i = 0; i < eventdev_config->nb_eventport; i++) { + + /* Get next active core id */ + lcore_id = eh_get_next_active_core(em_conf, + lcore_id); + + if (lcore_id == RTE_MAX_LCORE) { + /* Reached max cores */ + return 0; + } + + /* Save the current combination as one link */ + + /* Get the index */ + link_index = em_conf->nb_link; + + /* Get the corresponding link */ + link = &(em_conf->link[link_index]); + + /* Save link */ + link->eventdev_id = eventdev_config->eventdev_id; + link->event_port_id = i; + link->lcore_id = lcore_id; + + /* + * Don't set eventq_id as by default all queues + * need to be mapped to the port, which is controlled + * by the operating mode. + */ + + /* Update number of links */ + em_conf->nb_link++; + } + + return 0; +} + +static int +eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf) +{ + struct rx_adapter_connection_info *conn; + struct eventdev_params *eventdev_config; + struct rx_adapter_conf *adapter; + bool rx_internal_port = true; + bool single_ev_queue = false; + int nb_eventqueue; + uint32_t caps = 0; + int eventdev_id; + int nb_eth_dev; + int adapter_id; + int conn_id; + int ret; + int i; + + /* Create one adapter with eth queues mapped to event queue(s) */ + + if (em_conf->nb_eventdev == 0) { + EH_LOG_ERR("No event devs registered"); + return -EINVAL; + } + + /* Get the number of eth devs */ + nb_eth_dev = rte_eth_dev_count_avail(); + + /* Use the first event dev */ + eventdev_config = &(em_conf->eventdev_config[0]); + + /* Get eventdev ID */ + eventdev_id = eventdev_config->eventdev_id; + adapter_id = 0; + + /* Get adapter conf */ + adapter = &(em_conf->rx_adapter[adapter_id]); + + /* Set adapter conf */ + adapter->eventdev_id = eventdev_id; + adapter->adapter_id = adapter_id; + + /* + * If event device does not have internal ports for passing + * packets then reserved one queue for Tx path + */ + nb_eventqueue = eventdev_config->all_internal_ports ? + eventdev_config->nb_eventqueue : + eventdev_config->nb_eventqueue - 1; + + /* + * Map all queues of eth device (port) to an event queue. If there + * are more event queues than eth ports then create 1:1 mapping. + * Otherwise map all eth ports to a single event queue. + */ + if (nb_eth_dev > nb_eventqueue) + single_ev_queue = true; + + for (i = 0; i < nb_eth_dev; i++) { + + /* Use only the ports enabled */ + if ((em_conf->eth_portmask & (1 << i)) == 0) + continue; + + /* Get the connection id */ + conn_id = adapter->nb_connections; + + /* Get the connection */ + conn = &(adapter->conn[conn_id]); + + /* Set mapping between eth ports & event queues*/ + conn->ethdev_id = i; + conn->eventq_id = single_ev_queue ? 
0 : i; + + /* Add all eth queues eth port to event queue */ + conn->ethdev_rx_qid = -1; + + /* Get Rx adapter capabilities */ + ret = rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps); + if (ret < 0) { + EH_LOG_ERR("Failed to get event device %d eth rx adapter" + " capabilities for port %d", eventdev_id, i); + return ret; + } + if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) + rx_internal_port = false; + + /* Update no of connections */ + adapter->nb_connections++; + + } + + if (rx_internal_port) { + /* Rx core is not required */ + adapter->rx_core_id = -1; + } else { + /* Rx core is required */ + adapter->rx_core_id = eh_get_next_eth_core(em_conf); + } + + /* We have setup one adapter */ + em_conf->nb_rx_adapter = 1; + + return 0; +} + +static int +eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf) +{ + struct tx_adapter_connection_info *conn; + struct eventdev_params *eventdev_config; + struct tx_adapter_conf *tx_adapter; + bool tx_internal_port = true; + uint32_t caps = 0; + int eventdev_id; + int adapter_id; + int nb_eth_dev; + int conn_id; + int ret; + int i; + + /* + * Create one Tx adapter with all eth queues mapped to event queues + * 1:1. + */ + + if (em_conf->nb_eventdev == 0) { + EH_LOG_ERR("No event devs registered"); + return -EINVAL; + } + + /* Get the number of eth devs */ + nb_eth_dev = rte_eth_dev_count_avail(); + + /* Use the first event dev */ + eventdev_config = &(em_conf->eventdev_config[0]); + + /* Get eventdev ID */ + eventdev_id = eventdev_config->eventdev_id; + adapter_id = 0; + + /* Get adapter conf */ + tx_adapter = &(em_conf->tx_adapter[adapter_id]); + + /* Set adapter conf */ + tx_adapter->eventdev_id = eventdev_id; + tx_adapter->adapter_id = adapter_id; + + /* + * Map all Tx queues of the eth device (port) to the event device. + */ + + /* Set defaults for connections */ + + /* + * One eth device (port) is one connection. Map all Tx queues + * of the device to the Tx adapter. + */ + + for (i = 0; i < nb_eth_dev; i++) { + + /* Use only the ports enabled */ + if ((em_conf->eth_portmask & (1 << i)) == 0) + continue; + + /* Get the connection id */ + conn_id = tx_adapter->nb_connections; + + /* Get the connection */ + conn = &(tx_adapter->conn[conn_id]); + + /* Add ethdev to connections */ + conn->ethdev_id = i; + + /* Add all eth tx queues to adapter */ + conn->ethdev_tx_qid = -1; + + /* Get Tx adapter capabilities */ + ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps); + if (ret < 0) { + EH_LOG_ERR("Failed to get event device %d eth tx adapter" + " capabilities for port %d", eventdev_id, i); + return ret; + } + if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) + tx_internal_port = false; + + /* Update no of connections */ + tx_adapter->nb_connections++; + } + + if (tx_internal_port) { + /* Tx core is not required */ + tx_adapter->tx_core_id = -1; + } else { + /* Tx core is required */ + tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf); + + /* + * Use one event queue per adapter for submitting packets + * for Tx. Reserving the last queue available + */ + /* Queue numbers start at 0 */ + tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1; + } + + /* We have setup one adapter */ + em_conf->nb_tx_adapter = 1; + return 0; +} + +static int +eh_validate_conf(struct eventmode_conf *em_conf) +{ + int ret; + + /* + * Check if event devs are specified. 
Else probe the event devices + * and initialize the config with all ports & queues available + */ + if (em_conf->nb_eventdev == 0) { + ret = eh_set_default_conf_eventdev(em_conf); + if (ret != 0) + return ret; + } + + /* Perform capability check for the selected event devices */ + eh_do_capability_check(em_conf); + + /* + * Check if links are specified. Else generate a default config for + * the event ports used. + */ + if (em_conf->nb_link == 0) { + ret = eh_set_default_conf_link(em_conf); + if (ret != 0) + return ret; + } + + /* + * Check if rx adapters are specified. Else generate a default config + * with one rx adapter and all eth queues - event queue mapped. + */ + if (em_conf->nb_rx_adapter == 0) { + ret = eh_set_default_conf_rx_adapter(em_conf); + if (ret != 0) + return ret; + } + + /* + * Check if tx adapters are specified. Else generate a default config + * with one tx adapter. + */ + if (em_conf->nb_tx_adapter == 0) { + ret = eh_set_default_conf_tx_adapter(em_conf); + if (ret != 0) + return ret; + } + + return 0; +} + +static int +eh_initialize_eventdev(struct eventmode_conf *em_conf) +{ + struct rte_event_queue_conf eventq_conf = {0}; + struct rte_event_dev_info evdev_default_conf; + struct rte_event_dev_config eventdev_conf; + struct eventdev_params *eventdev_config; + int nb_eventdev = em_conf->nb_eventdev; + struct eh_event_link_info *link; + uint8_t *queue = NULL; + uint8_t eventdev_id; + int nb_eventqueue; + uint8_t i, j; + int ret; + + for (i = 0; i < nb_eventdev; i++) { + + /* Get eventdev config */ + eventdev_config = &(em_conf->eventdev_config[i]); + + /* Get event dev ID */ + eventdev_id = eventdev_config->eventdev_id; + + /* Get the number of queues */ + nb_eventqueue = eventdev_config->nb_eventqueue; + + /* Reset the default conf */ + memset(&evdev_default_conf, 0, + sizeof(struct rte_event_dev_info)); + + /* Get default conf of eventdev */ + ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf); + if (ret < 0) { + EH_LOG_ERR( + "Error in getting event device info[devID:%d]", + eventdev_id); + return ret; + } + + memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config)); + eventdev_conf.nb_events_limit = + evdev_default_conf.max_num_events; + eventdev_conf.nb_event_queues = nb_eventqueue; + eventdev_conf.nb_event_ports = + eventdev_config->nb_eventport; + eventdev_conf.nb_event_queue_flows = + evdev_default_conf.max_event_queue_flows; + eventdev_conf.nb_event_port_dequeue_depth = + evdev_default_conf.max_event_port_dequeue_depth; + eventdev_conf.nb_event_port_enqueue_depth = + evdev_default_conf.max_event_port_enqueue_depth; + + /* Configure event device */ + ret = rte_event_dev_configure(eventdev_id, &eventdev_conf); + if (ret < 0) { + EH_LOG_ERR("Error in configuring event device"); + return ret; + } + + /* Configure event queues */ + for (j = 0; j < nb_eventqueue; j++) { + + memset(&eventq_conf, 0, + sizeof(struct rte_event_queue_conf)); + + /* Per event dev queues can be ATQ or SINGLE LINK */ + eventq_conf.event_queue_cfg = + eventdev_config->ev_queue_mode; + /* + * All queues need to be set with sched_type as + * schedule type for the application stage. One + * queue would be reserved for the final eth tx + * stage if event device does not have internal + * ports. This will be an atomic queue. 
+ */ + if (!eventdev_config->all_internal_ports && + j == nb_eventqueue-1) { + eventq_conf.schedule_type = + RTE_SCHED_TYPE_ATOMIC; + } else { + eventq_conf.schedule_type = + em_conf->ext_params.sched_type; + } + + /* Set max atomic flows to 1024 */ + eventq_conf.nb_atomic_flows = 1024; + eventq_conf.nb_atomic_order_sequences = 1024; + + /* Setup the queue */ + ret = rte_event_queue_setup(eventdev_id, j, + &eventq_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to setup event queue %d", + ret); + return ret; + } + } + + /* Configure event ports */ + for (j = 0; j < eventdev_config->nb_eventport; j++) { + ret = rte_event_port_setup(eventdev_id, j, NULL); + if (ret < 0) { + EH_LOG_ERR("Failed to setup event port %d", + ret); + return ret; + } + } + } + + /* Make event queue - event port link */ + for (j = 0; j < em_conf->nb_link; j++) { + + /* Get link info */ + link = &(em_conf->link[j]); + + /* Get event dev ID */ + eventdev_id = link->eventdev_id; + + /* + * If "all_ev_queue_to_ev_port" params flag is selected, all + * queues need to be mapped to the port. + */ + if (em_conf->ext_params.all_ev_queue_to_ev_port) + queue = NULL; + else + queue = &(link->eventq_id); + + /* Link queue to port */ + ret = rte_event_port_link(eventdev_id, link->event_port_id, + queue, NULL, 1); + if (ret < 0) { + EH_LOG_ERR("Failed to link event port %d", ret); + return ret; + } + } + + /* Start event devices */ + for (i = 0; i < nb_eventdev; i++) { + + /* Get eventdev config */ + eventdev_config = &(em_conf->eventdev_config[i]); + + ret = rte_event_dev_start(eventdev_config->eventdev_id); + if (ret < 0) { + EH_LOG_ERR("Failed to start event device %d, %d", + i, ret); + return ret; + } + } + return 0; +} + +static int +eh_rx_adapter_configure(struct eventmode_conf *em_conf, + struct rx_adapter_conf *adapter) +{ + struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0}; + struct rte_event_dev_info evdev_default_conf = {0}; + struct rte_event_port_conf port_conf = {0}; + struct rx_adapter_connection_info *conn; + uint8_t eventdev_id; + uint32_t service_id; + int ret; + int j; + + /* Get event dev ID */ + eventdev_id = adapter->eventdev_id; + + /* Get default configuration of event dev */ + ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to get event dev info %d", ret); + return ret; + } + + /* Setup port conf */ + port_conf.new_event_threshold = 1200; + port_conf.dequeue_depth = + evdev_default_conf.max_event_port_dequeue_depth; + port_conf.enqueue_depth = + evdev_default_conf.max_event_port_enqueue_depth; + + /* Create Rx adapter */ + ret = rte_event_eth_rx_adapter_create(adapter->adapter_id, + adapter->eventdev_id, &port_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to create rx adapter %d", ret); + return ret; + } + + /* Setup various connections in the adapter */ + for (j = 0; j < adapter->nb_connections; j++) { + /* Get connection */ + conn = &(adapter->conn[j]); + + /* Setup queue conf */ + queue_conf.ev.queue_id = conn->eventq_id; + queue_conf.ev.sched_type = em_conf->ext_params.sched_type; + queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV; + + /* Add queue to the adapter */ + ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id, + conn->ethdev_id, conn->ethdev_rx_qid, + &queue_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to add eth queue to rx adapter %d", + ret); + return ret; + } + } + + /* Get the service ID used by rx adapter */ + ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id, + &service_id); + if (ret != -ESRCH && ret < 
0) { + EH_LOG_ERR("Failed to get service id used by rx adapter %d", + ret); + return ret; + } + + rte_service_set_runstate_mapped_check(service_id, 0); + + /* Start adapter */ + ret = rte_event_eth_rx_adapter_start(adapter->adapter_id); + if (ret < 0) { + EH_LOG_ERR("Failed to start rx adapter %d", ret); + return ret; + } + + return 0; +} + +static int +eh_initialize_rx_adapter(struct eventmode_conf *em_conf) +{ + struct rx_adapter_conf *adapter; + int i, ret; + + /* Configure rx adapters */ + for (i = 0; i < em_conf->nb_rx_adapter; i++) { + adapter = &(em_conf->rx_adapter[i]); + ret = eh_rx_adapter_configure(em_conf, adapter); + if (ret < 0) { + EH_LOG_ERR("Failed to configure rx adapter %d", ret); + return ret; + } + } + return 0; +} + +static int32_t +eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id) +{ + uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE]; + struct rx_adapter_conf *rx_adapter; + struct tx_adapter_conf *tx_adapter; + int service_count = 0; + int adapter_id; + int32_t ret; + int i; + + EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id); + + /* + * Parse adapter config to check which of all Rx adapters need + * to be handled by this core. + */ + for (i = 0; i < conf->nb_rx_adapter; i++) { + /* Check if we have exceeded the max allowed */ + if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) { + EH_LOG_ERR( + "Exceeded the max allowed adapters per rx core"); + break; + } + + rx_adapter = &(conf->rx_adapter[i]); + if (rx_adapter->rx_core_id != lcore_id) + continue; + + /* Adapter is handled by this core */ + adapter_id = rx_adapter->adapter_id; + + /* Get the service ID for the adapters */ + ret = rte_event_eth_rx_adapter_service_id_get(adapter_id, + &(service_id[service_count])); + + if (ret != -ESRCH && ret < 0) { + EH_LOG_ERR( + "Failed to get service id used by rx adapter"); + return ret; + } + + /* Update service count */ + service_count++; + } + + /* + * Parse adapter config to see which of all Tx adapters need + * to be handled by this core. 
+ */ + for (i = 0; i < conf->nb_tx_adapter; i++) { + /* Check if we have exceeded the max allowed */ + if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) { + EH_LOG_ERR( + "Exceeded the max allowed adapters per tx core"); + break; + } + + tx_adapter = &conf->tx_adapter[i]; + if (tx_adapter->tx_core_id != lcore_id) + continue; + + /* Adapter is handled by this core */ + adapter_id = tx_adapter->adapter_id; + + /* Get the service ID for the adapters */ + ret = rte_event_eth_tx_adapter_service_id_get(adapter_id, + &(service_id[service_count])); + + if (ret != -ESRCH && ret < 0) { + EH_LOG_ERR( + "Failed to get service id used by tx adapter"); + return ret; + } + + /* Update service count */ + service_count++; + } + + eth_core_running = true; + + while (eth_core_running) { + for (i = 0; i < service_count; i++) { + /* Initiate adapter service */ + rte_service_run_iter_on_app_lcore(service_id[i], 0); + } + } + + return 0; +} + +static int32_t +eh_stop_worker_eth_core(void) +{ + if (eth_core_running) { + EH_LOG_INFO("Stopping eth cores"); + eth_core_running = false; + } + return 0; +} + +static struct eh_app_worker_params * +eh_find_worker(uint32_t lcore_id, struct eh_conf *conf, + struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param) +{ + struct eh_app_worker_params curr_conf = { {{0} }, NULL}; + struct eh_event_link_info *link = NULL; + struct eh_app_worker_params *tmp_wrkr; + struct eventmode_conf *em_conf; + uint8_t eventdev_id; + int i; + + /* Get eventmode config */ + em_conf = conf->mode_params; + + /* + * Use event device from the first lcore-event link. + * + * Assumption: All lcore-event links tied to a core are using the + * same event device. In other words, one core would be polling on + * queues of a single event device only. + */ + + /* Get a link for this lcore */ + for (i = 0; i < em_conf->nb_link; i++) { + link = &(em_conf->link[i]); + if (link->lcore_id == lcore_id) + break; + } + + if (link == NULL) { + EH_LOG_ERR("No valid link found for lcore %d", lcore_id); + return NULL; + } + + /* Get event dev ID */ + eventdev_id = link->eventdev_id; + + /* Populate the curr_conf with the capabilities */ + + /* Check for Tx internal port */ + if (eh_dev_has_tx_internal_port(eventdev_id)) + curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT; + else + curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT; + + /* Check for burst mode */ + if (eh_dev_has_burst_mode(eventdev_id)) + curr_conf.cap.burst = EH_RX_TYPE_BURST; + else + curr_conf.cap.burst = EH_RX_TYPE_NON_BURST; + + curr_conf.cap.ipsec_mode = conf->ipsec_mode; + + /* Parse the passed list and see if we have matching capabilities */ + + /* Initialize the pointer used to traverse the list */ + tmp_wrkr = app_wrkrs; + + for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) { + + /* Skip this if capabilities are not matching */ + if (tmp_wrkr->cap.u64 != curr_conf.cap.u64) + continue; + + /* If the checks pass, we have a match */ + return tmp_wrkr; + } + + return NULL; +} + +static int +eh_verify_match_worker(struct eh_app_worker_params *match_wrkr) +{ + /* Verify registered worker */ + if (match_wrkr->worker_thread == NULL) { + EH_LOG_ERR("No worker registered"); + return 0; + } + + /* Success */ + return 1; +} + +static uint8_t +eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf, + struct eh_event_link_info **links) +{ + struct eh_event_link_info *link_cache; + struct eventmode_conf *em_conf = NULL; + struct eh_event_link_info *link; + uint8_t lcore_nb_link = 0; + size_t single_link_size; + size_t 
cache_size; + int index = 0; + int i; + + if (conf == NULL || links == NULL) { + EH_LOG_ERR("Invalid args"); + return -EINVAL; + } + + /* Get eventmode conf */ + em_conf = conf->mode_params; + + if (em_conf == NULL) { + EH_LOG_ERR("Invalid event mode parameters"); + return -EINVAL; + } + + /* Count the links configured for this lcore */ + for (i = 0; i < em_conf->nb_link; i++) { + + /* Get link */ + link = &(em_conf->link[i]); + + /* Check if we have a link intended for this lcore */ + if (link->lcore_id == lcore_id) { + + /* Update the number of links for this core */ + lcore_nb_link++; + + } + } + + /* Compute size of one entry to be copied */ + single_link_size = sizeof(struct eh_event_link_info); + + /* Compute size of the buffer required */ + cache_size = lcore_nb_link * sizeof(struct eh_event_link_info); + + /* Allocate memory for caching the links */ + link_cache = calloc(1, cache_size); + if (link_cache == NULL) { + EH_LOG_ERR("Failed to allocate memory for links"); + return -ENOMEM; + } + + /* Copy the links intended for this lcore into the cache */ + for (i = 0; i < em_conf->nb_link; i++) { + + /* Get link */ + link = &(em_conf->link[i]); + + /* Check if we have a link intended for this lcore */ + if (link->lcore_id == lcore_id) { + + /* Cache the link */ + memcpy(&link_cache[index], link, single_link_size); + + /* Update index */ + index++; + } + } + + /* Update the links for application to use the cached links */ + *links = link_cache; + + /* Return the number of cached links */ + return lcore_nb_link; +} + +static int +eh_tx_adapter_configure(struct eventmode_conf *em_conf, + struct tx_adapter_conf *adapter) +{ + struct rte_event_dev_info evdev_default_conf = {0}; + struct rte_event_port_conf port_conf = {0}; + struct tx_adapter_connection_info *conn; + struct eventdev_params *eventdev_config; + uint8_t tx_port_id = 0; + uint8_t eventdev_id; + uint32_t service_id; + int ret, j; + + /* Get event dev ID */ + eventdev_id = adapter->eventdev_id; + + /* Get event device conf */ + eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id); + + /* Create Tx adapter */ + + /* Get default configuration of event dev */ + ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to get event dev info %d", ret); + return ret; + } + + /* Setup port conf */ + port_conf.new_event_threshold = + evdev_default_conf.max_num_events; + port_conf.dequeue_depth = + evdev_default_conf.max_event_port_dequeue_depth; + port_conf.enqueue_depth = + evdev_default_conf.max_event_port_enqueue_depth; + + /* Create adapter */ + ret = rte_event_eth_tx_adapter_create(adapter->adapter_id, + adapter->eventdev_id, &port_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to create tx adapter %d", ret); + return ret; + } + + /* Setup various connections in the adapter */ + for (j = 0; j < adapter->nb_connections; j++) { + + /* Get connection */ + conn = &(adapter->conn[j]); + + /* Add queue to the adapter */ + ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id, + conn->ethdev_id, conn->ethdev_tx_qid); + if (ret < 0) { + EH_LOG_ERR("Failed to add eth queue to tx adapter %d", + ret); + return ret; + } + } + + /* + * Check if Tx core is assigned. 
If Tx core is not assigned then + * the adapter has internal port for submitting Tx packets and + * Tx event queue & port setup is not required + */ + if (adapter->tx_core_id == (uint32_t) (-1)) { + /* Internal port is present */ + goto skip_tx_queue_port_setup; + } + + /* Setup Tx queue & port */ + + /* Get event port used by the adapter */ + ret = rte_event_eth_tx_adapter_event_port_get( + adapter->adapter_id, &tx_port_id); + if (ret) { + EH_LOG_ERR("Failed to get tx adapter port id %d", ret); + return ret; + } + + /* + * Tx event queue is reserved for Tx adapter. Unlink this queue + * from all other ports + * + */ + for (j = 0; j < eventdev_config->nb_eventport; j++) { + rte_event_port_unlink(eventdev_id, j, + &(adapter->tx_ev_queue), 1); + } + + /* Link Tx event queue to Tx port */ + ret = rte_event_port_link(eventdev_id, tx_port_id, + &(adapter->tx_ev_queue), NULL, 1); + if (ret != 1) { + EH_LOG_ERR("Failed to link event queue to port"); + return ret; + } + + /* Get the service ID used by Tx adapter */ + ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id, + &service_id); + if (ret != -ESRCH && ret < 0) { + EH_LOG_ERR("Failed to get service id used by tx adapter %d", + ret); + return ret; + } + + rte_service_set_runstate_mapped_check(service_id, 0); + +skip_tx_queue_port_setup: + /* Start adapter */ + ret = rte_event_eth_tx_adapter_start(adapter->adapter_id); + if (ret < 0) { + EH_LOG_ERR("Failed to start tx adapter %d", ret); + return ret; + } + + return 0; +} + +static int +eh_initialize_tx_adapter(struct eventmode_conf *em_conf) +{ + struct tx_adapter_conf *adapter; + int i, ret; + + /* Configure Tx adapters */ + for (i = 0; i < em_conf->nb_tx_adapter; i++) { + adapter = &(em_conf->tx_adapter[i]); + ret = eh_tx_adapter_configure(em_conf, adapter); + if (ret < 0) { + EH_LOG_ERR("Failed to configure tx adapter %d", ret); + return ret; + } + } + return 0; +} + +static void +eh_display_operating_mode(struct eventmode_conf *em_conf) +{ + char sched_types[][32] = { + "RTE_SCHED_TYPE_ORDERED", + "RTE_SCHED_TYPE_ATOMIC", + "RTE_SCHED_TYPE_PARALLEL", + }; + EH_LOG_INFO("Operating mode:"); + + EH_LOG_INFO("\tScheduling type: \t%s", + sched_types[em_conf->ext_params.sched_type]); + + EH_LOG_INFO(""); +} + +static void +eh_display_event_dev_conf(struct eventmode_conf *em_conf) +{ + char queue_mode[][32] = { + "", + "ATQ (ALL TYPE QUEUE)", + "SINGLE LINK", + }; + char print_buf[256] = { 0 }; + int i; + + EH_LOG_INFO("Event Device Configuration:"); + + for (i = 0; i < em_conf->nb_eventdev; i++) { + sprintf(print_buf, + "\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d", + em_conf->eventdev_config[i].eventdev_id, + em_conf->eventdev_config[i].nb_eventqueue, + em_conf->eventdev_config[i].nb_eventport); + sprintf(print_buf + strlen(print_buf), + "\tQueue mode: %s", + queue_mode[em_conf->eventdev_config[i].ev_queue_mode]); + EH_LOG_INFO("%s", print_buf); + } + EH_LOG_INFO(""); +} + +static void +eh_display_rx_adapter_conf(struct eventmode_conf *em_conf) +{ + int nb_rx_adapter = em_conf->nb_rx_adapter; + struct rx_adapter_connection_info *conn; + struct rx_adapter_conf *adapter; + char print_buf[256] = { 0 }; + int i, j; + + EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter); + + for (i = 0; i < nb_rx_adapter; i++) { + adapter = &(em_conf->rx_adapter[i]); + sprintf(print_buf, + "\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d", + adapter->adapter_id, + adapter->nb_connections, + adapter->eventdev_id); + if (adapter->rx_core_id == (uint32_t)-1) + sprintf(print_buf + 
strlen(print_buf), + "\tRx core: %-2s", "[INTERNAL PORT]"); + else if (adapter->rx_core_id == RTE_MAX_LCORE) + sprintf(print_buf + strlen(print_buf), + "\tRx core: %-2s", "[NONE]"); + else + sprintf(print_buf + strlen(print_buf), + "\tRx core: %-2d", adapter->rx_core_id); + + EH_LOG_INFO("%s", print_buf); + + for (j = 0; j < adapter->nb_connections; j++) { + conn = &(adapter->conn[j]); + + sprintf(print_buf, + "\t\tEthdev ID: %-2d", conn->ethdev_id); + + if (conn->ethdev_rx_qid == -1) + sprintf(print_buf + strlen(print_buf), + "\tEth rx queue: %-2s", "ALL"); + else + sprintf(print_buf + strlen(print_buf), + "\tEth rx queue: %-2d", + conn->ethdev_rx_qid); + + sprintf(print_buf + strlen(print_buf), + "\tEvent queue: %-2d", conn->eventq_id); + EH_LOG_INFO("%s", print_buf); + } + } + EH_LOG_INFO(""); +} + +static void +eh_display_tx_adapter_conf(struct eventmode_conf *em_conf) +{ + int nb_tx_adapter = em_conf->nb_tx_adapter; + struct tx_adapter_connection_info *conn; + struct tx_adapter_conf *adapter; + char print_buf[256] = { 0 }; + int i, j; + + EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter); + + for (i = 0; i < nb_tx_adapter; i++) { + adapter = &(em_conf->tx_adapter[i]); + sprintf(print_buf, + "\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d", + adapter->adapter_id, + adapter->nb_connections, + adapter->eventdev_id); + if (adapter->tx_core_id == (uint32_t)-1) + sprintf(print_buf + strlen(print_buf), + "\tTx core: %-2s", "[INTERNAL PORT]"); + else if (adapter->tx_core_id == RTE_MAX_LCORE) + sprintf(print_buf + strlen(print_buf), + "\tTx core: %-2s", "[NONE]"); + else + sprintf(print_buf + strlen(print_buf), + "\tTx core: %-2d,\tInput event queue: %-2d", + adapter->tx_core_id, adapter->tx_ev_queue); + + EH_LOG_INFO("%s", print_buf); + + for (j = 0; j < adapter->nb_connections; j++) { + conn = &(adapter->conn[j]); + + sprintf(print_buf, + "\t\tEthdev ID: %-2d", conn->ethdev_id); + + if (conn->ethdev_tx_qid == -1) + sprintf(print_buf + strlen(print_buf), + "\tEth tx queue: %-2s", "ALL"); + else + sprintf(print_buf + strlen(print_buf), + "\tEth tx queue: %-2d", + conn->ethdev_tx_qid); + EH_LOG_INFO("%s", print_buf); + } + } + EH_LOG_INFO(""); +} + +static void +eh_display_link_conf(struct eventmode_conf *em_conf) +{ + struct eh_event_link_info *link; + char print_buf[256] = { 0 }; + int i; + + EH_LOG_INFO("Links configured: %d", em_conf->nb_link); + + for (i = 0; i < em_conf->nb_link; i++) { + link = &(em_conf->link[i]); + + sprintf(print_buf, + "\tEvent dev ID: %-2d\tEvent port: %-2d", + link->eventdev_id, + link->event_port_id); + + if (em_conf->ext_params.all_ev_queue_to_ev_port) + sprintf(print_buf + strlen(print_buf), + "Event queue: %-2s\t", "ALL"); + else + sprintf(print_buf + strlen(print_buf), + "Event queue: %-2d\t", link->eventq_id); + + sprintf(print_buf + strlen(print_buf), + "Lcore: %-2d", link->lcore_id); + EH_LOG_INFO("%s", print_buf); + } + EH_LOG_INFO(""); +} + +struct eh_conf * +eh_conf_init(void) +{ + struct eventmode_conf *em_conf = NULL; + struct eh_conf *conf = NULL; + unsigned int eth_core_id; + void *bitmap = NULL; + uint32_t nb_bytes; + + /* Allocate memory for config */ + conf = calloc(1, sizeof(struct eh_conf)); + if (conf == NULL) { + EH_LOG_ERR("Failed to allocate memory for eventmode helper " + "config"); + return NULL; + } + + /* Set default conf */ + + /* Packet transfer mode: poll */ + conf->mode = EH_PKT_TRANSFER_MODE_POLL; + conf->ipsec_mode = EH_IPSEC_MODE_TYPE_APP; + + /* Keep all ethernet ports enabled by default */ + 
conf->eth_portmask = -1; + + /* Allocate memory for event mode params */ + conf->mode_params = calloc(1, sizeof(struct eventmode_conf)); + if (conf->mode_params == NULL) { + EH_LOG_ERR("Failed to allocate memory for event mode params"); + goto free_conf; + } + + /* Get eventmode conf */ + em_conf = conf->mode_params; + + /* Allocate and initialize bitmap for eth cores */ + nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE); + if (!nb_bytes) { + EH_LOG_ERR("Failed to get bitmap footprint"); + goto free_em_conf; + } + + bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes, + RTE_CACHE_LINE_SIZE); + if (!bitmap) { + EH_LOG_ERR("Failed to allocate memory for eth cores bitmap\n"); + goto free_em_conf; + } + + em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap, + nb_bytes); + if (!em_conf->eth_core_mask) { + EH_LOG_ERR("Failed to initialize bitmap"); + goto free_bitmap; + } + + /* Set schedule type as not set */ + em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET; + + /* Set two cores as eth cores for Rx & Tx */ + + /* Use first core other than master core as Rx core */ + eth_core_id = rte_get_next_lcore(0, /* curr core */ + 1, /* skip master core */ + 0 /* wrap */); + + rte_bitmap_set(em_conf->eth_core_mask, eth_core_id); + + /* Use next core as Tx core */ + eth_core_id = rte_get_next_lcore(eth_core_id, /* curr core */ + 1, /* skip master core */ + 0 /* wrap */); + + rte_bitmap_set(em_conf->eth_core_mask, eth_core_id); + + return conf; + +free_bitmap: + rte_free(bitmap); +free_em_conf: + free(em_conf); +free_conf: + free(conf); + return NULL; +} + +void +eh_conf_uninit(struct eh_conf *conf) +{ + struct eventmode_conf *em_conf = NULL; + + if (!conf || !conf->mode_params) + return; + + /* Get eventmode conf */ + em_conf = conf->mode_params; + + /* Free evenmode configuration memory */ + rte_free(em_conf->eth_core_mask); + free(em_conf); + free(conf); +} + +void +eh_display_conf(struct eh_conf *conf) +{ + struct eventmode_conf *em_conf; + + if (conf == NULL) { + EH_LOG_ERR("Invalid event helper configuration"); + return; + } + + if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT) + return; + + if (conf->mode_params == NULL) { + EH_LOG_ERR("Invalid event mode parameters"); + return; + } + + /* Get eventmode conf */ + em_conf = (struct eventmode_conf *)(conf->mode_params); + + /* Display user exposed operating modes */ + eh_display_operating_mode(em_conf); + + /* Display event device conf */ + eh_display_event_dev_conf(em_conf); + + /* Display Rx adapter conf */ + eh_display_rx_adapter_conf(em_conf); + + /* Display Tx adapter conf */ + eh_display_tx_adapter_conf(em_conf); + + /* Display event-lcore link */ + eh_display_link_conf(em_conf); +} + +int32_t +eh_devs_init(struct eh_conf *conf) +{ + struct eventmode_conf *em_conf; + uint16_t port_id; + int ret; + + if (conf == NULL) { + EH_LOG_ERR("Invalid event helper configuration"); + return -EINVAL; + } + + if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT) + return 0; + + if (conf->mode_params == NULL) { + EH_LOG_ERR("Invalid event mode parameters"); + return -EINVAL; + } + + /* Get eventmode conf */ + em_conf = conf->mode_params; + + /* Eventmode conf would need eth portmask */ + em_conf->eth_portmask = conf->eth_portmask; + + /* Validate the requested config */ + ret = eh_validate_conf(em_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to validate the requested config %d", ret); + return ret; + } + + /* Display the current configuration */ + eh_display_conf(conf); + + /* Stop eth devices before setting up adapter */ + 
RTE_ETH_FOREACH_DEV(port_id) { + + /* Use only the ports enabled */ + if ((conf->eth_portmask & (1 << port_id)) == 0) + continue; + + rte_eth_dev_stop(port_id); + } + + /* Setup eventdev */ + ret = eh_initialize_eventdev(em_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to initialize event dev %d", ret); + return ret; + } + + /* Setup Rx adapter */ + ret = eh_initialize_rx_adapter(em_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to initialize rx adapter %d", ret); + return ret; + } + + /* Setup Tx adapter */ + ret = eh_initialize_tx_adapter(em_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to initialize tx adapter %d", ret); + return ret; + } + + /* Start eth devices after setting up adapter */ + RTE_ETH_FOREACH_DEV(port_id) { + + /* Use only the ports enabled */ + if ((conf->eth_portmask & (1 << port_id)) == 0) + continue; + + ret = rte_eth_dev_start(port_id); + if (ret < 0) { + EH_LOG_ERR("Failed to start eth dev %d, %d", + port_id, ret); + return ret; + } + } + + return 0; +} + +int32_t +eh_devs_uninit(struct eh_conf *conf) +{ + struct eventmode_conf *em_conf; + int ret, i, j; + uint16_t id; + + if (conf == NULL) { + EH_LOG_ERR("Invalid event helper configuration"); + return -EINVAL; + } + + if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT) + return 0; + + if (conf->mode_params == NULL) { + EH_LOG_ERR("Invalid event mode parameters"); + return -EINVAL; + } + + /* Get eventmode conf */ + em_conf = conf->mode_params; + + /* Stop and release rx adapters */ + for (i = 0; i < em_conf->nb_rx_adapter; i++) { + + id = em_conf->rx_adapter[i].adapter_id; + ret = rte_event_eth_rx_adapter_stop(id); + if (ret < 0) { + EH_LOG_ERR("Failed to stop rx adapter %d", ret); + return ret; + } + + for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) { + + ret = rte_event_eth_rx_adapter_queue_del(id, + em_conf->rx_adapter[i].conn[j].ethdev_id, -1); + if (ret < 0) { + EH_LOG_ERR( + "Failed to remove rx adapter queues %d", + ret); + return ret; + } + } + + ret = rte_event_eth_rx_adapter_free(id); + if (ret < 0) { + EH_LOG_ERR("Failed to free rx adapter %d", ret); + return ret; + } + } + + /* Stop and release event devices */ + for (i = 0; i < em_conf->nb_eventdev; i++) { + + id = em_conf->eventdev_config[i].eventdev_id; + rte_event_dev_stop(id); + + ret = rte_event_dev_close(id); + if (ret < 0) { + EH_LOG_ERR("Failed to close event dev %d, %d", id, ret); + return ret; + } + } + + /* Stop and release tx adapters */ + for (i = 0; i < em_conf->nb_tx_adapter; i++) { + + id = em_conf->tx_adapter[i].adapter_id; + ret = rte_event_eth_tx_adapter_stop(id); + if (ret < 0) { + EH_LOG_ERR("Failed to stop tx adapter %d", ret); + return ret; + } + + for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) { + + ret = rte_event_eth_tx_adapter_queue_del(id, + em_conf->tx_adapter[i].conn[j].ethdev_id, -1); + if (ret < 0) { + EH_LOG_ERR( + "Failed to remove tx adapter queues %d", + ret); + return ret; + } + } + + ret = rte_event_eth_tx_adapter_free(id); + if (ret < 0) { + EH_LOG_ERR("Failed to free tx adapter %d", ret); + return ret; + } + } + + return 0; +} + +void +eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr, + uint8_t nb_wrkr_param) +{ + struct eh_app_worker_params *match_wrkr; + struct eh_event_link_info *links = NULL; + struct eventmode_conf *em_conf; + uint32_t lcore_id; + uint8_t nb_links; + + if (conf == NULL) { + EH_LOG_ERR("Invalid event helper configuration"); + return; + } + + if (conf->mode_params == NULL) { + EH_LOG_ERR("Invalid event mode parameters"); + return; + } + + /* Get 
eventmode conf */ + em_conf = conf->mode_params; + + /* Get core ID */ + lcore_id = rte_lcore_id(); + + /* Check if this is eth core */ + if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) { + eh_start_worker_eth_core(em_conf, lcore_id); + return; + } + + if (app_wrkr == NULL || nb_wrkr_param == 0) { + EH_LOG_ERR("Invalid args"); + return; + } + + /* + * This is a regular worker thread. The application registers + * multiple workers with various capabilities. Run worker + * based on the selected capabilities of the event + * device configured. + */ + + /* Get the first matching worker for the event device */ + match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param); + if (match_wrkr == NULL) { + EH_LOG_ERR("Failed to match worker registered for lcore %d", + lcore_id); + goto clean_and_exit; + } + + /* Verify sanity of the matched worker */ + if (eh_verify_match_worker(match_wrkr) != 1) { + EH_LOG_ERR("Failed to validate the matched worker"); + goto clean_and_exit; + } + + /* Get worker links */ + nb_links = eh_get_event_lcore_links(lcore_id, conf, &links); + + /* Launch the worker thread */ + match_wrkr->worker_thread(links, nb_links); + + /* Free links info memory */ + free(links); + +clean_and_exit: + + /* Flag eth_cores to stop, if started */ + eh_stop_worker_eth_core(); +} + +uint8_t +eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id) +{ + struct eventdev_params *eventdev_config; + struct eventmode_conf *em_conf; + + if (conf == NULL) { + EH_LOG_ERR("Invalid event helper configuration"); + return -EINVAL; + } + + if (conf->mode_params == NULL) { + EH_LOG_ERR("Invalid event mode parameters"); + return -EINVAL; + } + + /* Get eventmode conf */ + em_conf = conf->mode_params; + + /* Get event device conf */ + eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id); + + if (eventdev_config == NULL) { + EH_LOG_ERR("Failed to read eventdev config"); + return -EINVAL; + } + + /* + * The last queue is reserved to be used as atomic queue for the + * last stage (eth packet tx stage) + */ + return eventdev_config->nb_eventqueue - 1; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/event_helper.h b/src/spdk/dpdk/examples/ipsec-secgw/event_helper.h new file mode 100644 index 000000000..b65b34336 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/event_helper.h @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2020 Marvell International Ltd. + */ +#ifndef _EVENT_HELPER_H_ +#define _EVENT_HELPER_H_ + +#include <rte_log.h> + +#define RTE_LOGTYPE_EH RTE_LOGTYPE_USER4 + +#define EH_LOG_ERR(...) \ + RTE_LOG(ERR, EH, \ + RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \ + __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__ ,))) + +#define EH_LOG_INFO(...) 
\ + RTE_LOG(INFO, EH, \ + RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \ + __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__ ,))) + +/* Max event devices supported */ +#define EVENT_MODE_MAX_EVENT_DEVS RTE_EVENT_MAX_DEVS + +/* Max Rx adapters supported */ +#define EVENT_MODE_MAX_RX_ADAPTERS RTE_EVENT_MAX_DEVS + +/* Max Tx adapters supported */ +#define EVENT_MODE_MAX_TX_ADAPTERS RTE_EVENT_MAX_DEVS + +/* Max Rx adapter connections */ +#define EVENT_MODE_MAX_CONNECTIONS_PER_ADAPTER 16 + +/* Max Tx adapter connections */ +#define EVENT_MODE_MAX_CONNECTIONS_PER_TX_ADAPTER 16 + +/* Max event queues supported per event device */ +#define EVENT_MODE_MAX_EVENT_QUEUES_PER_DEV RTE_EVENT_MAX_QUEUES_PER_DEV + +/* Max event-lcore links */ +#define EVENT_MODE_MAX_LCORE_LINKS \ + (EVENT_MODE_MAX_EVENT_DEVS * EVENT_MODE_MAX_EVENT_QUEUES_PER_DEV) + +/* Max adapters that one Rx core can handle */ +#define EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE EVENT_MODE_MAX_RX_ADAPTERS + +/* Max adapters that one Tx core can handle */ +#define EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE EVENT_MODE_MAX_TX_ADAPTERS + +/* Used to indicate that queue schedule type is not set */ +#define SCHED_TYPE_NOT_SET 3 + +/** + * Packet transfer mode of the application + */ +enum eh_pkt_transfer_mode { + EH_PKT_TRANSFER_MODE_POLL = 0, + EH_PKT_TRANSFER_MODE_EVENT, +}; + +/** + * Event mode packet rx types + */ +enum eh_rx_types { + EH_RX_TYPE_NON_BURST = 0, + EH_RX_TYPE_BURST +}; + +/** + * Event mode packet tx types + */ +enum eh_tx_types { + EH_TX_TYPE_INTERNAL_PORT = 0, + EH_TX_TYPE_NO_INTERNAL_PORT +}; + +/** + * Event mode ipsec mode types + */ +enum eh_ipsec_mode_types { + EH_IPSEC_MODE_TYPE_APP = 0, + EH_IPSEC_MODE_TYPE_DRIVER +}; + +/* Event dev params */ +struct eventdev_params { + uint8_t eventdev_id; + uint8_t nb_eventqueue; + uint8_t nb_eventport; + uint8_t ev_queue_mode; + uint8_t all_internal_ports; +}; + +/** + * Event-lcore link configuration + */ +struct eh_event_link_info { + uint8_t eventdev_id; + /**< Event device ID */ + uint8_t event_port_id; + /**< Event port ID */ + uint8_t eventq_id; + /**< Event queue to be linked to the port */ + uint8_t lcore_id; + /**< Lcore to be polling on this port */ +}; + +/* Rx adapter connection info */ +struct rx_adapter_connection_info { + uint8_t ethdev_id; + uint8_t eventq_id; + int32_t ethdev_rx_qid; +}; + +/* Rx adapter conf */ +struct rx_adapter_conf { + int32_t eventdev_id; + int32_t adapter_id; + uint32_t rx_core_id; + uint8_t nb_connections; + struct rx_adapter_connection_info + conn[EVENT_MODE_MAX_CONNECTIONS_PER_ADAPTER]; +}; + +/* Tx adapter connection info */ +struct tx_adapter_connection_info { + uint8_t ethdev_id; + int32_t ethdev_tx_qid; +}; + +/* Tx adapter conf */ +struct tx_adapter_conf { + int32_t eventdev_id; + int32_t adapter_id; + uint32_t tx_core_id; + uint8_t nb_connections; + struct tx_adapter_connection_info + conn[EVENT_MODE_MAX_CONNECTIONS_PER_TX_ADAPTER]; + uint8_t tx_ev_queue; +}; + +/* Eventmode conf data */ +struct eventmode_conf { + int nb_eventdev; + /**< No of event devs */ + struct eventdev_params eventdev_config[EVENT_MODE_MAX_EVENT_DEVS]; + /**< Per event dev conf */ + uint8_t nb_rx_adapter; + /**< No of Rx adapters */ + struct rx_adapter_conf rx_adapter[EVENT_MODE_MAX_RX_ADAPTERS]; + /**< Rx adapter conf */ + uint8_t nb_tx_adapter; + /**< No of Tx adapters */ + struct tx_adapter_conf tx_adapter[EVENT_MODE_MAX_TX_ADAPTERS]; + /** Tx adapter conf */ + uint8_t nb_link; + /**< No of links */ + struct eh_event_link_info + 
link[EVENT_MODE_MAX_LCORE_LINKS]; + /**< Per link conf */ + struct rte_bitmap *eth_core_mask; + /**< Core mask of cores to be used for software Rx and Tx */ + uint32_t eth_portmask; + /**< Mask of the eth ports to be used */ + union { + RTE_STD_C11 + struct { + uint64_t sched_type : 2; + /**< Schedule type */ + uint64_t all_ev_queue_to_ev_port : 1; + /**< + * When enabled, all event queues need to be mapped to + * each event port + */ + }; + uint64_t u64; + } ext_params; + /**< 64 bit field to specify extended params */ +}; + +/** + * Event helper configuration + */ +struct eh_conf { + enum eh_pkt_transfer_mode mode; + /**< Packet transfer mode of the application */ + uint32_t eth_portmask; + /**< + * Mask of the eth ports to be used. This portmask would be + * checked while initializing devices using helper routines. + */ + void *mode_params; + /**< Mode specific parameters */ + + /** Application specific params */ + enum eh_ipsec_mode_types ipsec_mode; + /**< Mode of ipsec run */ +}; + +/* Workers registered by the application */ +struct eh_app_worker_params { + union { + RTE_STD_C11 + struct { + uint64_t burst : 1; + /**< Specify status of rx type burst */ + uint64_t tx_internal_port : 1; + /**< Specify whether tx internal port is available */ + uint64_t ipsec_mode : 1; + /**< Specify ipsec processing level */ + }; + uint64_t u64; + } cap; + /**< Capabilities of this worker */ + void (*worker_thread)(struct eh_event_link_info *links, + uint8_t nb_links); + /**< Worker thread */ +}; + +/** + * Allocate memory for event helper configuration and initialize + * it with default values. + * + * @return + * - pointer to event helper configuration structure on success. + * - NULL on failure. + */ +struct eh_conf * +eh_conf_init(void); + +/** + * Uninitialize event helper configuration and release its memory +. * + * @param conf + * Event helper configuration + */ +void +eh_conf_uninit(struct eh_conf *conf); + +/** + * Initialize event mode devices + * + * Application can call this function to get the event devices, eth devices + * and eth rx & tx adapters initialized according to the default config or + * config populated using the command line args. + * + * Application is expected to initialize the eth devices and then the event + * mode helper subsystem will stop & start eth devices according to its + * requirement. Call to this function should be done after the eth devices + * are successfully initialized. + * + * @param conf + * Event helper configuration + * @return + * - 0 on success. + * - (<0) on failure. + */ +int32_t +eh_devs_init(struct eh_conf *conf); + +/** + * Release event mode devices + * + * Application can call this function to release event devices, + * eth rx & tx adapters according to the config. + * + * Call to this function should be done before application stops + * and closes eth devices. This function will not close and stop + * eth devices. + * + * @param conf + * Event helper configuration + * @return + * - 0 on success. + * - (<0) on failure. + */ +int32_t +eh_devs_uninit(struct eh_conf *conf); + +/** + * Get eventdev tx queue + * + * If the application uses event device which does not support internal port + * then it needs to submit the events to a Tx queue before final transmission. + * This Tx queue will be created internally by the eventmode helper subsystem, + * and application will need its queue ID when it runs the execution loop. 
+ * + * @param mode_conf + * Event helper configuration + * @param eventdev_id + * Event device ID + * @return + * Tx queue ID + */ +uint8_t +eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id); + +/** + * Display event mode configuration + * + * @param conf + * Event helper configuration + */ +void +eh_display_conf(struct eh_conf *conf); + + +/** + * Launch eventmode worker + * + * The application can request the eventmode helper subsystem to launch the + * worker based on the capabilities of event device and the options selected + * while initializing the eventmode. + * + * @param conf + * Event helper configuration + * @param app_wrkr + * List of all the workers registered by application, along with its + * capabilities + * @param nb_wrkr_param + * Number of workers passed by the application + * + */ +void +eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr, + uint8_t nb_wrkr_param); + +#endif /* _EVENT_HELPER_H_ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipip.h b/src/spdk/dpdk/examples/ipsec-secgw/ipip.h new file mode 100644 index 000000000..13b8455c3 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipip.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +#ifndef __IPIP_H__ +#define __IPIP_H__ + +#include <stdint.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> + +#include <rte_mbuf.h> + +static inline void * +ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6, + struct ip_addr *src, struct ip_addr *dst) +{ + struct ip *inip4, *outip4; + struct ip6_hdr *inip6, *outip6; + uint8_t ds_ecn; + + inip4 = rte_pktmbuf_mtod(m, struct ip *); + + RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION); + + if (inip4->ip_v == IPVERSION) { + /* XXX This should be done by the forwarding engine instead */ + inip4->ip_ttl -= 1; + if (inip4->ip_sum >= rte_cpu_to_be_16(0xffff - 0x100)) + inip4->ip_sum += rte_cpu_to_be_16(0x100) + 1; + else + inip4->ip_sum += rte_cpu_to_be_16(0x100); + ds_ecn = inip4->ip_tos; + } else { + inip6 = (struct ip6_hdr *)inip4; + /* XXX This should be done by the forwarding engine instead */ + inip6->ip6_hops -= 1; + ds_ecn = ntohl(inip6->ip6_flow) >> 20; + } + + if (is_ipv6) { + offset += sizeof(struct ip6_hdr); + outip6 = (struct ip6_hdr *)rte_pktmbuf_prepend(m, offset); + + RTE_ASSERT(outip6 != NULL); + + /* Per RFC4301 5.1.2.1 */ + outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20); + outip6->ip6_plen = htons(rte_pktmbuf_data_len(m) - + sizeof(struct ip6_hdr)); + + outip6->ip6_nxt = IPPROTO_ESP; + outip6->ip6_hops = IPDEFTTL; + + memcpy(&outip6->ip6_src.s6_addr, src, 16); + memcpy(&outip6->ip6_dst.s6_addr, dst, 16); + + return outip6; + } + + offset += sizeof(struct ip); + outip4 = (struct ip *)rte_pktmbuf_prepend(m, offset); + + RTE_ASSERT(outip4 != NULL); + + /* Per RFC4301 5.1.2.1 */ + outip4->ip_v = IPVERSION; + outip4->ip_hl = 5; + outip4->ip_tos = ds_ecn; + outip4->ip_len = htons(rte_pktmbuf_data_len(m)); + + outip4->ip_id = 0; + outip4->ip_off = 0; + + outip4->ip_ttl = IPDEFTTL; + outip4->ip_p = IPPROTO_ESP; + + outip4->ip_src.s_addr = src->ip.ip4; + outip4->ip_dst.s_addr = dst->ip.ip4; + m->packet_type &= ~RTE_PTYPE_L4_MASK; + return outip4; +} + +static inline struct ip * +ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, + struct ip_addr *src, struct ip_addr *dst) +{ + return ipip_outbound(m, offset, 0, src, dst); +} + +static inline struct ip6_hdr * +ip6ip_outbound(struct rte_mbuf *m, uint32_t 
offset, + struct ip_addr *src, struct ip_addr *dst) +{ + return ipip_outbound(m, offset, 1, src, dst); +} + +static inline void +ip4_ecn_setup(struct ip *ip4) +{ + if (ip4->ip_tos & IPTOS_ECN_MASK) { + unsigned long sum; + uint8_t old; + + old = ip4->ip_tos; + ip4->ip_tos |= IPTOS_ECN_CE; + sum = old + (~(*(uint8_t *)&ip4->ip_tos) & 0xff); + sum += rte_be_to_cpu_16(ip4->ip_sum); + sum = (sum & 0xffff) + (sum >> 16); + ip4->ip_sum = rte_cpu_to_be_16(sum + (sum >> 16)); + } +} + +static inline void +ip6_ecn_setup(struct ip6_hdr *ip6) +{ + if ((ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK) + ip6->ip6_flow = htonl(ntohl(ip6->ip6_flow) | + (IPTOS_ECN_CE << 20)); +} + +static inline void +ipip_inbound(struct rte_mbuf *m, uint32_t offset) +{ + struct ip *inip4, *outip4; + struct ip6_hdr *inip6, *outip6; + uint32_t ip_len, set_ecn; + + outip4 = rte_pktmbuf_mtod(m, struct ip*); + + RTE_ASSERT(outip4->ip_v == IPVERSION || outip4->ip_v == IP6_VERSION); + + if (outip4->ip_v == IPVERSION) { + ip_len = sizeof(struct ip); + set_ecn = ((outip4->ip_tos & IPTOS_ECN_CE) == IPTOS_ECN_CE); + } else { + outip6 = (struct ip6_hdr *)outip4; + ip_len = sizeof(struct ip6_hdr); + set_ecn = ntohl(outip6->ip6_flow) >> 20; + set_ecn = ((set_ecn & IPTOS_ECN_CE) == IPTOS_ECN_CE); + } + + inip4 = (struct ip *)rte_pktmbuf_adj(m, offset + ip_len); + RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION); + + /* Check packet is still bigger than IP header (inner) */ + RTE_ASSERT(rte_pktmbuf_pkt_len(m) > ip_len); + + /* RFC4301 5.1.2.1 Note 6 */ + if (inip4->ip_v == IPVERSION) { + if (set_ecn) + ip4_ecn_setup(inip4); + /* XXX This should be done by the forwarding engine instead */ + inip4->ip_ttl -= 1; + if (inip4->ip_sum >= rte_cpu_to_be_16(0xffff - 0x100)) + inip4->ip_sum += rte_cpu_to_be_16(0x100) + 1; + else + inip4->ip_sum += rte_cpu_to_be_16(0x100); + m->packet_type &= ~RTE_PTYPE_L4_MASK; + if (inip4->ip_p == IPPROTO_UDP) + m->packet_type |= RTE_PTYPE_L4_UDP; + else if (inip4->ip_p == IPPROTO_TCP) + m->packet_type |= RTE_PTYPE_L4_TCP; + } else { + inip6 = (struct ip6_hdr *)inip4; + if (set_ecn) + ip6_ecn_setup(inip6); + /* XXX This should be done by the forwarding engine instead */ + inip6->ip6_hops -= 1; + } +} + +#endif /* __IPIP_H__ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.c new file mode 100644 index 000000000..f777ce2af --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.c @@ -0,0 +1,2970 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <inttypes.h> +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> +#include <string.h> +#include <sys/queue.h> +#include <stdarg.h> +#include <errno.h> +#include <signal.h> +#include <getopt.h> + +#include <rte_common.h> +#include <rte_bitmap.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_eal.h> +#include <rte_launch.h> +#include <rte_atomic.h> +#include <rte_cycles.h> +#include <rte_prefetch.h> +#include <rte_lcore.h> +#include <rte_per_lcore.h> +#include <rte_branch_prediction.h> +#include <rte_interrupts.h> +#include <rte_random.h> +#include <rte_debug.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_acl.h> +#include <rte_lpm.h> +#include <rte_lpm6.h> +#include <rte_hash.h> +#include <rte_jhash.h> +#include 
<rte_cryptodev.h> +#include <rte_security.h> +#include <rte_eventdev.h> +#include <rte_ip.h> +#include <rte_ip_frag.h> + +#include "event_helper.h" +#include "ipsec.h" +#include "ipsec_worker.h" +#include "parser.h" +#include "sad.h" + +volatile bool force_quit; + +#define MAX_JUMBO_PKT_LEN 9600 + +#define MEMPOOL_CACHE_SIZE 256 + +#define CDEV_QUEUE_DESC 2048 +#define CDEV_MAP_ENTRIES 16384 +#define CDEV_MP_CACHE_SZ 64 +#define MAX_QUEUE_PAIRS 1 + +#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ + +/* Configure how many packets ahead to prefetch, when reading packets */ +#define PREFETCH_OFFSET 3 + +#define MAX_RX_QUEUE_PER_LCORE 16 + +#define MAX_LCORE_PARAMS 1024 + +/* + * Configurable number of RX/TX ring descriptors + */ +#define IPSEC_SECGW_RX_DESC_DEFAULT 1024 +#define IPSEC_SECGW_TX_DESC_DEFAULT 1024 +static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; +static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; + +#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ + (addr)->addr_bytes[0], (addr)->addr_bytes[1], \ + (addr)->addr_bytes[2], (addr)->addr_bytes[3], \ + (addr)->addr_bytes[4], (addr)->addr_bytes[5], \ + 0, 0) + +#define FRAG_TBL_BUCKET_ENTRIES 4 +#define MAX_FRAG_TTL_NS (10LL * NS_PER_S) + +#define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } +}; + +struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; + +#define CMD_LINE_OPT_CONFIG "config" +#define CMD_LINE_OPT_SINGLE_SA "single-sa" +#define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask" +#define CMD_LINE_OPT_TRANSFER_MODE "transfer-mode" +#define CMD_LINE_OPT_SCHEDULE_TYPE "event-schedule-type" +#define CMD_LINE_OPT_RX_OFFLOAD "rxoffload" +#define CMD_LINE_OPT_TX_OFFLOAD "txoffload" +#define CMD_LINE_OPT_REASSEMBLE "reassemble" +#define CMD_LINE_OPT_MTU "mtu" +#define CMD_LINE_OPT_FRAG_TTL "frag-ttl" + +#define CMD_LINE_ARG_EVENT "event" +#define CMD_LINE_ARG_POLL "poll" +#define CMD_LINE_ARG_ORDERED "ordered" +#define CMD_LINE_ARG_ATOMIC "atomic" +#define CMD_LINE_ARG_PARALLEL "parallel" + +enum { + /* long options mapped to a short option */ + + /* first long only option value must be >= 256, so that we won't + * conflict with short options + */ + CMD_LINE_OPT_MIN_NUM = 256, + CMD_LINE_OPT_CONFIG_NUM, + CMD_LINE_OPT_SINGLE_SA_NUM, + CMD_LINE_OPT_CRYPTODEV_MASK_NUM, + CMD_LINE_OPT_TRANSFER_MODE_NUM, + CMD_LINE_OPT_SCHEDULE_TYPE_NUM, + CMD_LINE_OPT_RX_OFFLOAD_NUM, + CMD_LINE_OPT_TX_OFFLOAD_NUM, + CMD_LINE_OPT_REASSEMBLE_NUM, + CMD_LINE_OPT_MTU_NUM, + CMD_LINE_OPT_FRAG_TTL_NUM, +}; + +static const struct option lgopts[] = { + {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM}, + {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM}, + {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM}, + {CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM}, + {CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM}, + {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM}, + {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM}, + {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM}, + {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM}, + {CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM}, + {NULL, 0, 0, 0} +}; + +uint32_t unprotected_port_mask; +uint32_t single_sa_idx; +/* 
mask of enabled ports */ +static uint32_t enabled_port_mask; +static uint64_t enabled_cryptodev_mask = UINT64_MAX; +static int32_t promiscuous_on = 1; +static int32_t numa_on = 1; /**< NUMA is enabled by default. */ +static uint32_t nb_lcores; +static uint32_t single_sa; +static uint32_t nb_bufs_in_pool; + +/* + * RX/TX HW offload capabilities to enable/use on ethernet ports. + * By default all capabilities are enabled. + */ +static uint64_t dev_rx_offload = UINT64_MAX; +static uint64_t dev_tx_offload = UINT64_MAX; + +/* + * global values that determine multi-seg policy + */ +static uint32_t frag_tbl_sz; +static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE; +static uint32_t mtu_size = RTE_ETHER_MTU; +static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS; + +/* application-wide librte_ipsec/SA parameters */ +struct app_sa_prm app_sa_prm = { + .enable = 0, + .cache_sz = SA_CACHE_SZ + }; +static const char *cfgfile; + +struct lcore_rx_queue { + uint16_t port_id; + uint8_t queue_id; +} __rte_cache_aligned; + +struct lcore_params { + uint16_t port_id; + uint8_t queue_id; + uint8_t lcore_id; +} __rte_cache_aligned; + +static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; + +static struct lcore_params *lcore_params; +static uint16_t nb_lcore_params; + +static struct rte_hash *cdev_map_in; +static struct rte_hash *cdev_map_out; + +struct buffer { + uint16_t len; + struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); +}; + +struct lcore_conf { + uint16_t nb_rx_queue; + struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; + struct buffer tx_mbufs[RTE_MAX_ETHPORTS]; + struct ipsec_ctx inbound; + struct ipsec_ctx outbound; + struct rt_ctx *rt4_ctx; + struct rt_ctx *rt6_ctx; + struct { + struct rte_ip_frag_tbl *tbl; + struct rte_mempool *pool_dir; + struct rte_mempool *pool_indir; + struct rte_ip_frag_death_row dr; + } frag; +} __rte_cache_aligned; + +static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + +static struct rte_eth_conf port_conf = { + .rxmode = { + .mq_mode = ETH_MQ_RX_RSS, + .max_rx_pkt_len = RTE_ETHER_MAX_LEN, + .split_hdr_size = 0, + .offloads = DEV_RX_OFFLOAD_CHECKSUM, + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | + ETH_RSS_TCP | ETH_RSS_SCTP, + }, + }, + .txmode = { + .mq_mode = ETH_MQ_TX_NONE, + }, +}; + +struct socket_ctx socket_ctx[NB_SOCKETS]; + +/* + * Determine if multi-segment support is required: + * - either the frame buffer size is smaller than the MTU + * - or reassembly support is requested + */ +static int +multi_seg_required(void) +{ + return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM > + frame_buf_size || frag_tbl_sz != 0); +} + +static inline void +adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph, + uint32_t l2_len) +{ + uint32_t plen, trim; + + plen = rte_be_to_cpu_16(iph->total_length) + l2_len; + if (plen < m->pkt_len) { + trim = m->pkt_len - plen; + rte_pktmbuf_trim(m, trim); + } +} + +static inline void +adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph, + uint32_t l2_len) +{ + uint32_t plen, trim; + + plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len; + if (plen < m->pkt_len) { + trim = m->pkt_len - plen; + rte_pktmbuf_trim(m, trim); + } +} + +static inline void +prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) +{ + const struct rte_ether_hdr *eth; + const struct rte_ipv4_hdr *iph4; + const struct rte_ipv6_hdr *iph6; + + eth = rte_pktmbuf_mtod(pkt, const struct 
rte_ether_hdr *); + if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + + iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt, + RTE_ETHER_HDR_LEN); + adjust_ipv4_pktlen(pkt, iph4, 0); + + if (iph4->next_proto_id == IPPROTO_ESP) + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + else { + t->ip4.data[t->ip4.num] = &iph4->next_proto_id; + t->ip4.pkts[(t->ip4.num)++] = pkt; + } + pkt->l2_len = 0; + pkt->l3_len = sizeof(*iph4); + pkt->packet_type |= RTE_PTYPE_L3_IPV4; + } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + int next_proto; + size_t l3len, ext_len; + uint8_t *p; + + /* get protocol type */ + iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt, + RTE_ETHER_HDR_LEN); + adjust_ipv6_pktlen(pkt, iph6, 0); + + next_proto = iph6->proto; + + /* determine l3 header size up to ESP extension */ + l3len = sizeof(struct ip6_hdr); + p = rte_pktmbuf_mtod(pkt, uint8_t *); + while (next_proto != IPPROTO_ESP && l3len < pkt->data_len && + (next_proto = rte_ipv6_get_next_ext(p + l3len, + next_proto, &ext_len)) >= 0) + l3len += ext_len; + + /* drop packet when IPv6 header exceeds first segment length */ + if (unlikely(l3len > pkt->data_len)) { + rte_pktmbuf_free(pkt); + return; + } + + if (next_proto == IPPROTO_ESP) + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + else { + t->ip6.data[t->ip6.num] = &iph6->proto; + t->ip6.pkts[(t->ip6.num)++] = pkt; + } + pkt->l2_len = 0; + pkt->l3_len = l3len; + pkt->packet_type |= RTE_PTYPE_L3_IPV6; + } else { + /* Unknown/Unsupported type, drop the packet */ + RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n", + rte_be_to_cpu_16(eth->ether_type)); + rte_pktmbuf_free(pkt); + return; + } + + /* Check if the packet has been processed inline. For inline protocol + * processed packets, the metadata in the mbuf can be used to identify + * the security processing done on the packet. The metadata will be + * used to retrieve the application registered userdata associated + * with the security session. + */ + + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) { + struct ipsec_sa *sa; + struct ipsec_mbuf_metadata *priv; + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx( + pkt->port); + + /* Retrieve the userdata registered. Here, the userdata + * registered is the SA pointer. + */ + + sa = (struct ipsec_sa *) + rte_security_get_userdata(ctx, pkt->udata64); + + if (sa == NULL) { + /* userdata could not be retrieved */ + return; + } + + /* Save SA as priv member in mbuf. This will be used in the + * IPsec selector(SP-SA) check. 
+ */ + + priv = get_priv(pkt); + priv->sa = sa; + } +} + +static inline void +prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t, + uint16_t nb_pkts) +{ + int32_t i; + + t->ipsec.num = 0; + t->ip4.num = 0; + t->ip6.num = 0; + + for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) { + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET], + void *)); + prepare_one_packet(pkts[i], t); + } + /* Process left packets */ + for (; i < nb_pkts; i++) + prepare_one_packet(pkts[i], t); +} + +static inline void +prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port, + const struct lcore_conf *qconf) +{ + struct ip *ip; + struct rte_ether_hdr *ethhdr; + + ip = rte_pktmbuf_mtod(pkt, struct ip *); + + ethhdr = (struct rte_ether_hdr *) + rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); + + if (ip->ip_v == IPVERSION) { + pkt->ol_flags |= qconf->outbound.ipv4_offloads; + pkt->l3_len = sizeof(struct ip); + pkt->l2_len = RTE_ETHER_HDR_LEN; + + ip->ip_sum = 0; + + /* calculate IPv4 cksum in SW */ + if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0) + ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip); + + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + } else { + pkt->ol_flags |= qconf->outbound.ipv6_offloads; + pkt->l3_len = sizeof(struct ip6_hdr); + pkt->l2_len = RTE_ETHER_HDR_LEN; + + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + } + + memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src, + sizeof(struct rte_ether_addr)); + memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst, + sizeof(struct rte_ether_addr)); +} + +static inline void +prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port, + const struct lcore_conf *qconf) +{ + int32_t i; + const int32_t prefetch_offset = 2; + + for (i = 0; i < (nb_pkts - prefetch_offset); i++) { + rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]); + prepare_tx_pkt(pkts[i], port, qconf); + } + /* Process left packets */ + for (; i < nb_pkts; i++) + prepare_tx_pkt(pkts[i], port, qconf); +} + +/* Send burst of packets on an output interface */ +static inline int32_t +send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) +{ + struct rte_mbuf **m_table; + int32_t ret; + uint16_t queueid; + + queueid = qconf->tx_queue_id[port]; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + prepare_tx_burst(m_table, n, port, qconf); + + ret = rte_eth_tx_burst(port, queueid, m_table, n); + if (unlikely(ret < n)) { + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +/* + * Helper function to fragment and queue for TX one packet. 
+ */ +static inline uint32_t +send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m, + uint16_t port, uint8_t proto) +{ + struct buffer *tbl; + uint32_t len, n; + int32_t rc; + + tbl = qconf->tx_mbufs + port; + len = tbl->len; + + /* free space for new fragments */ + if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) { + send_burst(qconf, len, port); + len = 0; + } + + n = RTE_DIM(tbl->m_table) - len; + + if (proto == IPPROTO_IP) + rc = rte_ipv4_fragment_packet(m, tbl->m_table + len, + n, mtu_size, qconf->frag.pool_dir, + qconf->frag.pool_indir); + else + rc = rte_ipv6_fragment_packet(m, tbl->m_table + len, + n, mtu_size, qconf->frag.pool_dir, + qconf->frag.pool_indir); + + if (rc >= 0) + len += rc; + else + RTE_LOG(ERR, IPSEC, + "%s: failed to fragment packet with size %u, " + "error code: %d\n", + __func__, m->pkt_len, rte_errno); + + rte_pktmbuf_free(m); + return len; +} + +/* Enqueue a single packet, and send burst if queue is filled */ +static inline int32_t +send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto) +{ + uint32_t lcore_id; + uint16_t len; + struct lcore_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + + if (m->pkt_len <= mtu_size) { + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* need to fragment the packet */ + } else if (frag_tbl_sz > 0) + len = send_fragment_packet(qconf, m, port, proto); + else + rte_pktmbuf_free(m); + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +static inline void +inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip, + uint16_t lim) +{ + struct rte_mbuf *m; + uint32_t i, j, res, sa_idx; + + if (ip->num == 0 || sp == NULL) + return; + + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, + ip->num, DEFAULT_MAX_CATEGORIES); + + j = 0; + for (i = 0; i < ip->num; i++) { + m = ip->pkts[i]; + res = ip->res[i]; + if (res == BYPASS) { + ip->pkts[j++] = m; + continue; + } + if (res == DISCARD) { + rte_pktmbuf_free(m); + continue; + } + + /* Only check SPI match for processed IPSec packets */ + if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) { + rte_pktmbuf_free(m); + continue; + } + + sa_idx = res - 1; + if (!inbound_sa_check(sa, m, sa_idx)) { + rte_pktmbuf_free(m); + continue; + } + ip->pkts[j++] = m; + } + ip->num = j; +} + +static void +split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num) +{ + uint32_t i, n4, n6; + struct ip *ip; + struct rte_mbuf *m; + + n4 = trf->ip4.num; + n6 = trf->ip6.num; + + for (i = 0; i < num; i++) { + + m = mb[i]; + ip = rte_pktmbuf_mtod(m, struct ip *); + + if (ip->ip_v == IPVERSION) { + trf->ip4.pkts[n4] = m; + trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m, + uint8_t *, offsetof(struct ip, ip_p)); + n4++; + } else if (ip->ip_v == IP6_VERSION) { + trf->ip6.pkts[n6] = m; + trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m, + uint8_t *, + offsetof(struct ip6_hdr, ip6_nxt)); + n6++; + } else + rte_pktmbuf_free(m); + } + + trf->ip4.num = n4; + trf->ip6.num = n6; +} + + +static inline void +process_pkts_inbound(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + uint16_t nb_pkts_in, n_ip4, n_ip6; + + n_ip4 = traffic->ip4.num; + n_ip6 = traffic->ip6.num; + + if (app_sa_prm.enable == 0) { + nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.num, MAX_PKT_BURST); + 
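/* Decrypted packets are handed back in the ipsec burst; split them into the ip4/ip6 lists so the SP/SA checks below cover them too. */ +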
split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in); + } else { + inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts, + traffic->ipsec.saptr, traffic->ipsec.num); + ipsec_process(ipsec_ctx, traffic); + } + + inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4, + n_ip4); + + inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6, + n_ip6); +} + +static inline void +outbound_sp(struct sp_ctx *sp, struct traffic_type *ip, + struct traffic_type *ipsec) +{ + struct rte_mbuf *m; + uint32_t i, j, sa_idx; + + if (ip->num == 0 || sp == NULL) + return; + + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, + ip->num, DEFAULT_MAX_CATEGORIES); + + j = 0; + for (i = 0; i < ip->num; i++) { + m = ip->pkts[i]; + sa_idx = ip->res[i] - 1; + if (ip->res[i] == DISCARD) + rte_pktmbuf_free(m); + else if (ip->res[i] == BYPASS) + ip->pkts[j++] = m; + else { + ipsec->res[ipsec->num] = sa_idx; + ipsec->pkts[ipsec->num++] = m; + } + } + ip->num = j; +} + +static inline void +process_pkts_outbound(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint16_t idx, nb_pkts_out, i; + + /* Drop any IPsec traffic from protected ports */ + for (i = 0; i < traffic->ipsec.num; i++) + rte_pktmbuf_free(traffic->ipsec.pkts[i]); + + traffic->ipsec.num = 0; + + outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec); + + outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec); + + if (app_sa_prm.enable == 0) { + + nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.res, traffic->ipsec.num, + MAX_PKT_BURST); + + for (i = 0; i < nb_pkts_out; i++) { + m = traffic->ipsec.pkts[i]; + struct ip *ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + idx = traffic->ip4.num++; + traffic->ip4.pkts[idx] = m; + } else { + idx = traffic->ip6.num++; + traffic->ip6.pkts[idx] = m; + } + } + } else { + outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res, + traffic->ipsec.saptr, traffic->ipsec.num); + ipsec_process(ipsec_ctx, traffic); + } +} + +static inline void +process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint32_t nb_pkts_in, i, idx; + + /* Drop any IPv4 traffic from unprotected ports */ + for (i = 0; i < traffic->ip4.num; i++) + rte_pktmbuf_free(traffic->ip4.pkts[i]); + + traffic->ip4.num = 0; + + /* Drop any IPv6 traffic from unprotected ports */ + for (i = 0; i < traffic->ip6.num; i++) + rte_pktmbuf_free(traffic->ip6.pkts[i]); + + traffic->ip6.num = 0; + + if (app_sa_prm.enable == 0) { + + nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.num, MAX_PKT_BURST); + + for (i = 0; i < nb_pkts_in; i++) { + m = traffic->ipsec.pkts[i]; + struct ip *ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + idx = traffic->ip4.num++; + traffic->ip4.pkts[idx] = m; + } else { + idx = traffic->ip6.num++; + traffic->ip6.pkts[idx] = m; + } + } + } else { + inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts, + traffic->ipsec.saptr, traffic->ipsec.num); + ipsec_process(ipsec_ctx, traffic); + } +} + +static inline void +process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint32_t nb_pkts_out, i, n; + struct ip *ip; + + /* Drop any IPsec traffic from protected ports */ + for (i = 0; i < traffic->ipsec.num; i++) + rte_pktmbuf_free(traffic->ipsec.pkts[i]); + + n = 0; + + for (i = 0; i < traffic->ip4.num; i++) { + traffic->ipsec.pkts[n] = 
traffic->ip4.pkts[i]; + traffic->ipsec.res[n++] = single_sa_idx; + } + + for (i = 0; i < traffic->ip6.num; i++) { + traffic->ipsec.pkts[n] = traffic->ip6.pkts[i]; + traffic->ipsec.res[n++] = single_sa_idx; + } + + traffic->ip4.num = 0; + traffic->ip6.num = 0; + traffic->ipsec.num = n; + + if (app_sa_prm.enable == 0) { + + nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.res, traffic->ipsec.num, + MAX_PKT_BURST); + + /* They all use the same SA (ip4 or ip6 tunnel) */ + m = traffic->ipsec.pkts[0]; + ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + traffic->ip4.num = nb_pkts_out; + for (i = 0; i < nb_pkts_out; i++) + traffic->ip4.pkts[i] = traffic->ipsec.pkts[i]; + } else { + traffic->ip6.num = nb_pkts_out; + for (i = 0; i < nb_pkts_out; i++) + traffic->ip6.pkts[i] = traffic->ipsec.pkts[i]; + } + } else { + outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res, + traffic->ipsec.saptr, traffic->ipsec.num); + ipsec_process(ipsec_ctx, traffic); + } +} + +static inline int32_t +get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6) +{ + struct ipsec_mbuf_metadata *priv; + struct ipsec_sa *sa; + + priv = get_priv(pkt); + + sa = priv->sa; + if (unlikely(sa == NULL)) { + RTE_LOG(ERR, IPSEC, "SA not saved in private data\n"); + goto fail; + } + + if (is_ipv6) + return sa->portid; + + /* else */ + return (sa->portid | RTE_LPM_LOOKUP_SUCCESS); + +fail: + if (is_ipv6) + return -1; + + /* else */ + return 0; +} + +static inline void +route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +{ + uint32_t hop[MAX_PKT_BURST * 2]; + uint32_t dst_ip[MAX_PKT_BURST * 2]; + int32_t pkt_hop = 0; + uint16_t i, offset; + uint16_t lpm_pkts = 0; + + if (nb_pkts == 0) + return; + + /* Need to do an LPM lookup for non-inline packets. Inline packets will + * have port ID in the SA + */ + + for (i = 0; i < nb_pkts; i++) { + if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) { + /* Security offload not enabled. So an LPM lookup is + * required to get the hop + */ + offset = offsetof(struct ip, ip_dst); + dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i], + uint32_t *, offset); + dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]); + lpm_pkts++; + } + } + + rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts); + + lpm_pkts = 0; + + for (i = 0; i < nb_pkts; i++) { + if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) { + /* Read hop from the SA */ + pkt_hop = get_hop_for_offload_pkt(pkts[i], 0); + } else { + /* Need to use hop returned by lookup */ + pkt_hop = hop[lpm_pkts++]; + } + + if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) { + rte_pktmbuf_free(pkts[i]); + continue; + } + send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP); + } +} + +static inline void +route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +{ + int32_t hop[MAX_PKT_BURST * 2]; + uint8_t dst_ip[MAX_PKT_BURST * 2][16]; + uint8_t *ip6_dst; + int32_t pkt_hop = 0; + uint16_t i, offset; + uint16_t lpm_pkts = 0; + + if (nb_pkts == 0) + return; + + /* Need to do an LPM lookup for non-inline packets. Inline packets will + * have port ID in the SA + */ + + for (i = 0; i < nb_pkts; i++) { + if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) { + /* Security offload not enabled. 
So an LPM lookup is + * required to get the hop + */ + offset = offsetof(struct ip6_hdr, ip6_dst); + ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, + offset); + memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16); + lpm_pkts++; + } + } + + rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop, + lpm_pkts); + + lpm_pkts = 0; + + for (i = 0; i < nb_pkts; i++) { + if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) { + /* Read hop from the SA */ + pkt_hop = get_hop_for_offload_pkt(pkts[i], 1); + } else { + /* Need to use hop returned by lookup */ + pkt_hop = hop[lpm_pkts++]; + } + + if (pkt_hop == -1) { + rte_pktmbuf_free(pkts[i]); + continue; + } + send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6); + } +} + +static inline void +process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, + uint8_t nb_pkts, uint16_t portid) +{ + struct ipsec_traffic traffic; + + prepare_traffic(pkts, &traffic, nb_pkts); + + if (unlikely(single_sa)) { + if (is_unprotected_port(portid)) + process_pkts_inbound_nosp(&qconf->inbound, &traffic); + else + process_pkts_outbound_nosp(&qconf->outbound, &traffic); + } else { + if (is_unprotected_port(portid)) + process_pkts_inbound(&qconf->inbound, &traffic); + else + process_pkts_outbound(&qconf->outbound, &traffic); + } + + route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num); + route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num); +} + +static inline void +drain_tx_buffers(struct lcore_conf *qconf) +{ + struct buffer *buf; + uint32_t portid; + + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { + buf = &qconf->tx_mbufs[portid]; + if (buf->len == 0) + continue; + send_burst(qconf, buf->len, portid); + buf->len = 0; + } +} + +static inline void +drain_crypto_buffers(struct lcore_conf *qconf) +{ + uint32_t i; + struct ipsec_ctx *ctx; + + /* drain inbound buffers*/ + ctx = &qconf->inbound; + for (i = 0; i != ctx->nb_qps; i++) { + if (ctx->tbl[i].len != 0) + enqueue_cop_burst(ctx->tbl + i); + } + + /* drain outbound buffers*/ + ctx = &qconf->outbound; + for (i = 0; i != ctx->nb_qps; i++) { + if (ctx->tbl[i].len != 0) + enqueue_cop_burst(ctx->tbl + i); + } +} + +static void +drain_inbound_crypto_queues(const struct lcore_conf *qconf, + struct ipsec_ctx *ctx) +{ + uint32_t n; + struct ipsec_traffic trf; + + if (app_sa_prm.enable == 0) { + + /* dequeue packets from crypto-queue */ + n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts, + RTE_DIM(trf.ipsec.pkts)); + + trf.ip4.num = 0; + trf.ip6.num = 0; + + /* split traffic by ipv4-ipv6 */ + split46_traffic(&trf, trf.ipsec.pkts, n); + } else + ipsec_cqp_process(ctx, &trf); + + /* process ipv4 packets */ + if (trf.ip4.num != 0) { + inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0); + route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num); + } + + /* process ipv6 packets */ + if (trf.ip6.num != 0) { + inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0); + route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num); + } +} + +static void +drain_outbound_crypto_queues(const struct lcore_conf *qconf, + struct ipsec_ctx *ctx) +{ + uint32_t n; + struct ipsec_traffic trf; + + if (app_sa_prm.enable == 0) { + + /* dequeue packets from crypto-queue */ + n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts, + RTE_DIM(trf.ipsec.pkts)); + + trf.ip4.num = 0; + trf.ip6.num = 0; + + /* split traffic by ipv4-ipv6 */ + split46_traffic(&trf, trf.ipsec.pkts, n); + } else + ipsec_cqp_process(ctx, &trf); + + /* process ipv4 packets */ + if (trf.ip4.num != 0) + route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num); + 
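+ /* As with IPv4 above, the completed IPv6 packets are routed directly; the outbound SP lookup was already done before the crypto enqueue. */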
+ /* process ipv6 packets */ + if (trf.ip6.num != 0) + route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num); +} + +/* main processing loop */ +void +ipsec_poll_mode_worker(void) +{ + struct rte_mbuf *pkts[MAX_PKT_BURST]; + uint32_t lcore_id; + uint64_t prev_tsc, diff_tsc, cur_tsc; + int32_t i, nb_rx; + uint16_t portid; + uint8_t queueid; + struct lcore_conf *qconf; + int32_t rc, socket_id; + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) + / US_PER_S * BURST_TX_DRAIN_US; + struct lcore_rx_queue *rxql; + + prev_tsc = 0; + lcore_id = rte_lcore_id(); + qconf = &lcore_conf[lcore_id]; + rxql = qconf->rx_queue_list; + socket_id = rte_lcore_to_socket_id(lcore_id); + + qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4; + qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6; + qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in; + qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in; + qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in; + qconf->inbound.cdev_map = cdev_map_in; + qconf->inbound.session_pool = socket_ctx[socket_id].session_pool; + qconf->inbound.session_priv_pool = + socket_ctx[socket_id].session_priv_pool; + qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out; + qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out; + qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out; + qconf->outbound.cdev_map = cdev_map_out; + qconf->outbound.session_pool = socket_ctx[socket_id].session_pool; + qconf->outbound.session_priv_pool = + socket_ctx[socket_id].session_priv_pool; + qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool; + qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir; + + rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz); + if (rc != 0) { + RTE_LOG(ERR, IPSEC, + "SAD cache init on lcore %u, failed with code: %d\n", + lcore_id, rc); + return; + } + + if (qconf->nb_rx_queue == 0) { + RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n", + lcore_id); + return; + } + + RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->nb_rx_queue; i++) { + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + RTE_LOG(INFO, IPSEC, + " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", + lcore_id, portid, queueid); + } + + while (!force_quit) { + cur_tsc = rte_rdtsc(); + + /* TX queue buffer drain */ + diff_tsc = cur_tsc - prev_tsc; + + if (unlikely(diff_tsc > drain_tsc)) { + drain_tx_buffers(qconf); + drain_crypto_buffers(qconf); + prev_tsc = cur_tsc; + } + + for (i = 0; i < qconf->nb_rx_queue; ++i) { + + /* Read packets from RX queues */ + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + nb_rx = rte_eth_rx_burst(portid, queueid, + pkts, MAX_PKT_BURST); + + if (nb_rx > 0) + process_pkts(qconf, pkts, nb_rx, portid); + + /* dequeue and process completed crypto-ops */ + if (is_unprotected_port(portid)) + drain_inbound_crypto_queues(qconf, + &qconf->inbound); + else + drain_outbound_crypto_queues(qconf, + &qconf->outbound); + } + } +} + +int +check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) +{ + uint16_t i; + uint16_t portid; + uint8_t queueid; + + for (i = 0; i < nb_lcore_params; ++i) { + portid = lcore_params_array[i].port_id; + if (portid == fdir_portid) { + queueid = lcore_params_array[i].queue_id; + if (queueid == fdir_qid) + break; + } + + if (i == nb_lcore_params - 1) + return -1; + } + + return 1; +} + +static int32_t +check_poll_mode_params(struct eh_conf *eh_conf) +{ + uint8_t lcore; + uint16_t portid; + uint16_t i; + int32_t socket_id; + + if (!eh_conf) + return -EINVAL; + + if (eh_conf->mode != 
EH_PKT_TRANSFER_MODE_POLL) + return 0; + + if (lcore_params == NULL) { + printf("Error: No port/queue/core mappings\n"); + return -1; + } + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { + printf("error: lcore %hhu is not enabled in " + "lcore mask\n", lcore); + return -1; + } + socket_id = rte_lcore_to_socket_id(lcore); + if (socket_id != 0 && numa_on == 0) { + printf("warning: lcore %hhu is on socket %d " + "with numa off\n", + lcore, socket_id); + } + portid = lcore_params[i].port_id; + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("port %u is not enabled in port mask\n", portid); + return -1; + } + if (!rte_eth_dev_is_valid_port(portid)) { + printf("port %u is not present on the board\n", portid); + return -1; + } + } + return 0; +} + +static uint8_t +get_port_nb_rx_queues(const uint16_t port) +{ + int32_t queue = -1; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + if (lcore_params[i].port_id == port && + lcore_params[i].queue_id > queue) + queue = lcore_params[i].queue_id; + } + return (uint8_t)(++queue); +} + +static int32_t +init_lcore_rx_queues(void) +{ + uint16_t i, nb_rx_queue; + uint8_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + nb_rx_queue = lcore_conf[lcore].nb_rx_queue; + if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { + printf("error: too many queues (%u) for lcore: %u\n", + nb_rx_queue + 1, lcore); + return -1; + } + lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = + lcore_params[i].port_id; + lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id = + lcore_params[i].queue_id; + lcore_conf[lcore].nb_rx_queue++; + } + return 0; +} + +/* display usage */ +static void +print_usage(const char *prgname) +{ + fprintf(stderr, "%s [EAL options] --" + " -p PORTMASK" + " [-P]" + " [-u PORTMASK]" + " [-j FRAMESIZE]" + " [-l]" + " [-w REPLAY_WINDOW_SIZE]" + " [-e]" + " [-a]" + " [-c]" + " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]" + " -f CONFIG_FILE" + " --config (port,queue,lcore)[,(port,queue,lcore)]" + " [--single-sa SAIDX]" + " [--cryptodev_mask MASK]" + " [--transfer-mode MODE]" + " [--event-schedule-type TYPE]" + " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]" + " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]" + " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]" + " [--" CMD_LINE_OPT_MTU " MTU]" + "\n\n" + " -p PORTMASK: Hexadecimal bitmask of ports to configure\n" + " -P : Enable promiscuous mode\n" + " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n" + " -j FRAMESIZE: Data buffer size, minimum (and default)\n" + " value: RTE_MBUF_DEFAULT_BUF_SIZE\n" + " -l enables code-path that uses librte_ipsec\n" + " -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n" + " size for each SA\n" + " -e enables ESN\n" + " -a enables SA SQN atomic behaviour\n" + " -c specifies inbound SAD cache size,\n" + " zero value disables the cache (default value: 128)\n" + " -s number of mbufs in packet pool, if not specified number\n" + " of mbufs will be calculated based on number of cores,\n" + " ports and crypto queues\n" + " -f CONFIG_FILE: Configuration file\n" + " --config (port,queue,lcore): Rx queue configuration. 
In poll\n" + " mode determines which queues from\n" + " which ports are mapped to which cores.\n" + " In event mode this option is not used\n" + " as packets are dynamically scheduled\n" + " to cores by HW.\n" + " --single-sa SAIDX: In poll mode use single SA index for\n" + " outbound traffic, bypassing the SP\n" + " In event mode selects driver submode,\n" + " SA index value is ignored\n" + " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n" + " devices to configure\n" + " --transfer-mode MODE\n" + " \"poll\" : Packet transfer via polling (default)\n" + " \"event\" : Packet transfer via event device\n" + " --event-schedule-type TYPE queue schedule type, used only when\n" + " transfer mode is set to event\n" + " \"ordered\" : Ordered (default)\n" + " \"atomic\" : Atomic\n" + " \"parallel\" : Parallel\n" + " --" CMD_LINE_OPT_RX_OFFLOAD + ": bitmask of the RX HW offload capabilities to enable/use\n" + " (DEV_RX_OFFLOAD_*)\n" + " --" CMD_LINE_OPT_TX_OFFLOAD + ": bitmask of the TX HW offload capabilities to enable/use\n" + " (DEV_TX_OFFLOAD_*)\n" + " --" CMD_LINE_OPT_REASSEMBLE " NUM" + ": max number of entries in reassemble(fragment) table\n" + " (zero (default value) disables reassembly)\n" + " --" CMD_LINE_OPT_MTU " MTU" + ": MTU value on all ports (default value: 1500)\n" + " outgoing packets with bigger size will be fragmented\n" + " incoming packets with bigger size will be discarded\n" + " --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS" + ": fragments lifetime in nanoseconds, default\n" + " and maximum value is 10.000.000.000 ns (10 s)\n" + "\n", + prgname); +} + +static int +parse_mask(const char *str, uint64_t *val) +{ + char *end; + unsigned long t; + + errno = 0; + t = strtoul(str, &end, 0); + if (errno != 0 || end[0] != 0) + return -EINVAL; + + *val = t; + return 0; +} + +static int32_t +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + if ((pm == 0) && errno) + return -1; + + return pm; +} + +static int64_t +parse_decimal(const char *str) +{ + char *end = NULL; + uint64_t num; + + num = strtoull(str, &end, 10); + if ((str[0] == '\0') || (end == NULL) || (*end != '\0') + || num > INT64_MAX) + return -1; + + return num; +} + +static int32_t +parse_config(const char *q_arg) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + enum fieldnames { + FLD_PORT = 0, + FLD_QUEUE, + FLD_LCORE, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + int32_t i; + uint32_t size; + + nb_lcore_params = 0; + + while ((p = strchr(p0, '(')) != NULL) { + ++p; + p0 = strchr(p, ')'); + if (p0 == NULL) + return -1; + + size = p0 - p; + if (size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != + _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + if (nb_lcore_params >= MAX_LCORE_PARAMS) { + printf("exceeded max number of lcore params: %hu\n", + nb_lcore_params); + return -1; + } + lcore_params_array[nb_lcore_params].port_id = + (uint8_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = + (uint8_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = + (uint8_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = 
lcore_params_array; + return 0; +} + +static void +print_app_sa_prm(const struct app_sa_prm *prm) +{ + printf("librte_ipsec usage: %s\n", + (prm->enable == 0) ? "disabled" : "enabled"); + + printf("replay window size: %u\n", prm->window_size); + printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled"); + printf("SA flags: %#" PRIx64 "\n", prm->flags); + printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns); +} + +static int +parse_transfer_mode(struct eh_conf *conf, const char *optarg) +{ + if (!strcmp(CMD_LINE_ARG_POLL, optarg)) + conf->mode = EH_PKT_TRANSFER_MODE_POLL; + else if (!strcmp(CMD_LINE_ARG_EVENT, optarg)) + conf->mode = EH_PKT_TRANSFER_MODE_EVENT; + else { + printf("Unsupported packet transfer mode\n"); + return -EINVAL; + } + + return 0; +} + +static int +parse_schedule_type(struct eh_conf *conf, const char *optarg) +{ + struct eventmode_conf *em_conf = NULL; + + /* Get eventmode conf */ + em_conf = conf->mode_params; + + if (!strcmp(CMD_LINE_ARG_ORDERED, optarg)) + em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED; + else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg)) + em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC; + else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg)) + em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL; + else { + printf("Unsupported queue schedule type\n"); + return -EINVAL; + } + + return 0; +} + +static int32_t +parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) +{ + int opt; + int64_t ret; + char **argvopt; + int32_t option_index; + char *prgname = argv[0]; + int32_t f_present = 0; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:s:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask == 0) { + printf("invalid portmask\n"); + print_usage(prgname); + return -1; + } + break; + case 'P': + printf("Promiscuous mode selected\n"); + promiscuous_on = 1; + break; + case 'u': + unprotected_port_mask = parse_portmask(optarg); + if (unprotected_port_mask == 0) { + printf("invalid unprotected portmask\n"); + print_usage(prgname); + return -1; + } + break; + case 'f': + if (f_present == 1) { + printf("\"-f\" option present more than " + "once!\n"); + print_usage(prgname); + return -1; + } + cfgfile = optarg; + f_present = 1; + break; + + case 's': + ret = parse_decimal(optarg); + if (ret < 0) { + printf("Invalid number of buffers in a pool: " + "%s\n", optarg); + print_usage(prgname); + return -1; + } + + nb_bufs_in_pool = ret; + break; + + case 'j': + ret = parse_decimal(optarg); + if (ret < RTE_MBUF_DEFAULT_BUF_SIZE || + ret > UINT16_MAX) { + printf("Invalid frame buffer size value: %s\n", + optarg); + print_usage(prgname); + return -1; + } + frame_buf_size = ret; + printf("Custom frame buffer size %u\n", frame_buf_size); + break; + case 'l': + app_sa_prm.enable = 1; + break; + case 'w': + app_sa_prm.window_size = parse_decimal(optarg); + break; + case 'e': + app_sa_prm.enable_esn = 1; + break; + case 'a': + app_sa_prm.enable = 1; + app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM; + break; + case 'c': + ret = parse_decimal(optarg); + if (ret < 0) { + printf("Invalid SA cache size: %s\n", optarg); + print_usage(prgname); + return -1; + } + app_sa_prm.cache_sz = ret; + break; + case CMD_LINE_OPT_CONFIG_NUM: + ret = parse_config(optarg); + if (ret) { + printf("Invalid config\n"); + print_usage(prgname); + return -1; + } + break; + case CMD_LINE_OPT_SINGLE_SA_NUM: + ret = parse_decimal(optarg); + if (ret == -1 || ret > 
UINT32_MAX) { + printf("Invalid argument[sa_idx]\n"); + print_usage(prgname); + return -1; + } + + /* else */ + single_sa = 1; + single_sa_idx = ret; + eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER; + printf("Configured with single SA index %u\n", + single_sa_idx); + break; + case CMD_LINE_OPT_CRYPTODEV_MASK_NUM: + ret = parse_portmask(optarg); + if (ret == -1) { + printf("Invalid argument[portmask]\n"); + print_usage(prgname); + return -1; + } + + /* else */ + enabled_cryptodev_mask = ret; + break; + + case CMD_LINE_OPT_TRANSFER_MODE_NUM: + ret = parse_transfer_mode(eh_conf, optarg); + if (ret < 0) { + printf("Invalid packet transfer mode\n"); + print_usage(prgname); + return -1; + } + break; + + case CMD_LINE_OPT_SCHEDULE_TYPE_NUM: + ret = parse_schedule_type(eh_conf, optarg); + if (ret < 0) { + printf("Invalid queue schedule type\n"); + print_usage(prgname); + return -1; + } + break; + + case CMD_LINE_OPT_RX_OFFLOAD_NUM: + ret = parse_mask(optarg, &dev_rx_offload); + if (ret != 0) { + printf("Invalid argument for \'%s\': %s\n", + CMD_LINE_OPT_RX_OFFLOAD, optarg); + print_usage(prgname); + return -1; + } + break; + case CMD_LINE_OPT_TX_OFFLOAD_NUM: + ret = parse_mask(optarg, &dev_tx_offload); + if (ret != 0) { + printf("Invalid argument for \'%s\': %s\n", + CMD_LINE_OPT_TX_OFFLOAD, optarg); + print_usage(prgname); + return -1; + } + break; + case CMD_LINE_OPT_REASSEMBLE_NUM: + ret = parse_decimal(optarg); + if (ret < 0 || ret > UINT32_MAX) { + printf("Invalid argument for \'%s\': %s\n", + CMD_LINE_OPT_REASSEMBLE, optarg); + print_usage(prgname); + return -1; + } + frag_tbl_sz = ret; + break; + case CMD_LINE_OPT_MTU_NUM: + ret = parse_decimal(optarg); + if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) { + printf("Invalid argument for \'%s\': %s\n", + CMD_LINE_OPT_MTU, optarg); + print_usage(prgname); + return -1; + } + mtu_size = ret; + break; + case CMD_LINE_OPT_FRAG_TTL_NUM: + ret = parse_decimal(optarg); + if (ret < 0 || ret > MAX_FRAG_TTL_NS) { + printf("Invalid argument for \'%s\': %s\n", + CMD_LINE_OPT_MTU, optarg); + print_usage(prgname); + return -1; + } + frag_ttl_ns = ret; + break; + default: + print_usage(prgname); + return -1; + } + } + + if (f_present == 0) { + printf("Mandatory option \"-f\" not present\n"); + return -1; + } + + /* check do we need to enable multi-seg support */ + if (multi_seg_required()) { + /* legacy mode doesn't support multi-seg */ + app_sa_prm.enable = 1; + printf("frame buf size: %u, mtu: %u, " + "number of reassemble entries: %u\n" + "multi-segment support is required\n", + frame_buf_size, mtu_size, frag_tbl_sz); + } + + print_app_sa_prm(&app_sa_prm); + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 1; /* reset getopt lib */ + return ret; +} + +static void +print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr) +{ + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); + printf("%s%s", name, buf); +} + +/* + * Update destination ethaddr for the port. 
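add_dst_ethaddr() below is the setter for the per-port destination MAC: it packs the address into the uint64_t dst field of ethaddr_tbl[], which the forwarding path later writes into outgoing Ethernet headers. A minimal usage sketch with a made-up port and address:

    /* Hypothetical example: forward packets leaving port 0 to aa:bb:cc:dd:ee:ff. */
    struct rte_ether_addr nh = {
            .addr_bytes = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }
    };

    if (add_dst_ethaddr(0, &nh) != 0)
            rte_exit(EXIT_FAILURE, "invalid port for destination ethaddr\n");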
+ */ +int +add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr) +{ + if (port >= RTE_DIM(ethaddr_tbl)) + return -EINVAL; + + ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr); + return 0; +} + +/* Check the link status of all ports in up to 9s, and print them finally */ +static void +check_all_ports_link_status(uint32_t port_mask) +{ +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ + uint16_t portid; + uint8_t count, all_ports_up, print_flag = 0; + struct rte_eth_link link; + int ret; + + printf("\nChecking link status"); + fflush(stdout); + for (count = 0; count <= MAX_CHECK_TIME; count++) { + all_ports_up = 1; + RTE_ETH_FOREACH_DEV(portid) { + if ((port_mask & (1 << portid)) == 0) + continue; + memset(&link, 0, sizeof(link)); + ret = rte_eth_link_get_nowait(portid, &link); + if (ret < 0) { + all_ports_up = 0; + if (print_flag == 1) + printf("Port %u link get failed: %s\n", + portid, rte_strerror(-ret)); + continue; + } + /* print link status if flag set */ + if (print_flag == 1) { + if (link.link_status) + printf( + "Port%d Link Up - speed %u Mbps -%s\n", + portid, link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex")); + else + printf("Port %d Link Down\n", portid); + continue; + } + /* clear all_ports_up flag if any link down */ + if (link.link_status == ETH_LINK_DOWN) { + all_ports_up = 0; + break; + } + } + /* after finally printing all link status, get out */ + if (print_flag == 1) + break; + + if (all_ports_up == 0) { + printf("."); + fflush(stdout); + rte_delay_ms(CHECK_INTERVAL); + } + + /* set the print_flag if all ports up or timeout */ + if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { + print_flag = 1; + printf("done\n"); + } + } +} + +static int32_t +add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id, + uint16_t qp, struct lcore_params *params, + struct ipsec_ctx *ipsec_ctx, + const struct rte_cryptodev_capabilities *cipher, + const struct rte_cryptodev_capabilities *auth, + const struct rte_cryptodev_capabilities *aead) +{ + int32_t ret = 0; + unsigned long i; + struct cdev_key key = { 0 }; + + key.lcore_id = params->lcore_id; + if (cipher) + key.cipher_algo = cipher->sym.cipher.algo; + if (auth) + key.auth_algo = auth->sym.auth.algo; + if (aead) + key.aead_algo = aead->sym.aead.algo; + + ret = rte_hash_lookup(map, &key); + if (ret != -ENOENT) + return 0; + + for (i = 0; i < ipsec_ctx->nb_qps; i++) + if (ipsec_ctx->tbl[i].id == cdev_id) + break; + + if (i == ipsec_ctx->nb_qps) { + if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) { + printf("Maximum number of crypto devices assigned to " + "a core, increase MAX_QP_PER_LCORE value\n"); + return 0; + } + ipsec_ctx->tbl[i].id = cdev_id; + ipsec_ctx->tbl[i].qp = qp; + ipsec_ctx->nb_qps++; + printf("%s cdev mapping: lcore %u using cdev %u qp %u " + "(cdev_id_qp %lu)\n", str, key.lcore_id, + cdev_id, qp, i); + } + + ret = rte_hash_add_key_data(map, &key, (void *)i); + if (ret < 0) { + printf("Faled to insert cdev mapping for (lcore %u, " + "cdev %u, qp %u), errno %d\n", + key.lcore_id, ipsec_ctx->tbl[i].id, + ipsec_ctx->tbl[i].qp, ret); + return 0; + } + + return 1; +} + +static int32_t +add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id, + uint16_t qp, struct lcore_params *params) +{ + int32_t ret = 0; + const struct rte_cryptodev_capabilities *i, *j; + struct rte_hash *map; + struct lcore_conf *qconf; + struct ipsec_ctx *ipsec_ctx; + const char *str; + + qconf = 
&lcore_conf[params->lcore_id]; + + if ((unprotected_port_mask & (1 << params->port_id)) == 0) { + map = cdev_map_out; + ipsec_ctx = &qconf->outbound; + str = "Outbound"; + } else { + map = cdev_map_in; + ipsec_ctx = &qconf->inbound; + str = "Inbound"; + } + + /* Required cryptodevs with operation chainning */ + if (!(dev_info->feature_flags & + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) + return ret; + + for (i = dev_info->capabilities; + i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) { + if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) { + ret |= add_mapping(map, str, cdev_id, qp, params, + ipsec_ctx, NULL, NULL, i); + continue; + } + + if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) + continue; + + for (j = dev_info->capabilities; + j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) { + if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) + continue; + + ret |= add_mapping(map, str, cdev_id, qp, params, + ipsec_ctx, i, j, NULL); + } + } + + return ret; +} + +/* Check if the device is enabled by cryptodev_mask */ +static int +check_cryptodev_mask(uint8_t cdev_id) +{ + if (enabled_cryptodev_mask & (1 << cdev_id)) + return 0; + + return -1; +} + +static uint16_t +cryptodevs_init(uint16_t req_queue_num) +{ + struct rte_cryptodev_config dev_conf; + struct rte_cryptodev_qp_conf qp_conf; + uint16_t idx, max_nb_qps, qp, total_nb_qps, i; + int16_t cdev_id; + struct rte_hash_parameters params = { 0 }; + + const uint64_t mseg_flag = multi_seg_required() ? + RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0; + + params.entries = CDEV_MAP_ENTRIES; + params.key_len = sizeof(struct cdev_key); + params.hash_func = rte_jhash; + params.hash_func_init_val = 0; + params.socket_id = rte_socket_id(); + + params.name = "cdev_map_in"; + cdev_map_in = rte_hash_create(¶ms); + if (cdev_map_in == NULL) + rte_panic("Failed to create cdev_map hash table, errno = %d\n", + rte_errno); + + params.name = "cdev_map_out"; + cdev_map_out = rte_hash_create(¶ms); + if (cdev_map_out == NULL) + rte_panic("Failed to create cdev_map hash table, errno = %d\n", + rte_errno); + + printf("lcore/cryptodev/qp mappings:\n"); + + idx = 0; + total_nb_qps = 0; + for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) { + struct rte_cryptodev_info cdev_info; + + if (check_cryptodev_mask((uint8_t)cdev_id)) + continue; + + rte_cryptodev_info_get(cdev_id, &cdev_info); + + if ((mseg_flag & cdev_info.feature_flags) != mseg_flag) + rte_exit(EXIT_FAILURE, + "Device %hd does not support \'%s\' feature\n", + cdev_id, + rte_cryptodev_get_feature_name(mseg_flag)); + + if (nb_lcore_params > cdev_info.max_nb_queue_pairs) + max_nb_qps = cdev_info.max_nb_queue_pairs; + else + max_nb_qps = nb_lcore_params; + + qp = 0; + i = 0; + while (qp < max_nb_qps && i < nb_lcore_params) { + if (add_cdev_mapping(&cdev_info, cdev_id, qp, + &lcore_params[idx])) + qp++; + idx++; + idx = idx % nb_lcore_params; + i++; + } + + qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp)); + if (qp == 0) + continue; + + total_nb_qps += qp; + dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id); + dev_conf.nb_queue_pairs = qp; + dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO; + + uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions; + if (dev_max_sess != 0 && + dev_max_sess < get_nb_crypto_sessions()) + rte_exit(EXIT_FAILURE, + "Device does not support at least %u " + "sessions", get_nb_crypto_sessions()); + + if (rte_cryptodev_configure(cdev_id, &dev_conf)) + 
rte_panic("Failed to initialize cryptodev %u\n", + cdev_id); + + qp_conf.nb_descriptors = CDEV_QUEUE_DESC; + qp_conf.mp_session = + socket_ctx[dev_conf.socket_id].session_pool; + qp_conf.mp_session_private = + socket_ctx[dev_conf.socket_id].session_priv_pool; + for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++) + if (rte_cryptodev_queue_pair_setup(cdev_id, qp, + &qp_conf, dev_conf.socket_id)) + rte_panic("Failed to setup queue %u for " + "cdev_id %u\n", 0, cdev_id); + + if (rte_cryptodev_start(cdev_id)) + rte_panic("Failed to start cryptodev %u\n", + cdev_id); + } + + printf("\n"); + + return total_nb_qps; +} + +static void +port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) +{ + uint32_t frame_size; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf *txconf; + uint16_t nb_tx_queue, nb_rx_queue; + uint16_t tx_queueid, rx_queueid, queue, lcore_id; + int32_t ret, socket_id; + struct lcore_conf *qconf; + struct rte_ether_addr ethaddr; + struct rte_eth_conf local_port_conf = port_conf; + + ret = rte_eth_dev_info_get(portid, &dev_info); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "Error during getting device (port %u) info: %s\n", + portid, strerror(-ret)); + + /* limit allowed HW offloafs, as user requested */ + dev_info.rx_offload_capa &= dev_rx_offload; + dev_info.tx_offload_capa &= dev_tx_offload; + + printf("Configuring device port %u:\n", portid); + + ret = rte_eth_macaddr_get(portid, ðaddr); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "Error getting MAC address (port %u): %s\n", + portid, rte_strerror(-ret)); + + ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ðaddr); + print_ethaddr("Address: ", ðaddr); + printf("\n"); + + nb_rx_queue = get_port_nb_rx_queues(portid); + nb_tx_queue = nb_lcores; + + if (nb_rx_queue > dev_info.max_rx_queues) + rte_exit(EXIT_FAILURE, "Error: queue %u not available " + "(max rx queue is %u)\n", + nb_rx_queue, dev_info.max_rx_queues); + + if (nb_tx_queue > dev_info.max_tx_queues) + rte_exit(EXIT_FAILURE, "Error: queue %u not available " + "(max tx queue is %u)\n", + nb_tx_queue, dev_info.max_tx_queues); + + printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n", + nb_rx_queue, nb_tx_queue); + + frame_size = MTU_TO_FRAMELEN(mtu_size); + if (frame_size > local_port_conf.rxmode.max_rx_pkt_len) + local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + local_port_conf.rxmode.max_rx_pkt_len = frame_size; + + if (multi_seg_required()) { + local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER; + local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + } + + local_port_conf.rxmode.offloads |= req_rx_offloads; + local_port_conf.txmode.offloads |= req_tx_offloads; + + /* Check that all required capabilities are supported */ + if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) != + local_port_conf.rxmode.offloads) + rte_exit(EXIT_FAILURE, + "Error: port %u required RX offloads: 0x%" PRIx64 + ", avaialbe RX offloads: 0x%" PRIx64 "\n", + portid, local_port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + + if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) != + local_port_conf.txmode.offloads) + rte_exit(EXIT_FAILURE, + "Error: port %u required TX offloads: 0x%" PRIx64 + ", avaialbe TX offloads: 0x%" PRIx64 "\n", + portid, local_port_conf.txmode.offloads, + dev_info.tx_offload_capa); + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + local_port_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) + 
local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + + printf("port %u configurng rx_offloads=0x%" PRIx64 + ", tx_offloads=0x%" PRIx64 "\n", + portid, local_port_conf.rxmode.offloads, + local_port_conf.txmode.offloads); + + local_port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; + if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != + port_conf.rx_adv_conf.rss_conf.rss_hf) { + printf("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + portid, + port_conf.rx_adv_conf.rss_conf.rss_hf, + local_port_conf.rx_adv_conf.rss_conf.rss_hf); + } + + ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue, + &local_port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: " + "err=%d, port=%d\n", ret, portid); + + ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: " + "err=%d, port=%d\n", ret, portid); + + /* init one TX queue per lcore */ + tx_queueid = 0; + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + if (numa_on) + socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id); + else + socket_id = 0; + + /* init TX queue */ + printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id); + + txconf = &dev_info.default_txconf; + txconf->offloads = local_port_conf.txmode.offloads; + + ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd, + socket_id, txconf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: " + "err=%d, port=%d\n", ret, portid); + + qconf = &lcore_conf[lcore_id]; + qconf->tx_queue_id[portid] = tx_queueid; + + /* Pre-populate pkt offloads based on capabilities */ + qconf->outbound.ipv4_offloads = PKT_TX_IPV4; + qconf->outbound.ipv6_offloads = PKT_TX_IPV6; + if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM; + + tx_queueid++; + + /* init RX queues */ + for (queue = 0; queue < qconf->nb_rx_queue; ++queue) { + struct rte_eth_rxconf rxq_conf; + + if (portid != qconf->rx_queue_list[queue].port_id) + continue; + + rx_queueid = qconf->rx_queue_list[queue].queue_id; + + printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid, + socket_id); + + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = local_port_conf.rxmode.offloads; + ret = rte_eth_rx_queue_setup(portid, rx_queueid, + nb_rxd, socket_id, &rxq_conf, + socket_ctx[socket_id].mbuf_pool); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "rte_eth_rx_queue_setup: err=%d, " + "port=%d\n", ret, portid); + } + } + printf("\n"); +} + +static size_t +max_session_size(void) +{ + size_t max_sz, sz; + void *sec_ctx; + int16_t cdev_id, port_id, n; + + max_sz = 0; + n = rte_cryptodev_count(); + for (cdev_id = 0; cdev_id != n; cdev_id++) { + sz = rte_cryptodev_sym_get_private_session_size(cdev_id); + if (sz > max_sz) + max_sz = sz; + /* + * If crypto device is security capable, need to check the + * size of security session as well. 
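max_session_size() returns the largest session footprint found across all crypto devices and security-capable ethdev ports, so that the single per-socket element size used by the pools created below can hold any session the application may create. The element count chosen in session_pool_init()/session_priv_pool_init() then follows the formula in those functions; a purely illustrative sizing (actual values depend on CDEV_MP_CACHE_SZ, the lcore count and the SA configuration):

    /* Example numbers only: 4 lcores, a per-lcore cache of 64 and 128
     * sessions required by the SA database give
     *   nb_sess = (128 + 64 * 4) * 2 = 768
     * pool elements of sess_sz bytes; the factor of two covers
     * rte_security_session_create() taking both the session and its
     * private data from the same mempool.
     */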
+ */ + + /* Get security context of the crypto device */ + sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id); + if (sec_ctx == NULL) + continue; + + /* Get size of security session */ + sz = rte_security_session_get_size(sec_ctx); + if (sz > max_sz) + max_sz = sz; + } + + RTE_ETH_FOREACH_DEV(port_id) { + if ((enabled_port_mask & (1 << port_id)) == 0) + continue; + + sec_ctx = rte_eth_dev_get_sec_ctx(port_id); + if (sec_ctx == NULL) + continue; + + sz = rte_security_session_get_size(sec_ctx); + if (sz > max_sz) + max_sz = sz; + } + + return max_sz; +} + +static void +session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz) +{ + char mp_name[RTE_MEMPOOL_NAMESIZE]; + struct rte_mempool *sess_mp; + uint32_t nb_sess; + + snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, + "sess_mp_%u", socket_id); + /* + * Doubled due to rte_security_session_create() uses one mempool for + * session and for session private data. + */ + nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ * + rte_lcore_count()) * 2; + sess_mp = rte_cryptodev_sym_session_pool_create( + mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0, + socket_id); + ctx->session_pool = sess_mp; + + if (ctx->session_pool == NULL) + rte_exit(EXIT_FAILURE, + "Cannot init session pool on socket %d\n", socket_id); + else + printf("Allocated session pool on socket %d\n", socket_id); +} + +static void +session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id, + size_t sess_sz) +{ + char mp_name[RTE_MEMPOOL_NAMESIZE]; + struct rte_mempool *sess_mp; + uint32_t nb_sess; + + snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, + "sess_mp_priv_%u", socket_id); + /* + * Doubled due to rte_security_session_create() uses one mempool for + * session and for session private data. + */ + nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ * + rte_lcore_count()) * 2; + sess_mp = rte_mempool_create(mp_name, + nb_sess, + sess_sz, + CDEV_MP_CACHE_SZ, + 0, NULL, NULL, NULL, + NULL, socket_id, + 0); + ctx->session_priv_pool = sess_mp; + + if (ctx->session_priv_pool == NULL) + rte_exit(EXIT_FAILURE, + "Cannot init session priv pool on socket %d\n", + socket_id); + else + printf("Allocated session priv pool on socket %d\n", + socket_id); +} + +static void +pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf) +{ + char s[64]; + int32_t ms; + + snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id); + ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf, + MEMPOOL_CACHE_SIZE, ipsec_metadata_size(), + frame_buf_size, socket_id); + + /* + * if multi-segment support is enabled, then create a pool + * for indirect mbufs. + */ + ms = multi_seg_required(); + if (ms != 0) { + snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id); + ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf, + MEMPOOL_CACHE_SIZE, 0, 0, socket_id); + } + + if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL)) + rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", + socket_id); + else + printf("Allocated mbuf pool on socket %d\n", socket_id); +} + +static inline int +inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md) +{ + struct ipsec_sa *sa; + + /* For inline protocol processing, the metadata in the event will + * uniquely identify the security session which raised the event. + * Application would then need the userdata it had registered with the + * security session to process the event. 
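The userdata mentioned in this comment is registered when the session is created; in this patch that happens in create_inline_session() in ipsec.c (shown further down), essentially:

    /* Registration side, from create_inline_session() below: the SA
     * pointer is stored as session userdata so that this handler can map
     * the event metadata back to the affected ipsec_sa.
     */
    sess_conf.userdata = (void *)sa;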
+ */ + + sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md); + + if (sa == NULL) { + /* userdata could not be retrieved */ + return -1; + } + + /* Sequence number over flow. SA need to be re-established */ + RTE_SET_USED(sa); + return 0; +} + +static int +inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type, + void *param, void *ret_param) +{ + uint64_t md; + struct rte_eth_event_ipsec_desc *event_desc = NULL; + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx(port_id); + + RTE_SET_USED(param); + + if (type != RTE_ETH_EVENT_IPSEC) + return -1; + + event_desc = ret_param; + if (event_desc == NULL) { + printf("Event descriptor not set\n"); + return -1; + } + + md = event_desc->metadata; + + if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW) + return inline_ipsec_event_esn_overflow(ctx, md); + else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) { + printf("Invalid IPsec event reported\n"); + return -1; + } + + return -1; +} + +static uint16_t +rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue, + struct rte_mbuf *pkt[], uint16_t nb_pkts, + __rte_unused uint16_t max_pkts, void *user_param) +{ + uint64_t tm; + uint32_t i, k; + struct lcore_conf *lc; + struct rte_mbuf *mb; + struct rte_ether_hdr *eth; + + lc = user_param; + k = 0; + tm = 0; + + for (i = 0; i != nb_pkts; i++) { + + mb = pkt[i]; + eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *); + if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + + struct rte_ipv4_hdr *iph; + + iph = (struct rte_ipv4_hdr *)(eth + 1); + if (rte_ipv4_frag_pkt_is_fragmented(iph)) { + + mb->l2_len = sizeof(*eth); + mb->l3_len = sizeof(*iph); + tm = (tm != 0) ? tm : rte_rdtsc(); + mb = rte_ipv4_frag_reassemble_packet( + lc->frag.tbl, &lc->frag.dr, + mb, tm, iph); + + if (mb != NULL) { + /* fix ip cksum after reassemble. */ + iph = rte_pktmbuf_mtod_offset(mb, + struct rte_ipv4_hdr *, + mb->l2_len); + iph->hdr_checksum = 0; + iph->hdr_checksum = rte_ipv4_cksum(iph); + } + } + } else if (eth->ether_type == + rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + + struct rte_ipv6_hdr *iph; + struct ipv6_extension_fragment *fh; + + iph = (struct rte_ipv6_hdr *)(eth + 1); + fh = rte_ipv6_frag_get_ipv6_fragment_header(iph); + if (fh != NULL) { + mb->l2_len = sizeof(*eth); + mb->l3_len = (uintptr_t)fh - (uintptr_t)iph + + sizeof(*fh); + tm = (tm != 0) ? tm : rte_rdtsc(); + mb = rte_ipv6_frag_reassemble_packet( + lc->frag.tbl, &lc->frag.dr, + mb, tm, iph, fh); + if (mb != NULL) + /* fix l3_len after reassemble. 
*/ + mb->l3_len = mb->l3_len - sizeof(*fh); + } + } + + pkt[k] = mb; + k += (mb != NULL); + } + + /* some fragments were encountered, drain death row */ + if (tm != 0) + rte_ip_frag_free_death_row(&lc->frag.dr, 0); + + return k; +} + + +static int +reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid) +{ + int32_t sid; + uint32_t i; + uint64_t frag_cycles; + const struct lcore_rx_queue *rxq; + const struct rte_eth_rxtx_callback *cb; + + /* create fragment table */ + sid = rte_lcore_to_socket_id(cid); + frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) / + NS_PER_S * frag_ttl_ns; + + lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz, + FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid); + if (lc->frag.tbl == NULL) { + printf("%s(%u): failed to create fragment table of size: %u, " + "error code: %d\n", + __func__, cid, frag_tbl_sz, rte_errno); + return -ENOMEM; + } + + /* setup reassemble RX callbacks for all queues */ + for (i = 0; i != lc->nb_rx_queue; i++) { + + rxq = lc->rx_queue_list + i; + cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id, + rx_callback, lc); + if (cb == NULL) { + printf("%s(%u): failed to install RX callback for " + "portid=%u, queueid=%u, error code: %d\n", + __func__, cid, + rxq->port_id, rxq->queue_id, rte_errno); + return -ENOMEM; + } + } + + return 0; +} + +static int +reassemble_init(void) +{ + int32_t rc; + uint32_t i, lc; + + rc = 0; + for (i = 0; i != nb_lcore_params; i++) { + lc = lcore_params[i].lcore_id; + rc = reassemble_lcore_init(lcore_conf + lc, lc); + if (rc != 0) + break; + } + + return rc; +} + +static void +create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads) +{ + struct rte_flow_action action[2]; + struct rte_flow_item pattern[2]; + struct rte_flow_attr attr = {0}; + struct rte_flow_error err; + struct rte_flow *flow; + int ret; + + if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY)) + return; + + /* Add the default rte_flow to enable SECURITY for all ESP packets */ + + pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP; + pattern[0].spec = NULL; + pattern[0].mask = NULL; + pattern[0].last = NULL; + pattern[1].type = RTE_FLOW_ITEM_TYPE_END; + + action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY; + action[0].conf = NULL; + action[1].type = RTE_FLOW_ACTION_TYPE_END; + action[1].conf = NULL; + + attr.ingress = 1; + + ret = rte_flow_validate(port_id, &attr, pattern, action, &err); + if (ret) + return; + + flow = rte_flow_create(port_id, &attr, pattern, action, &err); + if (flow == NULL) + return; + + flow_info_tbl[port_id].rx_def_flow = flow; + RTE_LOG(INFO, IPSEC, + "Created default flow enabling SECURITY for all ESP traffic on port %d\n", + port_id); +} + +static void +signal_handler(int signum) +{ + if (signum == SIGINT || signum == SIGTERM) { + printf("\n\nSignal %d received, preparing to exit...\n", + signum); + force_quit = true; + } +} + +static void +ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa) +{ + struct rte_ipsec_session *ips; + int32_t i; + + if (!sa || !nb_sa) + return; + + for (i = 0; i < nb_sa; i++) { + ips = ipsec_get_primary_session(&sa[i]); + if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) + rte_exit(EXIT_FAILURE, "Event mode supports only " + "inline protocol sessions\n"); + } + +} + +static int32_t +check_event_mode_params(struct eh_conf *eh_conf) +{ + struct eventmode_conf *em_conf = NULL; + struct lcore_params *params; + uint16_t portid; + + if (!eh_conf || !eh_conf->mode_params) + return -EINVAL; + + /* Get eventmode conf */ + em_conf = eh_conf->mode_params; + + if (eh_conf->mode == 
EH_PKT_TRANSFER_MODE_POLL && + em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) { + printf("error: option --event-schedule-type applies only to " + "event mode\n"); + return -EINVAL; + } + + if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT) + return 0; + + /* Set schedule type to ORDERED if it wasn't explicitly set by user */ + if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET) + em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED; + + /* + * Event mode currently supports only inline protocol sessions. + * If there are other types of sessions configured then exit with + * error. + */ + ev_mode_sess_verify(sa_in, nb_sa_in); + ev_mode_sess_verify(sa_out, nb_sa_out); + + + /* Option --config does not apply to event mode */ + if (nb_lcore_params > 0) { + printf("error: option --config applies only to poll mode\n"); + return -EINVAL; + } + + /* + * In order to use the same port_init routine for both poll and event + * modes initialize lcore_params with one queue for each eth port + */ + lcore_params = lcore_params_array; + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + params = &lcore_params[nb_lcore_params++]; + params->port_id = portid; + params->queue_id = 0; + params->lcore_id = rte_get_next_lcore(0, 0, 1); + } + + return 0; +} + +static void +inline_sessions_free(struct sa_ctx *sa_ctx) +{ + struct rte_ipsec_session *ips; + struct ipsec_sa *sa; + int32_t ret; + uint32_t i; + + if (!sa_ctx) + return; + + for (i = 0; i < sa_ctx->nb_sa; i++) { + + sa = &sa_ctx->sa[i]; + if (!sa->spi) + continue; + + ips = ipsec_get_primary_session(sa); + if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL && + ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) + continue; + + if (!rte_eth_dev_is_valid_port(sa->portid)) + continue; + + ret = rte_security_session_destroy( + rte_eth_dev_get_sec_ctx(sa->portid), + ips->security.ses); + if (ret) + RTE_LOG(ERR, IPSEC, "Failed to destroy security " + "session type %d, spi %d\n", + ips->type, sa->spi); + } +} + +static uint32_t +calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq, + uint32_t nb_txq) +{ + return RTE_MAX((nb_rxq * nb_rxd + + nb_ports * nb_lcores * MAX_PKT_BURST + + nb_ports * nb_txq * nb_txd + + nb_lcores * MEMPOOL_CACHE_SIZE + + nb_crypto_qp * CDEV_QUEUE_DESC + + nb_lcores * frag_tbl_sz * + FRAG_TBL_BUCKET_ENTRIES), + 8192U); +} + +int32_t +main(int32_t argc, char **argv) +{ + int32_t ret; + uint32_t lcore_id, nb_txq, nb_rxq = 0; + uint32_t cdev_id; + uint32_t i; + uint8_t socket_id; + uint16_t portid, nb_crypto_qp, nb_ports = 0; + uint64_t req_rx_offloads[RTE_MAX_ETHPORTS]; + uint64_t req_tx_offloads[RTE_MAX_ETHPORTS]; + struct eh_conf *eh_conf = NULL; + size_t sess_sz; + + nb_bufs_in_pool = 0; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); + argc -= ret; + argv += ret; + + force_quit = false; + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + /* initialize event helper configuration */ + eh_conf = eh_conf_init(); + if (eh_conf == NULL) + rte_exit(EXIT_FAILURE, "Failed to init event helper config"); + + /* parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv, eh_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid parameters\n"); + + /* parse configuration file */ + if (parse_cfg_file(cfgfile) < 0) { + printf("parsing file \"%s\" failed\n", + optarg); + print_usage(argv[0]); + return -1; + } + + if ((unprotected_port_mask & 
enabled_port_mask) != + unprotected_port_mask) + rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n", + unprotected_port_mask); + + if (check_poll_mode_params(eh_conf) < 0) + rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n"); + + if (check_event_mode_params(eh_conf) < 0) + rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n"); + + ret = init_lcore_rx_queues(); + if (ret < 0) + rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n"); + + nb_lcores = rte_lcore_count(); + + sess_sz = max_session_size(); + + /* + * In event mode request minimum number of crypto queues + * to be reserved equal to number of ports. + */ + if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT) + nb_crypto_qp = rte_eth_dev_count_avail(); + else + nb_crypto_qp = 0; + + nb_crypto_qp = cryptodevs_init(nb_crypto_qp); + + if (nb_bufs_in_pool == 0) { + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + nb_ports++; + nb_rxq += get_port_nb_rx_queues(portid); + } + + nb_txq = nb_lcores; + + nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp, + nb_rxq, nb_txq); + } + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + if (numa_on) + socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id); + else + socket_id = 0; + + /* mbuf_pool is initialised by the pool_init() function*/ + if (socket_ctx[socket_id].mbuf_pool) + continue; + + pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool); + session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); + session_priv_pool_init(&socket_ctx[socket_id], socket_id, + sess_sz); + } + printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool); + + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + sa_check_offloads(portid, &req_rx_offloads[portid], + &req_tx_offloads[portid]); + port_init(portid, req_rx_offloads[portid], + req_tx_offloads[portid]); + } + + /* + * Set the enabled port mask in helper config for use by helper + * sub-system. This will be used while initializing devices using + * helper sub-system. + */ + eh_conf->eth_portmask = enabled_port_mask; + + /* Initialize eventmode components */ + ret = eh_devs_init(eh_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret); + + /* start ports */ + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + /* Create flow before starting the device */ + create_default_ipsec_flow(portid, req_rx_offloads[portid]); + + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " + "err=%d, port=%d\n", ret, portid); + /* + * If enabled, put device in promiscuous mode. + * This allows IO forwarding mode to forward packets + * to itself through 2 cross-connected ports of the + * target machine. 
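For orientation, the two port masks validated near the top of main() relate as follows: the -u bits must be a subset of the -p bits, and a set -u bit marks a port as carrying encrypted (ESP) traffic, so process_pkts() runs the inbound path for it while the remaining enabled ports take the outbound path. A made-up example:

    /* Hypothetical invocation "-p 0xf -u 0x3" on a four-port box. */
    uint32_t enabled_ports = 0xf;   /* ports 0-3 enabled             */
    uint32_t unprot_ports  = 0x3;   /* ports 0-1 receive ESP traffic */

    /* main() rejects -u bits that are not also present in -p: */
    if ((unprot_ports & enabled_ports) != unprot_ports)
            rte_exit(EXIT_FAILURE, "-u must be a subset of -p\n");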
+ */ + if (promiscuous_on) { + ret = rte_eth_promiscuous_enable(portid); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "rte_eth_promiscuous_enable: err=%s, port=%d\n", + rte_strerror(-ret), portid); + } + + rte_eth_dev_callback_register(portid, + RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); + } + + /* fragment reassemble is enabled */ + if (frag_tbl_sz != 0) { + ret = reassemble_init(); + if (ret != 0) + rte_exit(EXIT_FAILURE, "failed at reassemble init"); + } + + /* Replicate each context per socket */ + for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) { + socket_id = rte_socket_id_by_idx(i); + if ((socket_ctx[socket_id].mbuf_pool != NULL) && + (socket_ctx[socket_id].sa_in == NULL) && + (socket_ctx[socket_id].sa_out == NULL)) { + sa_init(&socket_ctx[socket_id], socket_id); + sp4_init(&socket_ctx[socket_id], socket_id); + sp6_init(&socket_ctx[socket_id], socket_id); + rt_init(&socket_ctx[socket_id], socket_id); + } + } + + check_all_ports_link_status(enabled_port_mask); + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + /* Uninitialize eventmode components */ + ret = eh_devs_uninit(eh_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret); + + /* Free eventmode configuration memory */ + eh_conf_uninit(eh_conf); + + /* Destroy inline inbound and outbound sessions */ + for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) { + socket_id = rte_socket_id_by_idx(i); + inline_sessions_free(socket_ctx[socket_id].sa_in); + inline_sessions_free(socket_ctx[socket_id].sa_out); + } + + for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) { + printf("Closing cryptodev %d...", cdev_id); + rte_cryptodev_stop(cdev_id); + rte_cryptodev_close(cdev_id); + printf(" Done\n"); + } + + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + printf("Closing port %d...", portid); + if (flow_info_tbl[portid].rx_def_flow) { + struct rte_flow_error err; + + ret = rte_flow_destroy(portid, + flow_info_tbl[portid].rx_def_flow, &err); + if (ret) + RTE_LOG(ERR, IPSEC, "Failed to destroy flow " + " for port %u, err msg: %s\n", portid, + err.message); + } + rte_eth_dev_stop(portid); + rte_eth_dev_close(portid); + printf(" Done\n"); + } + printf("Bye...\n"); + + return 0; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.h b/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.h new file mode 100644 index 000000000..4b53cb537 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec-secgw.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2020 Marvell International Ltd. 
+ */ +#ifndef _IPSEC_SECGW_H_ +#define _IPSEC_SECGW_H_ + +#include <stdbool.h> + +#define NB_SOCKETS 4 + +#define MAX_PKT_BURST 32 + +#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 + +#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((a) & 0xff) << 56) | \ + ((uint64_t)((b) & 0xff) << 48) | \ + ((uint64_t)((c) & 0xff) << 40) | \ + ((uint64_t)((d) & 0xff) << 32) | \ + ((uint64_t)((e) & 0xff) << 24) | \ + ((uint64_t)((f) & 0xff) << 16) | \ + ((uint64_t)((g) & 0xff) << 8) | \ + ((uint64_t)(h) & 0xff)) +#else +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((h) & 0xff) << 56) | \ + ((uint64_t)((g) & 0xff) << 48) | \ + ((uint64_t)((f) & 0xff) << 40) | \ + ((uint64_t)((e) & 0xff) << 32) | \ + ((uint64_t)((d) & 0xff) << 24) | \ + ((uint64_t)((c) & 0xff) << 16) | \ + ((uint64_t)((b) & 0xff) << 8) | \ + ((uint64_t)(a) & 0xff)) +#endif + +#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) + +struct traffic_type { + const uint8_t *data[MAX_PKT_BURST * 2]; + struct rte_mbuf *pkts[MAX_PKT_BURST * 2]; + void *saptr[MAX_PKT_BURST * 2]; + uint32_t res[MAX_PKT_BURST * 2]; + uint32_t num; +}; + +struct ipsec_traffic { + struct traffic_type ipsec; + struct traffic_type ip4; + struct traffic_type ip6; +}; + +/* Fields optimized for devices without burst */ +struct traffic_type_nb { + const uint8_t *data; + struct rte_mbuf *pkt; + uint32_t res; + uint32_t num; +}; + +struct ipsec_traffic_nb { + struct traffic_type_nb ipsec; + struct traffic_type_nb ip4; + struct traffic_type_nb ip6; +}; + +/* port/source ethernet addr and destination ethernet addr */ +struct ethaddr_info { + uint64_t src, dst; +}; + +extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS]; + +/* Port mask to identify the unprotected ports */ +extern uint32_t unprotected_port_mask; + +/* Index of SA in single mode */ +extern uint32_t single_sa_idx; + +extern volatile bool force_quit; + +static inline uint8_t +is_unprotected_port(uint16_t port_id) +{ + return unprotected_port_mask & (1 << port_id); +} + +#endif /* _IPSEC_SECGW_H_ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec.c b/src/spdk/dpdk/examples/ipsec-secgw/ipsec.c new file mode 100644 index 000000000..bf88d80d6 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec.c @@ -0,0 +1,749 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2020 Intel Corporation + */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> + +#include <rte_branch_prediction.h> +#include <rte_log.h> +#include <rte_crypto.h> +#include <rte_security.h> +#include <rte_cryptodev.h> +#include <rte_ipsec.h> +#include <rte_ethdev.h> +#include <rte_mbuf.h> +#include <rte_hash.h> + +#include "ipsec.h" +#include "esp.h" + +static inline void +set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec) +{ + if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) { + struct rte_security_ipsec_tunnel_param *tunnel = + &ipsec->tunnel; + if (IS_IP4_TUNNEL(sa->flags)) { + tunnel->type = + RTE_SECURITY_IPSEC_TUNNEL_IPV4; + tunnel->ipv4.ttl = IPDEFTTL; + + memcpy((uint8_t *)&tunnel->ipv4.src_ip, + (uint8_t *)&sa->src.ip.ip4, 4); + + memcpy((uint8_t *)&tunnel->ipv4.dst_ip, + (uint8_t *)&sa->dst.ip.ip4, 4); + } else if (IS_IP6_TUNNEL(sa->flags)) { + tunnel->type = + RTE_SECURITY_IPSEC_TUNNEL_IPV6; + tunnel->ipv6.hlimit = IPDEFTTL; + tunnel->ipv6.dscp = 0; + tunnel->ipv6.flabel = 0; + + memcpy((uint8_t *)&tunnel->ipv6.src_addr, + (uint8_t *)&sa->src.ip.ip6.ip6_b, 16); + + 
memcpy((uint8_t *)&tunnel->ipv6.dst_addr, + (uint8_t *)&sa->dst.ip.ip6.ip6_b, 16); + } + /* TODO support for Transport */ + } + ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT; + ipsec->replay_win_sz = app_sa_prm.window_size; + ipsec->options.esn = app_sa_prm.enable_esn; +} + +int +create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa, + struct rte_ipsec_session *ips) +{ + struct rte_cryptodev_info cdev_info; + unsigned long cdev_id_qp = 0; + int32_t ret = 0; + struct cdev_key key = { 0 }; + + key.lcore_id = (uint8_t)rte_lcore_id(); + + key.cipher_algo = (uint8_t)sa->cipher_algo; + key.auth_algo = (uint8_t)sa->auth_algo; + key.aead_algo = (uint8_t)sa->aead_algo; + + ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key, + (void **)&cdev_id_qp); + if (ret < 0) { + RTE_LOG(ERR, IPSEC, + "No cryptodev: core %u, cipher_algo %u, " + "auth_algo %u, aead_algo %u\n", + key.lcore_id, + key.cipher_algo, + key.auth_algo, + key.aead_algo); + return -1; + } + + RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev " + "%u qp %u\n", sa->spi, + ipsec_ctx->tbl[cdev_id_qp].id, + ipsec_ctx->tbl[cdev_id_qp].qp); + + if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE && + ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) { + struct rte_security_session_conf sess_conf = { + .action_type = ips->type, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .spi = sa->spi, + .salt = sa->salt, + .options = { 0 }, + .replay_win_sz = 0, + .direction = sa->direction, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = (IS_TUNNEL(sa->flags)) ? + RTE_SECURITY_IPSEC_SA_MODE_TUNNEL : + RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + } }, + .crypto_xform = sa->xforms, + .userdata = NULL, + + }; + + if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) { + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_cryptodev_get_sec_ctx( + ipsec_ctx->tbl[cdev_id_qp].id); + + /* Set IPsec parameters in conf */ + set_ipsec_conf(sa, &(sess_conf.ipsec)); + + ips->security.ses = rte_security_session_create(ctx, + &sess_conf, ipsec_ctx->session_priv_pool); + if (ips->security.ses == NULL) { + RTE_LOG(ERR, IPSEC, + "SEC Session init failed: err: %d\n", ret); + return -1; + } + } else { + RTE_LOG(ERR, IPSEC, "Inline not supported\n"); + return -1; + } + } else { + if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) { + struct rte_cryptodev_info info; + uint16_t cdev_id; + + cdev_id = ipsec_ctx->tbl[cdev_id_qp].id; + rte_cryptodev_info_get(cdev_id, &info); + if (!(info.feature_flags & + RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) + return -ENOTSUP; + + ips->crypto.dev_id = cdev_id; + } + ips->crypto.ses = rte_cryptodev_sym_session_create( + ipsec_ctx->session_pool); + rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id, + ips->crypto.ses, sa->xforms, + ipsec_ctx->session_priv_pool); + + rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, + &cdev_info); + } + + sa->cdev_id_qp = cdev_id_qp; + + return 0; +} + +int +create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, + struct rte_ipsec_session *ips) +{ + int32_t ret = 0; + struct rte_security_ctx *sec_ctx; + struct rte_security_session_conf sess_conf = { + .action_type = ips->type, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .spi = sa->spi, + .salt = sa->salt, + .options = { 0 }, + .replay_win_sz = 0, + .direction = sa->direction, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = (sa->flags == IP4_TUNNEL || + sa->flags == IP6_TUNNEL) ? 
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL : + RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + } }, + .crypto_xform = sa->xforms, + .userdata = NULL, + }; + + RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n", + sa->spi, sa->portid); + + if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + struct rte_flow_error err; + const struct rte_security_capability *sec_cap; + int ret = 0; + + sec_ctx = (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx( + sa->portid); + if (sec_ctx == NULL) { + RTE_LOG(ERR, IPSEC, + " rte_eth_dev_get_sec_ctx failed\n"); + return -1; + } + + ips->security.ses = rte_security_session_create(sec_ctx, + &sess_conf, skt_ctx->session_pool); + if (ips->security.ses == NULL) { + RTE_LOG(ERR, IPSEC, + "SEC Session init failed: err: %d\n", ret); + return -1; + } + + sec_cap = rte_security_capabilities_get(sec_ctx); + + /* iterate until ESP tunnel*/ + while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) { + if (sec_cap->action == ips->type && + sec_cap->protocol == + RTE_SECURITY_PROTOCOL_IPSEC && + sec_cap->ipsec.mode == + RTE_SECURITY_IPSEC_SA_MODE_TUNNEL && + sec_cap->ipsec.direction == sa->direction) + break; + sec_cap++; + } + + if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) { + RTE_LOG(ERR, IPSEC, + "No suitable security capability found\n"); + return -1; + } + + ips->security.ol_flags = sec_cap->ol_flags; + ips->security.ctx = sec_ctx; + sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; + + if (IS_IP6(sa->flags)) { + sa->pattern[1].mask = &rte_flow_item_ipv6_mask; + sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6; + sa->pattern[1].spec = &sa->ipv6_spec; + + memcpy(sa->ipv6_spec.hdr.dst_addr, + sa->dst.ip.ip6.ip6_b, 16); + memcpy(sa->ipv6_spec.hdr.src_addr, + sa->src.ip.ip6.ip6_b, 16); + } else if (IS_IP4(sa->flags)) { + sa->pattern[1].mask = &rte_flow_item_ipv4_mask; + sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4; + sa->pattern[1].spec = &sa->ipv4_spec; + + sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4; + sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4; + } + + sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP; + sa->pattern[2].spec = &sa->esp_spec; + sa->pattern[2].mask = &rte_flow_item_esp_mask; + sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi); + + sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END; + + sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY; + sa->action[0].conf = ips->security.ses; + + sa->action[1].type = RTE_FLOW_ACTION_TYPE_END; + + sa->attr.egress = (sa->direction == + RTE_SECURITY_IPSEC_SA_DIR_EGRESS); + sa->attr.ingress = (sa->direction == + RTE_SECURITY_IPSEC_SA_DIR_INGRESS); + if (sa->attr.ingress) { + uint8_t rss_key[40]; + struct rte_eth_rss_conf rss_conf = { + .rss_key = rss_key, + .rss_key_len = 40, + }; + struct rte_eth_dev_info dev_info; + uint16_t queue[RTE_MAX_QUEUES_PER_PORT]; + struct rte_flow_action_rss action_rss; + unsigned int i; + unsigned int j; + + /* Don't create flow if default flow is created */ + if (flow_info_tbl[sa->portid].rx_def_flow) + return 0; + + ret = rte_eth_dev_info_get(sa->portid, &dev_info); + if (ret != 0) { + RTE_LOG(ERR, IPSEC, + "Error during getting device (port %u) info: %s\n", + sa->portid, strerror(-ret)); + return ret; + } + + sa->action[2].type = RTE_FLOW_ACTION_TYPE_END; + /* Try RSS. 
*/ + sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS; + sa->action[1].conf = &action_rss; + ret = rte_eth_dev_rss_hash_conf_get(sa->portid, + &rss_conf); + if (ret != 0) { + RTE_LOG(ERR, IPSEC, + "rte_eth_dev_rss_hash_conf_get:ret=%d\n", + ret); + return -1; + } + for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i) + queue[j++] = i; + + action_rss = (struct rte_flow_action_rss){ + .types = rss_conf.rss_hf, + .key_len = rss_conf.rss_key_len, + .queue_num = j, + .key = rss_key, + .queue = queue, + }; + ret = rte_flow_validate(sa->portid, &sa->attr, + sa->pattern, sa->action, + &err); + if (!ret) + goto flow_create; + /* Try Queue. */ + sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE; + sa->action[1].conf = + &(struct rte_flow_action_queue){ + .index = 0, + }; + ret = rte_flow_validate(sa->portid, &sa->attr, + sa->pattern, sa->action, + &err); + /* Try End. */ + sa->action[1].type = RTE_FLOW_ACTION_TYPE_END; + sa->action[1].conf = NULL; + ret = rte_flow_validate(sa->portid, &sa->attr, + sa->pattern, sa->action, + &err); + if (ret) + goto flow_create_failure; + } else if (sa->attr.egress && + (ips->security.ol_flags & + RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) { + sa->action[1].type = + RTE_FLOW_ACTION_TYPE_PASSTHRU; + sa->action[2].type = + RTE_FLOW_ACTION_TYPE_END; + } +flow_create: + sa->flow = rte_flow_create(sa->portid, + &sa->attr, sa->pattern, sa->action, &err); + if (sa->flow == NULL) { +flow_create_failure: + RTE_LOG(ERR, IPSEC, + "Failed to create ipsec flow msg: %s\n", + err.message); + return -1; + } + } else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) { + const struct rte_security_capability *sec_cap; + + sec_ctx = (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx(sa->portid); + + if (sec_ctx == NULL) { + RTE_LOG(ERR, IPSEC, + "Ethernet device doesn't have security features registered\n"); + return -1; + } + + /* Set IPsec parameters in conf */ + set_ipsec_conf(sa, &(sess_conf.ipsec)); + + /* Save SA as userdata for the security session. When + * the packet is received, this userdata will be + * retrieved using the metadata from the packet. + * + * The PMD is expected to set similar metadata for other + * operations, like rte_eth_event, which are tied to + * security session. In such cases, the userdata could + * be obtained to uniquely identify the security + * parameters denoted. 
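+ *
+ * In other words the session acts as a small per-SA key/value slot:
+ * the SA pointer is stored when the session is created and read back
+ * when a packet or event carrying that session's metadata shows up.
+ * A minimal standalone sketch of that round trip (illustrative names
+ * only, independent of the rte_security API):
+ *
+ *	struct sess_slot { void *userdata; };	/* one slot per session */
+ *
+ *	static void
+ *	sess_set_udata(struct sess_slot *s, void *sa)	/* at SA setup  */
+ *	{
+ *		s->userdata = sa;
+ *	}
+ *
+ *	static void *
+ *	sess_get_udata(const struct sess_slot *s)	/* on packet Rx */
+ *	{
+ *		return s->userdata;		/* the ipsec_sa pointer */
+ *	}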
+ */ + + sess_conf.userdata = (void *) sa; + + ips->security.ses = rte_security_session_create(sec_ctx, + &sess_conf, skt_ctx->session_pool); + if (ips->security.ses == NULL) { + RTE_LOG(ERR, IPSEC, + "SEC Session init failed: err: %d\n", ret); + return -1; + } + + sec_cap = rte_security_capabilities_get(sec_ctx); + if (sec_cap == NULL) { + RTE_LOG(ERR, IPSEC, + "No capabilities registered\n"); + return -1; + } + + /* iterate until ESP tunnel*/ + while (sec_cap->action != + RTE_SECURITY_ACTION_TYPE_NONE) { + if (sec_cap->action == ips->type && + sec_cap->protocol == + RTE_SECURITY_PROTOCOL_IPSEC && + sec_cap->ipsec.mode == + sess_conf.ipsec.mode && + sec_cap->ipsec.direction == sa->direction) + break; + sec_cap++; + } + + if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) { + RTE_LOG(ERR, IPSEC, + "No suitable security capability found\n"); + return -1; + } + + ips->security.ol_flags = sec_cap->ol_flags; + ips->security.ctx = sec_ctx; + } + + return 0; +} + +int +create_ipsec_esp_flow(struct ipsec_sa *sa) +{ + int ret = 0; + struct rte_flow_error err; + if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { + RTE_LOG(ERR, IPSEC, + "No Flow director rule for Egress traffic\n"); + return -1; + } + if (sa->flags == TRANSPORT) { + RTE_LOG(ERR, IPSEC, + "No Flow director rule for transport mode\n"); + return -1; + } + sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE; + sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; + sa->action[0].conf = &(struct rte_flow_action_queue) { + .index = sa->fdir_qid, + }; + sa->attr.egress = 0; + sa->attr.ingress = 1; + if (IS_IP6(sa->flags)) { + sa->pattern[1].mask = &rte_flow_item_ipv6_mask; + sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6; + sa->pattern[1].spec = &sa->ipv6_spec; + memcpy(sa->ipv6_spec.hdr.dst_addr, + sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b)); + memcpy(sa->ipv6_spec.hdr.src_addr, + sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b)); + sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP; + sa->pattern[2].spec = &sa->esp_spec; + sa->pattern[2].mask = &rte_flow_item_esp_mask; + sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi); + sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END; + } else if (IS_IP4(sa->flags)) { + sa->pattern[1].mask = &rte_flow_item_ipv4_mask; + sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4; + sa->pattern[1].spec = &sa->ipv4_spec; + sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4; + sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4; + sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP; + sa->pattern[2].spec = &sa->esp_spec; + sa->pattern[2].mask = &rte_flow_item_esp_mask; + sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi); + sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END; + } + sa->action[1].type = RTE_FLOW_ACTION_TYPE_END; + + ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action, + &err); + if (ret < 0) { + RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message); + return ret; + } + + sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern, + sa->action, &err); + if (!sa->flow) { + RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message); + return -1; + } + + return 0; +} + +/* + * queue crypto-ops into PMD queue. 
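+ *
+ * The PMD may accept fewer ops than requested; anything it refuses is
+ * dropped (its source mbuf freed) and only the accepted count is added
+ * to the in-flight total that the dequeue side later drains.  A minimal
+ * standalone sketch of that accounting (burst_enq() and drop() are
+ * stand-ins for rte_cryptodev_enqueue_burst() and rte_pktmbuf_free()):
+ *
+ *	static void
+ *	flush_queue(void *ops[], unsigned int len, unsigned int *in_flight,
+ *		unsigned int (*burst_enq)(void *[], unsigned int),
+ *		void (*drop)(void *))
+ *	{
+ *		unsigned int i, ret;
+ *
+ *		ret = burst_enq(ops, len);	/* may be < len       */
+ *		for (i = ret; i < len; i++)
+ *			drop(ops[i]);		/* refused: free it   */
+ *		*in_flight += ret;		/* accepted ops only  */
+ *	}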
+ */ +void +enqueue_cop_burst(struct cdev_qp *cqp) +{ + uint32_t i, len, ret; + + len = cqp->len; + ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len); + if (ret < len) { + RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:" + " enqueued %u crypto ops out of %u\n", + cqp->id, cqp->qp, ret, len); + /* drop packets that we fail to enqueue */ + for (i = ret; i < len; i++) + rte_pktmbuf_free(cqp->buf[i]->sym->m_src); + } + cqp->in_flight += ret; + cqp->len = 0; +} + +static inline void +enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop) +{ + cqp->buf[cqp->len++] = cop; + + if (cqp->len == MAX_PKT_BURST) + enqueue_cop_burst(cqp); +} + +static inline void +ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, + struct rte_mbuf *pkts[], void *sas[], + uint16_t nb_pkts) +{ + int32_t ret = 0, i; + struct ipsec_mbuf_metadata *priv; + struct rte_crypto_sym_op *sym_cop; + struct ipsec_sa *sa; + struct rte_ipsec_session *ips; + + for (i = 0; i < nb_pkts; i++) { + if (unlikely(sas[i] == NULL)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + rte_prefetch0(sas[i]); + rte_prefetch0(pkts[i]); + + priv = get_priv(pkts[i]); + sa = ipsec_mask_saptr(sas[i]); + priv->sa = sa; + ips = ipsec_get_primary_session(sa); + + switch (ips->type) { + case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL: + priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + rte_prefetch0(&priv->sym_cop); + + if ((unlikely(ips->security.ses == NULL)) && + create_lookaside_session(ipsec_ctx, sa, ips)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + sym_cop = get_sym_cop(&priv->cop); + sym_cop->m_src = pkts[i]; + + rte_security_attach_session(&priv->cop, + ips->security.ses); + break; + + case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO: + RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the" + " legacy mode."); + rte_pktmbuf_free(pkts[i]); + continue; + + case RTE_SECURITY_ACTION_TYPE_NONE: + + priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + rte_prefetch0(&priv->sym_cop); + + if ((unlikely(ips->crypto.ses == NULL)) && + create_lookaside_session(ipsec_ctx, sa, ips)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + rte_crypto_op_attach_sym_session(&priv->cop, + ips->crypto.ses); + + ret = xform_func(pkts[i], sa, &priv->cop); + if (unlikely(ret)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + RTE_ASSERT(ips->security.ses != NULL); + ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i]; + if (ips->security.ol_flags & + RTE_SECURITY_TX_OLOAD_NEED_MDATA) + rte_security_set_pkt_metadata( + ips->security.ctx, ips->security.ses, + pkts[i], NULL); + continue; + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + RTE_ASSERT(ips->security.ses != NULL); + priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + rte_prefetch0(&priv->sym_cop); + rte_security_attach_session(&priv->cop, + ips->security.ses); + + ret = xform_func(pkts[i], sa, &priv->cop); + if (unlikely(ret)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i]; + if (ips->security.ol_flags & + RTE_SECURITY_TX_OLOAD_NEED_MDATA) + rte_security_set_pkt_metadata( + ips->security.ctx, ips->security.ses, + pkts[i], NULL); + continue; + } + + RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps); + enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop); + } +} + +static inline int32_t 
+ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, + struct rte_mbuf *pkts[], uint16_t max_pkts) +{ + int32_t nb_pkts, ret; + struct ipsec_mbuf_metadata *priv; + struct ipsec_sa *sa; + struct rte_mbuf *pkt; + + nb_pkts = 0; + while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) { + pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt]; + rte_prefetch0(pkt); + priv = get_priv(pkt); + sa = priv->sa; + ret = xform_func(pkt, sa, &priv->cop); + if (unlikely(ret)) { + rte_pktmbuf_free(pkt); + continue; + } + pkts[nb_pkts++] = pkt; + } + + return nb_pkts; +} + +static inline int +ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, + struct rte_mbuf *pkts[], uint16_t max_pkts) +{ + int32_t nb_pkts = 0, ret = 0, i, j, nb_cops; + struct ipsec_mbuf_metadata *priv; + struct rte_crypto_op *cops[max_pkts]; + struct ipsec_sa *sa; + struct rte_mbuf *pkt; + + for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) { + struct cdev_qp *cqp; + + cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++]; + if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps) + ipsec_ctx->last_qp %= ipsec_ctx->nb_qps; + + if (cqp->in_flight == 0) + continue; + + nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, + cops, max_pkts - nb_pkts); + + cqp->in_flight -= nb_cops; + + for (j = 0; j < nb_cops; j++) { + pkt = cops[j]->sym->m_src; + rte_prefetch0(pkt); + + priv = get_priv(pkt); + sa = priv->sa; + + RTE_ASSERT(sa != NULL); + + if (ipsec_get_action_type(sa) == + RTE_SECURITY_ACTION_TYPE_NONE) { + ret = xform_func(pkt, sa, cops[j]); + if (unlikely(ret)) { + rte_pktmbuf_free(pkt); + continue; + } + } else if (ipsec_get_action_type(sa) == + RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) { + if (cops[j]->status) { + rte_pktmbuf_free(pkt); + continue; + } + } + pkts[nb_pkts++] = pkt; + } + } + + /* return packets */ + return nb_pkts; +} + +uint16_t +ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t nb_pkts, uint16_t len) +{ + void *sas[nb_pkts]; + + inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts); + + ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts); + + return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len); +} + +uint16_t +ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t len) +{ + return ipsec_dequeue(esp_inbound_post, ctx, pkts, len); +} + +uint16_t +ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len) +{ + void *sas[nb_pkts]; + + outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts); + + ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts); + + return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len); +} + +uint16_t +ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t len) +{ + return ipsec_dequeue(esp_outbound_post, ctx, pkts, len); +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec.h b/src/spdk/dpdk/examples/ipsec-secgw/ipsec.h new file mode 100644 index 000000000..2f6919900 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec.h @@ -0,0 +1,422 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#ifndef __IPSEC_H__ +#define __IPSEC_H__ + +#include <stdint.h> + +#include <rte_byteorder.h> +#include <rte_crypto.h> +#include <rte_security.h> +#include <rte_flow.h> +#include <rte_ipsec.h> + +#include "ipsec-secgw.h" + +#define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2 +#define RTE_LOGTYPE_IPSEC_IPIP RTE_LOGTYPE_USER3 + +#define MAX_INFLIGHT 128 +#define 
MAX_QP_PER_LCORE 256 + +#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */ + +#define IPSEC_OFFLOAD_ESN_SOFTLIMIT 0xffffff00 + +#define IV_OFFSET (sizeof(struct rte_crypto_op) + \ + sizeof(struct rte_crypto_sym_op)) + +#define uint32_t_to_char(ip, a, b, c, d) do {\ + *a = (uint8_t)(ip >> 24 & 0xff);\ + *b = (uint8_t)(ip >> 16 & 0xff);\ + *c = (uint8_t)(ip >> 8 & 0xff);\ + *d = (uint8_t)(ip & 0xff);\ + } while (0) + +#define DEFAULT_MAX_CATEGORIES 1 + +#define INVALID_SPI (0) + +#define DISCARD INVALID_SPI +#define BYPASS UINT32_MAX + +#define IPSEC_XFORM_MAX 2 + +#define IP6_VERSION (6) + +struct rte_crypto_xform; +struct ipsec_xform; +struct rte_mbuf; + +struct ipsec_sa; +/* + * Keeps number of configured SA's for each address family: + */ +struct ipsec_sa_cnt { + uint32_t nb_v4; + uint32_t nb_v6; +}; + +typedef int32_t (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +struct ip_addr { + union { + uint32_t ip4; + union { + uint64_t ip6[2]; + uint8_t ip6_b[16]; + } ip6; + } ip; +}; + +#define MAX_KEY_SIZE 36 + +/* + * application wide SA parameters + */ +struct app_sa_prm { + uint32_t enable; /* use librte_ipsec API for ipsec pkt processing */ + uint32_t window_size; /* replay window size */ + uint32_t enable_esn; /* enable/disable ESN support */ + uint32_t cache_sz; /* per lcore SA cache size */ + uint64_t flags; /* rte_ipsec_sa_prm.flags */ +}; + +extern struct app_sa_prm app_sa_prm; + +struct flow_info { + struct rte_flow *rx_def_flow; +}; + +extern struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; + +enum { + IPSEC_SESSION_PRIMARY = 0, + IPSEC_SESSION_FALLBACK = 1, + IPSEC_SESSION_MAX +}; + +#define IPSEC_SA_OFFLOAD_FALLBACK_FLAG (1) + +static inline struct ipsec_sa * +ipsec_mask_saptr(void *ptr) +{ + uintptr_t i = (uintptr_t)ptr; + static const uintptr_t mask = IPSEC_SA_OFFLOAD_FALLBACK_FLAG; + + i &= ~mask; + + return (struct ipsec_sa *)i; +} + +struct ipsec_sa { + struct rte_ipsec_session sessions[IPSEC_SESSION_MAX]; + uint32_t spi; + uint32_t cdev_id_qp; + uint64_t seq; + uint32_t salt; + uint32_t fallback_sessions; + enum rte_crypto_cipher_algorithm cipher_algo; + enum rte_crypto_auth_algorithm auth_algo; + enum rte_crypto_aead_algorithm aead_algo; + uint16_t digest_len; + uint16_t iv_len; + uint16_t block_size; + uint16_t flags; +#define IP4_TUNNEL (1 << 0) +#define IP6_TUNNEL (1 << 1) +#define TRANSPORT (1 << 2) +#define IP4_TRANSPORT (1 << 3) +#define IP6_TRANSPORT (1 << 4) + struct ip_addr src; + struct ip_addr dst; + uint8_t cipher_key[MAX_KEY_SIZE]; + uint16_t cipher_key_len; + uint8_t auth_key[MAX_KEY_SIZE]; + uint16_t auth_key_len; + uint16_t aad_len; + union { + struct rte_crypto_sym_xform *xforms; + struct rte_security_ipsec_xform *sec_xform; + }; + enum rte_security_ipsec_sa_direction direction; + uint16_t portid; + uint8_t fdir_qid; + uint8_t fdir_flag; + +#define MAX_RTE_FLOW_PATTERN (4) +#define MAX_RTE_FLOW_ACTIONS (3) + struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN]; + struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS]; + struct rte_flow_attr attr; + union { + struct rte_flow_item_ipv4 ipv4_spec; + struct rte_flow_item_ipv6 ipv6_spec; + }; + struct rte_flow_item_esp esp_spec; + struct rte_flow *flow; + struct rte_security_session_conf sess_conf; +} __rte_cache_aligned; + +struct ipsec_xf { + struct rte_crypto_sym_xform a; + struct rte_crypto_sym_xform b; +}; + +struct ipsec_sad { + struct rte_ipsec_sad *sad_v4; + struct rte_ipsec_sad *sad_v6; +}; + +struct sa_ctx { + void *satbl; /* pointer to array of rte_ipsec_sa 
objects*/ + struct ipsec_sad sad; + struct ipsec_xf *xf; + uint32_t nb_sa; + struct ipsec_sa sa[]; +}; + +struct ipsec_mbuf_metadata { + struct ipsec_sa *sa; + struct rte_crypto_op cop; + struct rte_crypto_sym_op sym_cop; + uint8_t buf[32]; +} __rte_cache_aligned; + +#define IS_TRANSPORT(flags) ((flags) & TRANSPORT) + +#define IS_TUNNEL(flags) ((flags) & (IP4_TUNNEL | IP6_TUNNEL)) + +#define IS_IP4(flags) ((flags) & (IP4_TUNNEL | IP4_TRANSPORT)) + +#define IS_IP6(flags) ((flags) & (IP6_TUNNEL | IP6_TRANSPORT)) + +#define IS_IP4_TUNNEL(flags) ((flags) & IP4_TUNNEL) + +#define IS_IP6_TUNNEL(flags) ((flags) & IP6_TUNNEL) + +/* + * Macro for getting ipsec_sa flags statuses without version of protocol + * used for transport (IP4_TRANSPORT and IP6_TRANSPORT flags). + */ +#define WITHOUT_TRANSPORT_VERSION(flags) \ + ((flags) & (IP4_TUNNEL | \ + IP6_TUNNEL | \ + TRANSPORT)) + +struct cdev_qp { + uint16_t id; + uint16_t qp; + uint16_t in_flight; + uint16_t len; + struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); +}; + +struct ipsec_ctx { + struct rte_hash *cdev_map; + struct sp_ctx *sp4_ctx; + struct sp_ctx *sp6_ctx; + struct sa_ctx *sa_ctx; + uint16_t nb_qps; + uint16_t last_qp; + struct cdev_qp tbl[MAX_QP_PER_LCORE]; + struct rte_mempool *session_pool; + struct rte_mempool *session_priv_pool; + struct rte_mbuf *ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); + uint16_t ol_pkts_cnt; + uint64_t ipv4_offloads; + uint64_t ipv6_offloads; +}; + +struct cdev_key { + uint16_t lcore_id; + uint8_t cipher_algo; + uint8_t auth_algo; + uint8_t aead_algo; +}; + +struct socket_ctx { + struct sa_ctx *sa_in; + struct sa_ctx *sa_out; + struct sp_ctx *sp_ip4_in; + struct sp_ctx *sp_ip4_out; + struct sp_ctx *sp_ip6_in; + struct sp_ctx *sp_ip6_out; + struct rt_ctx *rt_ip4; + struct rt_ctx *rt_ip6; + struct rte_mempool *mbuf_pool; + struct rte_mempool *mbuf_pool_indir; + struct rte_mempool *session_pool; + struct rte_mempool *session_priv_pool; +}; + +struct cnt_blk { + uint32_t salt; + uint64_t iv; + uint32_t cnt; +} __rte_packed; + +/* Socket ctx */ +extern struct socket_ctx socket_ctx[NB_SOCKETS]; + +void +ipsec_poll_mode_worker(void); + +int +ipsec_launch_one_lcore(void *args); + +extern struct ipsec_sa *sa_out; +extern uint32_t nb_sa_out; + +extern struct ipsec_sa *sa_in; +extern uint32_t nb_sa_in; + +uint16_t +ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t nb_pkts, uint16_t len); + +uint16_t +ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len); + +uint16_t +ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t len); + +uint16_t +ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t len); + +void +ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf); + +void +ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf); + +static inline uint16_t +ipsec_metadata_size(void) +{ + return sizeof(struct ipsec_mbuf_metadata); +} + +static inline struct ipsec_mbuf_metadata * +get_priv(struct rte_mbuf *m) +{ + return rte_mbuf_to_priv(m); +} + +static inline void * +get_cnt_blk(struct rte_mbuf *m) +{ + struct ipsec_mbuf_metadata *priv = get_priv(m); + + return &priv->buf[0]; +} + +static inline void * +get_aad(struct rte_mbuf *m) +{ + struct ipsec_mbuf_metadata *priv = get_priv(m); + + return &priv->buf[16]; +} + +static inline void * +get_sym_cop(struct rte_crypto_op *cop) +{ + return (cop + 1); +} + +static inline struct 
rte_ipsec_session * +ipsec_get_primary_session(struct ipsec_sa *sa) +{ + return &sa->sessions[IPSEC_SESSION_PRIMARY]; +} + +static inline struct rte_ipsec_session * +ipsec_get_fallback_session(struct ipsec_sa *sa) +{ + return &sa->sessions[IPSEC_SESSION_FALLBACK]; +} + +static inline enum rte_security_session_action_type +ipsec_get_action_type(struct ipsec_sa *sa) +{ + struct rte_ipsec_session *ips; + ips = ipsec_get_primary_session(sa); + return ips->type; +} + +int +inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx); + +void +inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[], + void *sa[], uint16_t nb_pkts); + +void +outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], + void *sa[], uint16_t nb_pkts); + +void +sp4_init(struct socket_ctx *ctx, int32_t socket_id); + +void +sp6_init(struct socket_ctx *ctx, int32_t socket_id); + +/* + * Search through SP rules for given SPI. + * Returns first rule index if found(greater or equal then zero), + * or -ENOENT otherwise. + */ +int +sp4_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2], + uint32_t mask[2]); +int +sp6_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2], + uint32_t mask[2]); + +/* + * Search through SA entries for given SPI. + * Returns first entry index if found(greater or equal then zero), + * or -ENOENT otherwise. + */ +int +sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound); + +void +sa_init(struct socket_ctx *ctx, int32_t socket_id); + +void +rt_init(struct socket_ctx *ctx, int32_t socket_id); + +int +sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads, + uint64_t *tx_offloads); + +int +add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr); + +void +enqueue_cop_burst(struct cdev_qp *cqp); + +int +create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa, + struct rte_ipsec_session *ips); + +int +create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, + struct rte_ipsec_session *ips); +int +check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid); + +int +create_ipsec_esp_flow(struct ipsec_sa *sa); + +uint32_t +get_nb_crypto_sessions(void); + +#endif /* __IPSEC_H__ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec_process.c b/src/spdk/dpdk/examples/ipsec-secgw/ipsec_process.c new file mode 100644 index 000000000..6d3a3c9a1 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec_process.c @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2020 Intel Corporation + */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> + +#include <rte_branch_prediction.h> +#include <rte_log.h> +#include <rte_cryptodev.h> +#include <rte_ethdev.h> +#include <rte_mbuf.h> + +#include "ipsec.h" + +#define SATP_OUT_IPV4(t) \ + ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \ + (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \ + ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4) + +/* helper routine to free bulk of packets */ +static inline void +free_pkts(struct rte_mbuf *mb[], uint32_t n) +{ + uint32_t i; + + for (i = 0; i != n; i++) + rte_pktmbuf_free(mb[i]); +} + +/* helper routine to free bulk of crypto-ops and related packets */ +static inline void +free_cops(struct rte_crypto_op *cop[], uint32_t n) +{ + uint32_t i; + + for (i = 0; i != n; i++) + rte_pktmbuf_free(cop[i]->sym->m_src); +} + +/* helper routine to enqueue bulk of crypto ops */ +static inline void +enqueue_cop_bulk(struct cdev_qp *cqp, 
struct rte_crypto_op *cop[], uint32_t num) +{ + uint32_t i, k, len, n; + + len = cqp->len; + + /* + * if cqp is empty and we have enough ops, + * then queue them to the PMD straightway. + */ + if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) { + n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num); + cqp->in_flight += n; + free_cops(cop + n, num - n); + return; + } + + k = 0; + + do { + n = RTE_DIM(cqp->buf) - len; + n = RTE_MIN(num - k, n); + + /* put packets into cqp */ + for (i = 0; i != n; i++) + cqp->buf[len + i] = cop[k + i]; + + len += n; + k += n; + + /* if cqp is full then, enqueue crypto-ops to PMD */ + if (len == RTE_DIM(cqp->buf)) { + n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, + cqp->buf, len); + cqp->in_flight += n; + free_cops(cqp->buf + n, len - n); + len = 0; + } + + + } while (k != num); + + cqp->len = len; +} + +static inline int +fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx, + struct ipsec_sa *sa) +{ + int32_t rc; + + /* setup crypto section */ + if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE || + ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) { + RTE_ASSERT(ss->crypto.ses == NULL); + rc = create_lookaside_session(ctx, sa, ss); + if (rc != 0) + return rc; + /* setup session action type */ + } else if (ss->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) { + RTE_ASSERT(ss->security.ses == NULL); + rc = create_lookaside_session(ctx, sa, ss); + if (rc != 0) + return rc; + } else + RTE_ASSERT(0); + + rc = rte_ipsec_session_prepare(ss); + if (rc != 0) + memset(ss, 0, sizeof(*ss)); + + return rc; +} + +/* + * group input packets byt the SA they belong to. + */ +static uint32_t +sa_group(void *sa_ptr[], struct rte_mbuf *pkts[], + struct rte_ipsec_group grp[], uint32_t num) +{ + uint32_t i, n, spi; + void *sa; + void * const nosa = &spi; + + sa = nosa; + grp[0].m = pkts; + for (i = 0, n = 0; i != num; i++) { + + if (sa != sa_ptr[i]) { + grp[n].cnt = pkts + i - grp[n].m; + n += (sa != nosa); + grp[n].id.ptr = sa_ptr[i]; + grp[n].m = pkts + i; + sa = sa_ptr[i]; + } + } + + /* terminate last group */ + if (sa != nosa) { + grp[n].cnt = pkts + i - grp[n].m; + n++; + } + + return n; +} + +/* + * helper function, splits processed packets into ipv4/ipv6 traffic. 
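+ *
+ * Each packet is steered into trf->ip4 or trf->ip6 and, alongside it,
+ * a pointer to its next-protocol byte is stored so the SP (ACL) stage
+ * can classify without re-parsing the header.  That byte sits at a
+ * different offset per address family; a minimal sketch of the
+ * selection (same offsetof() idea as the code below, assuming the
+ * usual <netinet/ip.h> and <netinet/ip6.h> layouts):
+ *
+ *	#include <stddef.h>
+ *	#include <netinet/ip.h>
+ *	#include <netinet/ip6.h>
+ *
+ *	static size_t
+ *	next_proto_ofs(int is_ipv4)
+ *	{
+ *		return is_ipv4 ? offsetof(struct ip, ip_p)
+ *			       : offsetof(struct ip6_hdr, ip6_nxt);
+ *	}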
+ */ +static inline void +copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[], + uint32_t num) +{ + uint32_t j, ofs, s; + struct traffic_type *out; + + /* + * determine traffic type(ipv4/ipv6) and offset for ACL classify + * based on SA type + */ + if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) { + if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) { + out = &trf->ip4; + ofs = offsetof(struct ip, ip_p); + } else { + out = &trf->ip6; + ofs = offsetof(struct ip6_hdr, ip6_nxt); + } + } else if (SATP_OUT_IPV4(satp)) { + out = &trf->ip4; + ofs = offsetof(struct ip, ip_p); + } else { + out = &trf->ip6; + ofs = offsetof(struct ip6_hdr, ip6_nxt); + } + + for (j = 0, s = out->num; j != num; j++) { + out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j], + void *, ofs); + out->pkts[s + j] = mb[j]; + } + + out->num += num; +} + +static uint32_t +ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa, + struct rte_ipsec_session *ips, struct rte_mbuf **m, + unsigned int cnt) +{ + struct cdev_qp *cqp; + struct rte_crypto_op *cop[cnt]; + uint32_t j, k; + struct ipsec_mbuf_metadata *priv; + + cqp = &ctx->tbl[sa->cdev_id_qp]; + + /* for that app each mbuf has it's own crypto op */ + for (j = 0; j != cnt; j++) { + priv = get_priv(m[j]); + cop[j] = &priv->cop; + /* + * this is just to satisfy inbound_sa_check() + * should be removed in future. + */ + priv->sa = sa; + } + + /* prepare and enqueue crypto ops */ + k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt); + if (k != 0) + enqueue_cop_bulk(cqp, cop, k); + + return k; +} + +/* + * helper routine for inline and cpu(synchronous) processing + * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt(). + * Should be removed in future. + */ +static inline void +prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt) +{ + uint32_t j; + struct ipsec_mbuf_metadata *priv; + + for (j = 0; j != cnt; j++) { + priv = get_priv(mb[j]); + priv->sa = sa; + } +} + +/* + * finish processing of packets successfully decrypted by an inline processor + */ +static uint32_t +ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa, + struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt) +{ + uint64_t satp; + uint32_t k; + + /* get SA type */ + satp = rte_ipsec_sa_type(ips->sa); + prep_process_group(sa, mb, cnt); + + k = rte_ipsec_pkt_process(ips, mb, cnt); + copy_to_trf(trf, satp, mb, k); + return k; +} + +/* + * process packets synchronously + */ +static uint32_t +ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa, + struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt) +{ + uint64_t satp; + uint32_t k; + + /* get SA type */ + satp = rte_ipsec_sa_type(ips->sa); + prep_process_group(sa, mb, cnt); + + k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt); + k = rte_ipsec_pkt_process(ips, mb, k); + copy_to_trf(trf, satp, mb, k); + return k; +} + +/* + * Process ipsec packets. + * If packet belong to SA that is subject of inline-crypto, + * then process it immediately. + * Otherwise do necessary preparations and queue it to related + * crypto-dev queue. 
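+ *
+ * Packets arrive with one SA pointer each (trf->ipsec.saptr[]); they
+ * are first collapsed into runs of consecutive packets sharing the
+ * same SA, so each run needs only one session lookup/prepare.  A
+ * minimal standalone sketch of that run-length grouping (illustrative
+ * names; the real sa_group() stores the runs into rte_ipsec_group):
+ *
+ *	struct run { unsigned int start, cnt; };
+ *
+ *	static unsigned int
+ *	group_runs(void *const key[], unsigned int num, struct run grp[])
+ *	{
+ *		unsigned int i, n = 0;
+ *
+ *		for (i = 0; i < num; i++) {
+ *			if (i == 0 || key[i] != key[i - 1]) {
+ *				grp[n].start = i;	/* new run      */
+ *				grp[n].cnt = 0;
+ *				n++;
+ *			}
+ *			grp[n - 1].cnt++;		/* extend run   */
+ *		}
+ *		return n;		/* number of runs produced      */
+ *	}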
+ */ +void +ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf) +{ + uint32_t i, k, n; + struct ipsec_sa *sa; + struct rte_ipsec_group *pg; + struct rte_ipsec_session *ips; + struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)]; + + n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num); + + for (i = 0; i != n; i++) { + + pg = grp + i; + sa = ipsec_mask_saptr(pg->id.ptr); + + /* fallback to cryptodev with RX packets which inline + * processor was unable to process + */ + if (sa != NULL) + ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ? + ipsec_get_fallback_session(sa) : + ipsec_get_primary_session(sa); + + /* no valid HW session for that SA, try to create one */ + if (sa == NULL || (ips->crypto.ses == NULL && + fill_ipsec_session(ips, ctx, sa) != 0)) + k = 0; + + /* process packets inline */ + else { + switch (ips->type) { + /* enqueue packets to crypto dev */ + case RTE_SECURITY_ACTION_TYPE_NONE: + case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL: + k = ipsec_prepare_crypto_group(ctx, sa, ips, + pg->m, pg->cnt); + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + k = ipsec_process_inline_group(ips, sa, + trf, pg->m, pg->cnt); + break; + case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO: + k = ipsec_process_cpu_group(ips, sa, + trf, pg->m, pg->cnt); + break; + default: + k = 0; + } + } + + /* drop packets that cannot be enqueued/processed */ + if (k != pg->cnt) + free_pkts(pg->m + k, pg->cnt - k); + } +} + +static inline uint32_t +cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num) +{ + uint32_t n; + + if (cqp->in_flight == 0) + return 0; + + n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num); + RTE_ASSERT(cqp->in_flight >= n); + cqp->in_flight -= n; + + return n; +} + +static inline uint32_t +ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num) +{ + uint32_t i, n; + + n = 0; + + for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++) + n += cqp_dequeue(ctx->tbl + i, cop + n, num - n); + + for (i = 0; n != num && i != ctx->last_qp; i++) + n += cqp_dequeue(ctx->tbl + i, cop + n, num - n); + + ctx->last_qp = i; + return n; +} + +/* + * dequeue packets from crypto-queues and finalize processing. 
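+ *
+ * Completed crypto-ops are drained from the lcore's queue pairs in a
+ * round-robin fashion, resuming from ctx->last_qp so no queue pair is
+ * starved when the burst fills up early.  A minimal standalone sketch
+ * of that scan (drain_one() stands in for cqp_dequeue()):
+ *
+ *	static unsigned int
+ *	rr_drain(unsigned int nb_q, unsigned int *last, unsigned int want,
+ *		unsigned int (*drain_one)(unsigned int q, unsigned int room))
+ *	{
+ *		unsigned int i, got = 0;
+ *
+ *		for (i = *last; got < want && i < nb_q; i++)
+ *			got += drain_one(i, want - got);
+ *		for (i = 0; got < want && i < *last; i++)
+ *			got += drain_one(i, want - got);
+ *		*last = i;		/* resume point for next call   */
+ *		return got;
+ *	}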
+ */ +void +ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf) +{ + uint64_t satp; + uint32_t i, k, n, ng; + struct rte_ipsec_session *ss; + struct traffic_type *out; + struct rte_ipsec_group *pg; + struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)]; + struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)]; + + trf->ip4.num = 0; + trf->ip6.num = 0; + + out = &trf->ipsec; + + /* dequeue completed crypto-ops */ + n = ctx_dequeue(ctx, cop, RTE_DIM(cop)); + if (n == 0) + return; + + /* group them by ipsec session */ + ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **) + (uintptr_t)cop, out->pkts, grp, n); + + /* process each group of packets */ + for (i = 0; i != ng; i++) { + + pg = grp + i; + ss = pg->id.ptr; + satp = rte_ipsec_sa_type(ss->sa); + + k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt); + copy_to_trf(trf, satp, pg->m, k); + + /* free bad packets, if any */ + free_pkts(pg->m + k, pg->cnt - k); + + n -= pg->cnt; + } + + /* we should never have packet with unknown SA here */ + RTE_VERIFY(n == 0); +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec_worker.c b/src/spdk/dpdk/examples/ipsec-secgw/ipsec_worker.c new file mode 100644 index 000000000..b6c851f25 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec_worker.c @@ -0,0 +1,644 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + * Copyright (C) 2020 Marvell International Ltd. + */ +#include <rte_acl.h> +#include <rte_event_eth_tx_adapter.h> +#include <rte_lpm.h> +#include <rte_lpm6.h> + +#include "event_helper.h" +#include "ipsec.h" +#include "ipsec-secgw.h" +#include "ipsec_worker.h" + +static inline enum pkt_type +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp) +{ + struct rte_ether_hdr *eth; + + eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + + offsetof(struct ip, ip_p)); + if (**nlp == IPPROTO_ESP) + return PKT_TYPE_IPSEC_IPV4; + else + return PKT_TYPE_PLAIN_IPV4; + } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + + offsetof(struct ip6_hdr, ip6_nxt)); + if (**nlp == IPPROTO_ESP) + return PKT_TYPE_IPSEC_IPV6; + else + return PKT_TYPE_PLAIN_IPV6; + } + + /* Unknown/Unsupported type */ + return PKT_TYPE_INVALID; +} + +static inline void +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid) +{ + struct rte_ether_hdr *ethhdr; + + ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + memcpy(ðhdr->s_addr, ðaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN); + memcpy(ðhdr->d_addr, ðaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN); +} + +static inline void +ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) +{ + /* Save the destination port in the mbuf */ + m->port = port_id; + + /* Save eth queue for Tx */ + rte_event_eth_tx_adapter_txq_set(m, 0); +} + +static inline void +prepare_out_sessions_tbl(struct sa_ctx *sa_out, + struct rte_security_session **sess_tbl, uint16_t size) +{ + struct rte_ipsec_session *pri_sess; + struct ipsec_sa *sa; + uint32_t i; + + if (!sa_out) + return; + + for (i = 0; i < sa_out->nb_sa; i++) { + + sa = &sa_out->sa[i]; + if (!sa) + continue; + + pri_sess = ipsec_get_primary_session(sa); + if (!pri_sess) + continue; + + if (pri_sess->type != + RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) { + + RTE_LOG(ERR, IPSEC, "Invalid session type %d\n", + pri_sess->type); + continue; + } + + if (sa->portid >= size) { + RTE_LOG(ERR, IPSEC, + "Port 
id >= than table size %d, %d\n", + sa->portid, size); + continue; + } + + /* Use only first inline session found for a given port */ + if (sess_tbl[sa->portid]) + continue; + sess_tbl[sa->portid] = pri_sess->security.ses; + } +} + +static inline int +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx) +{ + uint32_t res; + + if (unlikely(sp == NULL)) + return 0; + + rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1, + DEFAULT_MAX_CATEGORIES); + + if (unlikely(res == DISCARD)) + return 0; + else if (res == BYPASS) { + *sa_idx = -1; + return 1; + } + + *sa_idx = res - 1; + return 1; +} + +static inline uint16_t +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) +{ + uint32_t dst_ip; + uint16_t offset; + uint32_t hop; + int ret; + + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst); + dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset); + dst_ip = rte_be_to_cpu_32(dst_ip); + + ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop); + + if (ret == 0) { + /* We have a hit */ + return hop; + } + + /* else */ + return RTE_MAX_ETHPORTS; +} + +/* TODO: To be tested */ +static inline uint16_t +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) +{ + uint8_t dst_ip[16]; + uint8_t *ip6_dst; + uint16_t offset; + uint32_t hop; + int ret; + + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst); + ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset); + memcpy(&dst_ip[0], ip6_dst, 16); + + ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop); + + if (ret == 0) { + /* We have a hit */ + return hop; + } + + /* else */ + return RTE_MAX_ETHPORTS; +} + +static inline uint16_t +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type) +{ + if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4) + return route4_pkt(pkt, rt->rt4_ctx); + else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6) + return route6_pkt(pkt, rt->rt6_ctx); + + return RTE_MAX_ETHPORTS; +} + +static inline int +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, + struct rte_event *ev) +{ + struct ipsec_sa *sa = NULL; + struct rte_mbuf *pkt; + uint16_t port_id = 0; + enum pkt_type type; + uint32_t sa_idx; + uint8_t *nlp; + + /* Get pkt from event */ + pkt = ev->mbuf; + + /* Check the packet type */ + type = process_ipsec_get_pkt_type(pkt, &nlp); + + switch (type) { + case PKT_TYPE_PLAIN_IPV4: + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) { + if (unlikely(pkt->ol_flags & + PKT_RX_SEC_OFFLOAD_FAILED)) { + RTE_LOG(ERR, IPSEC, + "Inbound security offload failed\n"); + goto drop_pkt_and_exit; + } + sa = pkt->userdata; + } + + /* Check if we have a match */ + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + + case PKT_TYPE_PLAIN_IPV6: + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) { + if (unlikely(pkt->ol_flags & + PKT_RX_SEC_OFFLOAD_FAILED)) { + RTE_LOG(ERR, IPSEC, + "Inbound security offload failed\n"); + goto drop_pkt_and_exit; + } + sa = pkt->userdata; + } + + /* Check if we have a match */ + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + + default: + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); + goto drop_pkt_and_exit; + } + + /* Check if the packet has to be bypassed */ + if (sa_idx == BYPASS) + goto route_and_send_pkt; + + /* Validate sa_idx */ + if (sa_idx >= ctx->sa_ctx->nb_sa) + goto drop_pkt_and_exit; + + /* Else the packet has to be protected with SA */ + + /* If the 
packet was IPsec processed, then SA pointer should be set */ + if (sa == NULL) + goto drop_pkt_and_exit; + + /* SPI on the packet should match with the one in SA */ + if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi)) + goto drop_pkt_and_exit; + +route_and_send_pkt: + port_id = get_route(pkt, rt, type); + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { + /* no match */ + goto drop_pkt_and_exit; + } + /* else, we have a matching route */ + + /* Update mac addresses */ + update_mac_addrs(pkt, port_id); + + /* Update the event with the dest port */ + ipsec_event_pre_forward(pkt, port_id); + return PKT_FORWARDED; + +drop_pkt_and_exit: + RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n"); + rte_pktmbuf_free(pkt); + ev->mbuf = NULL; + return PKT_DROPPED; +} + +static inline int +process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt, + struct rte_event *ev) +{ + struct rte_ipsec_session *sess; + struct sa_ctx *sa_ctx; + struct rte_mbuf *pkt; + uint16_t port_id = 0; + struct ipsec_sa *sa; + enum pkt_type type; + uint32_t sa_idx; + uint8_t *nlp; + + /* Get pkt from event */ + pkt = ev->mbuf; + + /* Check the packet type */ + type = process_ipsec_get_pkt_type(pkt, &nlp); + + switch (type) { + case PKT_TYPE_PLAIN_IPV4: + /* Check if we have a match */ + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + case PKT_TYPE_PLAIN_IPV6: + /* Check if we have a match */ + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + default: + /* + * Only plain IPv4 & IPv6 packets are allowed + * on protected port. Drop the rest. + */ + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); + goto drop_pkt_and_exit; + } + + /* Check if the packet has to be bypassed */ + if (sa_idx == BYPASS) { + port_id = get_route(pkt, rt, type); + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { + /* no match */ + goto drop_pkt_and_exit; + } + /* else, we have a matching route */ + goto send_pkt; + } + + /* Validate sa_idx */ + if (sa_idx >= ctx->sa_ctx->nb_sa) + goto drop_pkt_and_exit; + + /* Else the packet has to be protected */ + + /* Get SA ctx*/ + sa_ctx = ctx->sa_ctx; + + /* Get SA */ + sa = &(sa_ctx->sa[sa_idx]); + + /* Get IPsec session */ + sess = ipsec_get_primary_session(sa); + + /* Allow only inline protocol for now */ + if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) { + RTE_LOG(ERR, IPSEC, "SA type not supported\n"); + goto drop_pkt_and_exit; + } + + if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA) + pkt->userdata = sess->security.ses; + + /* Mark the packet for Tx security offload */ + pkt->ol_flags |= PKT_TX_SEC_OFFLOAD; + + /* Get the port to which this pkt need to be submitted */ + port_id = sa->portid; + +send_pkt: + /* Update mac addresses */ + update_mac_addrs(pkt, port_id); + + /* Update the event with the dest port */ + ipsec_event_pre_forward(pkt, port_id); + return PKT_FORWARDED; + +drop_pkt_and_exit: + RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n"); + rte_pktmbuf_free(pkt); + ev->mbuf = NULL; + return PKT_DROPPED; +} + +/* + * Event mode exposes various operating modes depending on the + * capabilities of the event device and the operating mode + * selected. 
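+ *
+ * Each supported combination (burst capability, Tx internal port,
+ * driver/app IPsec path) is registered below as a separate worker and
+ * the event helper launches the entry whose capabilities match the
+ * ones detected at runtime.  A minimal standalone sketch of that match
+ * (field names are illustrative, not the eh_app_worker_params layout):
+ *
+ *	typedef void (*wrkr_fn)(void);
+ *	struct caps { unsigned char burst, tx_int_port, ipsec_mode; };
+ *	struct wrkr { struct caps cap; wrkr_fn fn; };
+ *
+ *	static wrkr_fn
+ *	pick_worker(const struct wrkr *w, unsigned int n, struct caps need)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (i = 0; i < n; i++)
+ *			if (w[i].cap.burst == need.burst &&
+ *			    w[i].cap.tx_int_port == need.tx_int_port &&
+ *			    w[i].cap.ipsec_mode == need.ipsec_mode)
+ *				return w[i].fn;	/* first match wins */
+ *		return NULL;
+ *	}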
+ */ + +/* Workers registered */ +#define IPSEC_EVENTMODE_WORKERS 2 + +/* + * Event mode worker + * Operating parameters : non-burst - Tx internal port - driver mode + */ +static void +ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links, + uint8_t nb_links) +{ + struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL }; + unsigned int nb_rx = 0; + struct rte_mbuf *pkt; + struct rte_event ev; + uint32_t lcore_id; + int32_t socket_id; + int16_t port_id; + + /* Check if we have links registered for this lcore */ + if (nb_links == 0) { + /* No links registered - exit */ + return; + } + + /* Get core ID */ + lcore_id = rte_lcore_id(); + + /* Get socket ID */ + socket_id = rte_lcore_to_socket_id(lcore_id); + + /* + * Prepare security sessions table. In outbound driver mode + * we always use first session configured for a given port + */ + prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl, + RTE_MAX_ETHPORTS); + + RTE_LOG(INFO, IPSEC, + "Launching event mode worker (non-burst - Tx internal port - " + "driver mode) on lcore %d\n", lcore_id); + + /* We have valid links */ + + /* Check if it's single link */ + if (nb_links != 1) { + RTE_LOG(INFO, IPSEC, + "Multiple links not supported. Using first link\n"); + } + + RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id, + links[0].event_port_id); + while (!force_quit) { + /* Read packet from event queues */ + nb_rx = rte_event_dequeue_burst(links[0].eventdev_id, + links[0].event_port_id, + &ev, /* events */ + 1, /* nb_events */ + 0 /* timeout_ticks */); + + if (nb_rx == 0) + continue; + + pkt = ev.mbuf; + port_id = pkt->port; + + rte_prefetch0(rte_pktmbuf_mtod(pkt, void *)); + + /* Process packet */ + ipsec_event_pre_forward(pkt, port_id); + + if (!is_unprotected_port(port_id)) { + + if (unlikely(!sess_tbl[port_id])) { + rte_pktmbuf_free(pkt); + continue; + } + + /* Save security session */ + pkt->userdata = sess_tbl[port_id]; + + /* Mark the packet for Tx security offload */ + pkt->ol_flags |= PKT_TX_SEC_OFFLOAD; + } + + /* + * Since tx internal port is available, events can be + * directly enqueued to the adapter and it would be + * internally submitted to the eth device. 
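+ *
+ * Before handing the event over, driver mode only has to attach the
+ * pre-selected outbound session for the packet's port and flag the
+ * mbuf for inline IPsec on Tx, as done above.  A minimal standalone
+ * sketch of that step (struct pkt and TX_SEC_OFFLOAD are illustrative
+ * stand-ins for the mbuf fields and PKT_TX_SEC_OFFLOAD):
+ *
+ *	struct pkt { unsigned long ol_flags; void *userdata;
+ *		unsigned short port; };
+ *	#define TX_SEC_OFFLOAD	(1UL << 0)	/* illustrative bit     */
+ *
+ *	static int
+ *	mark_for_inline_tx(struct pkt *p, void *const sess_by_port[])
+ *	{
+ *		void *ses = sess_by_port[p->port];
+ *
+ *		if (ses == NULL)
+ *			return -1;	/* no session: caller drops pkt */
+ *		p->userdata = ses;	/* consumed by the PMD on Tx    */
+ *		p->ol_flags |= TX_SEC_OFFLOAD;
+ *		return 0;
+ *	}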
+ */ + rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id, + links[0].event_port_id, + &ev, /* events */ + 1, /* nb_events */ + 0 /* flags */); + } +} + +/* + * Event mode worker + * Operating parameters : non-burst - Tx internal port - app mode + */ +static void +ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links, + uint8_t nb_links) +{ + struct lcore_conf_ev_tx_int_port_wrkr lconf; + unsigned int nb_rx = 0; + struct rte_event ev; + uint32_t lcore_id; + int32_t socket_id; + int ret; + + /* Check if we have links registered for this lcore */ + if (nb_links == 0) { + /* No links registered - exit */ + return; + } + + /* We have valid links */ + + /* Get core ID */ + lcore_id = rte_lcore_id(); + + /* Get socket ID */ + socket_id = rte_lcore_to_socket_id(lcore_id); + + /* Save routing table */ + lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4; + lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6; + lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in; + lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in; + lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in; + lconf.inbound.session_pool = socket_ctx[socket_id].session_pool; + lconf.inbound.session_priv_pool = + socket_ctx[socket_id].session_priv_pool; + lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out; + lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out; + lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out; + lconf.outbound.session_pool = socket_ctx[socket_id].session_pool; + lconf.outbound.session_priv_pool = + socket_ctx[socket_id].session_priv_pool; + + RTE_LOG(INFO, IPSEC, + "Launching event mode worker (non-burst - Tx internal port - " + "app mode) on lcore %d\n", lcore_id); + + /* Check if it's single link */ + if (nb_links != 1) { + RTE_LOG(INFO, IPSEC, + "Multiple links not supported. Using first link\n"); + } + + RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id, + links[0].event_port_id); + + while (!force_quit) { + /* Read packet from event queues */ + nb_rx = rte_event_dequeue_burst(links[0].eventdev_id, + links[0].event_port_id, + &ev, /* events */ + 1, /* nb_events */ + 0 /* timeout_ticks */); + + if (nb_rx == 0) + continue; + + if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) { + RTE_LOG(ERR, IPSEC, "Invalid event type %u", + ev.event_type); + + continue; + } + + if (is_unprotected_port(ev.mbuf->port)) + ret = process_ipsec_ev_inbound(&lconf.inbound, + &lconf.rt, &ev); + else + ret = process_ipsec_ev_outbound(&lconf.outbound, + &lconf.rt, &ev); + if (ret != 1) + /* The pkt has been dropped */ + continue; + + /* + * Since tx internal port is available, events can be + * directly enqueued to the adapter and it would be + * internally submitted to the eth device. 
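+ *
+ * In app mode the full lookup path runs in software: the event's mbuf
+ * is classified by the port it arrived on and routed through either
+ * the inbound or the outbound SP/SA processing shown above.  A minimal
+ * sketch of that dispatch (a simple port bitmask stands in for
+ * is_unprotected_port()):
+ *
+ *	static int
+ *	dispatch(unsigned int port, unsigned long long unprotected_mask,
+ *		int (*inbound)(void), int (*outbound)(void))
+ *	{
+ *		if ((unprotected_mask >> port) & 1ULL)
+ *			return inbound();	/* ciphertext side      */
+ *		return outbound();		/* plaintext side       */
+ *	}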
+ */ + rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id, + links[0].event_port_id, + &ev, /* events */ + 1, /* nb_events */ + 0 /* flags */); + } +} + +static uint8_t +ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs) +{ + struct eh_app_worker_params *wrkr; + uint8_t nb_wrkr_param = 0; + + /* Save workers */ + wrkr = wrkrs; + + /* Non-burst - Tx internal port - driver mode */ + wrkr->cap.burst = EH_RX_TYPE_NON_BURST; + wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT; + wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER; + wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode; + wrkr++; + nb_wrkr_param++; + + /* Non-burst - Tx internal port - app mode */ + wrkr->cap.burst = EH_RX_TYPE_NON_BURST; + wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT; + wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP; + wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode; + nb_wrkr_param++; + + return nb_wrkr_param; +} + +static void +ipsec_eventmode_worker(struct eh_conf *conf) +{ + struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = { + {{{0} }, NULL } }; + uint8_t nb_wrkr_param; + + /* Populate l2fwd_wrkr params */ + nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr); + + /* + * Launch correct worker after checking + * the event device's capabilities. + */ + eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param); +} + +int ipsec_launch_one_lcore(void *args) +{ + struct eh_conf *conf; + + conf = (struct eh_conf *)args; + + if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) { + /* Run in poll mode */ + ipsec_poll_mode_worker(); + } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) { + /* Run in event mode */ + ipsec_eventmode_worker(conf); + } + return 0; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/ipsec_worker.h b/src/spdk/dpdk/examples/ipsec-secgw/ipsec_worker.h new file mode 100644 index 000000000..5d85cf1f0 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/ipsec_worker.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2020 Marvell International Ltd. + */ +#ifndef _IPSEC_WORKER_H_ +#define _IPSEC_WORKER_H_ + +#include "ipsec.h" + +enum pkt_type { + PKT_TYPE_PLAIN_IPV4 = 1, + PKT_TYPE_IPSEC_IPV4, + PKT_TYPE_PLAIN_IPV6, + PKT_TYPE_IPSEC_IPV6, + PKT_TYPE_INVALID +}; + +enum { + PKT_DROPPED = 0, + PKT_FORWARDED, + PKT_POSTED /* for lookaside case */ +}; + +struct route_table { + struct rt_ctx *rt4_ctx; + struct rt_ctx *rt6_ctx; +}; + +/* + * Conf required by event mode worker with tx internal port + */ +struct lcore_conf_ev_tx_int_port_wrkr { + struct ipsec_ctx inbound; + struct ipsec_ctx outbound; + struct route_table rt; +} __rte_cache_aligned; + +void ipsec_poll_mode_worker(void); + +int ipsec_launch_one_lcore(void *args); + +#endif /* _IPSEC_WORKER_H_ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/meson.build b/src/spdk/dpdk/examples/ipsec-secgw/meson.build new file mode 100644 index 000000000..f9ba2a24b --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +# meson file, for building this example as part of a main DPDK build. 
+# +# To build this example as a standalone application with an already-installed +# DPDK instance, use 'make' + +deps += ['security', 'lpm', 'acl', 'hash', 'ip_frag', 'ipsec', 'eventdev'] +allow_experimental_apis = true +sources = files( + 'esp.c', 'event_helper.c', 'ipsec.c', 'ipsec_process.c', 'ipsec-secgw.c', + 'ipsec_worker.c', 'parser.c', 'rt.c', 'sa.c', 'sad.c', 'sp4.c', 'sp6.c' +) diff --git a/src/spdk/dpdk/examples/ipsec-secgw/parser.c b/src/spdk/dpdk/examples/ipsec-secgw/parser.c new file mode 100644 index 000000000..65eb7e9e2 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/parser.c @@ -0,0 +1,658 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ +#include <rte_common.h> +#include <rte_crypto.h> +#include <rte_string_fns.h> + +#include <cmdline_parse_string.h> +#include <cmdline_parse_num.h> +#include <cmdline_parse_ipaddr.h> +#include <cmdline_socket.h> +#include <cmdline.h> + +#include "ipsec.h" +#include "parser.h" + +#define PARSE_DELIMITER " \f\n\r\t\v" +static int +parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens) +{ + uint32_t i; + + if ((string == NULL) || + (tokens == NULL) || + (*n_tokens < 1)) + return -EINVAL; + + for (i = 0; i < *n_tokens; i++) { + tokens[i] = strtok_r(string, PARSE_DELIMITER, &string); + if (tokens[i] == NULL) + break; + } + + if ((i == *n_tokens) && + (NULL != strtok_r(string, PARSE_DELIMITER, &string))) + return -E2BIG; + + *n_tokens = i; + return 0; +} + +#define INADDRSZ 4 +#define IN6ADDRSZ 16 + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr(digits, ch); + if (pch != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return 0; + if (!saw_digit) { + if (++octets > 4) + return 0; + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } else + return 0; + } + if (octets < 4) + return 0; + + memcpy(dst, tmp, INADDRSZ); + return 1; +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; + const char *xdigits = 0, *curtok = 0; + int ch = 0, saw_xdigit = 0, count_xdigit = 0; + unsigned int val = 0; + unsigned dbloct_count = 0; + + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. 
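+	 *
+	 * When "::" is seen its position is only remembered (colonp);
+	 * the zero run it stands for is materialised at the very end by
+	 * shifting everything written after it to the tail of the
+	 * 16-byte address and zero-filling the gap.  A minimal
+	 * standalone sketch of that final shift (colonp and tp point
+	 * into addr[], as in the code further down):
+	 *
+	 *	static void
+	 *	expand_zero_run(unsigned char addr[16],
+	 *		unsigned char *colonp, unsigned char *tp)
+	 *	{
+	 *		int n = (int)(tp - colonp);	/* bytes after "::" */
+	 *		int i;
+	 *
+	 *		for (i = 1; i <= n; i++) {
+	 *			addr[16 - i] = colonp[n - i];	/* shift right */
+	 *			colonp[n - i] = 0;	/* zero-fill gap    */
+	 *		}
+	 *	}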
*/ + if (*src == ':') + if (*++src != ':') + return 0; + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr((xdigits = xdigits_l), ch); + if (pch == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return 0; + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return 0; + colonp = tp; + continue; + } else if (*src == '\0') { + return 0; + } + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + dbloct_count++; + continue; + } + if (ch == '.' && ((tp + INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + dbloct_count += 2; + break; /* '\0' was seen by inet_pton4(). */ + } + return 0; + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + dbloct_count++; + } + if (colonp != NULL) { + /* if we already have 8 double octets, having a colon + * means error */ + if (dbloct_count == 8) + return 0; + + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. + */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + memcpy(dst, tmp, IN6ADDRSZ); + return 1; +} + +int +parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask) +{ + char ip_str[INET_ADDRSTRLEN] = {0}; + char *pch; + + pch = strchr(token, '/'); + if (pch != NULL) { + strlcpy(ip_str, token, + RTE_MIN((unsigned int long)(pch - token + 1), + sizeof(ip_str))); + pch += 1; + if (is_str_num(pch) != 0) + return -EINVAL; + if (mask) + *mask = atoi(pch); + } else { + strlcpy(ip_str, token, sizeof(ip_str)); + if (mask) + *mask = 0; + } + if (strlen(ip_str) >= INET_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton4(ip_str, (unsigned char *)ipv4) != 1) + return -EINVAL; + + return 0; +} + +int +parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask) +{ + char ip_str[256] = {0}; + char *pch; + + pch = strchr(token, '/'); + if (pch != NULL) { + strlcpy(ip_str, token, + RTE_MIN((unsigned int long)(pch - token + 1), + sizeof(ip_str))); + pch += 1; + if (is_str_num(pch) != 0) + return -EINVAL; + if (mask) + *mask = atoi(pch); + } else { + strlcpy(ip_str, token, sizeof(ip_str)); + if (mask) + *mask = 0; + } + + if (strlen(ip_str) >= INET6_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton6(ip_str, (unsigned char *)ipv6) != 1) + return -EINVAL; + + return 0; +} + +int +parse_range(const char *token, uint16_t *low, uint16_t *high) +{ + char ch; + char num_str[20]; + uint32_t pos; + int range_low = -1; + int range_high = -1; + + if (!low || !high) + return -1; + + memset(num_str, 0, 20); + pos = 0; + + while ((ch = *token++) != '\0') { + if (isdigit(ch)) { + if (pos >= 19) + return -1; + num_str[pos++] = ch; + } else if (ch == ':') { + if (range_low != -1) + return -1; + range_low = atoi(num_str); + memset(num_str, 0, 20); + pos = 0; + } + } + + if (strlen(num_str) == 0) + return -1; + + range_high = atoi(num_str); + + *low = (uint16_t)range_low; + *high = (uint16_t)range_high; + + return 0; +} + +/* + * helper function for 
parse_mac, parse one section of the ether addr. + */ +static const char * +parse_uint8x16(const char *s, uint8_t *v, uint8_t ls) +{ + char *end; + unsigned long t; + + errno = 0; + t = strtoul(s, &end, 16); + if (errno != 0 || end[0] != ls || t > UINT8_MAX) + return NULL; + v[0] = t; + return end + 1; +} + +static int +parse_mac(const char *str, struct rte_ether_addr *addr) +{ + uint32_t i; + + static const uint8_t stop_sym[RTE_DIM(addr->addr_bytes)] = { + [0] = ':', + [1] = ':', + [2] = ':', + [3] = ':', + [4] = ':', + [5] = 0, + }; + + for (i = 0; i != RTE_DIM(addr->addr_bytes); i++) { + str = parse_uint8x16(str, addr->addr_bytes + i, stop_sym[i]); + if (str == NULL) + return -EINVAL; + } + + return 0; +} + +/** sp add parse */ +struct cfg_sp_add_cfg_item { + cmdline_fixed_string_t sp_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_sp_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_sp_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK((parse_tokenize_string(params->multi_string, tokens, + &n_tokens) == 0), status, "too many arguments"); + + if (status->status < 0) + return; + + if (strcmp(tokens[0], "ipv4") == 0) { + parse_sp4_tokens(tokens, n_tokens, status); + if (status->status < 0) + return; + } else if (strcmp(tokens[0], "ipv6") == 0) { + parse_sp6_tokens(tokens, n_tokens, status); + if (status->status < 0) + return; + } else { + APP_CHECK(0, status, "unrecognizable input %s\n", + tokens[0]); + return; + } +} + +static cmdline_parse_token_string_t cfg_sp_add_sp_str = + TOKEN_STRING_INITIALIZER(struct cfg_sp_add_cfg_item, + sp_keyword, "sp"); + +static cmdline_parse_token_string_t cfg_sp_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_sp_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_sp_add_rule = { + .f = cfg_sp_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_sp_add_sp_str, + (void *) &cfg_sp_add_multi_str, + NULL, + }, +}; + +/* sa add parse */ +struct cfg_sa_add_cfg_item { + cmdline_fixed_string_t sa_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_sa_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_sa_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK(parse_tokenize_string(params->multi_string, tokens, + &n_tokens) == 0, status, "too many arguments\n"); + + parse_sa_tokens(tokens, n_tokens, status); +} + +static cmdline_parse_token_string_t cfg_sa_add_sa_str = + TOKEN_STRING_INITIALIZER(struct cfg_sa_add_cfg_item, + sa_keyword, "sa"); + +static cmdline_parse_token_string_t cfg_sa_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_sa_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_sa_add_rule = { + .f = cfg_sa_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_sa_add_sa_str, + (void *) &cfg_sa_add_multi_str, + NULL, + }, +}; + +/* rt add parse */ +struct cfg_rt_add_cfg_item { + cmdline_fixed_string_t rt_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_rt_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_rt_add_cfg_item *params = parsed_result; + char *tokens[32]; + 
uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK(parse_tokenize_string( + params->multi_string, tokens, &n_tokens) == 0, + status, "too many arguments\n"); + if (status->status < 0) + return; + + parse_rt_tokens(tokens, n_tokens, status); +} + +static cmdline_parse_token_string_t cfg_rt_add_rt_str = + TOKEN_STRING_INITIALIZER(struct cfg_rt_add_cfg_item, + rt_keyword, "rt"); + +static cmdline_parse_token_string_t cfg_rt_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_rt_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_rt_add_rule = { + .f = cfg_rt_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_rt_add_rt_str, + (void *) &cfg_rt_add_multi_str, + NULL, + }, +}; + +/* neigh add parse */ +struct cfg_neigh_add_item { + cmdline_fixed_string_t neigh; + cmdline_fixed_string_t pstr; + uint16_t port; + cmdline_fixed_string_t mac; +}; + +static void +cfg_parse_neigh(void *parsed_result, __rte_unused struct cmdline *cl, + void *data) +{ + int32_t rc; + struct cfg_neigh_add_item *res; + struct parse_status *st; + struct rte_ether_addr mac; + + st = data; + res = parsed_result; + rc = parse_mac(res->mac, &mac); + APP_CHECK(rc == 0, st, "invalid ether addr:%s", res->mac); + rc = add_dst_ethaddr(res->port, &mac); + APP_CHECK(rc == 0, st, "invalid port numer:%hu", res->port); + if (st->status < 0) + return; +} + +cmdline_parse_token_string_t cfg_add_neigh_start = + TOKEN_STRING_INITIALIZER(struct cfg_neigh_add_item, neigh, "neigh"); +cmdline_parse_token_string_t cfg_add_neigh_pstr = + TOKEN_STRING_INITIALIZER(struct cfg_neigh_add_item, pstr, "port"); +cmdline_parse_token_num_t cfg_add_neigh_port = + TOKEN_NUM_INITIALIZER(struct cfg_neigh_add_item, port, UINT16); +cmdline_parse_token_string_t cfg_add_neigh_mac = + TOKEN_STRING_INITIALIZER(struct cfg_neigh_add_item, mac, NULL); + +cmdline_parse_inst_t cfg_neigh_add_rule = { + .f = cfg_parse_neigh, + .data = NULL, + .help_str = "", + .tokens = { + (void *)&cfg_add_neigh_start, + (void *)&cfg_add_neigh_pstr, + (void *)&cfg_add_neigh_port, + (void *)&cfg_add_neigh_mac, + NULL, + }, +}; + +/** set of cfg items */ +cmdline_parse_ctx_t ipsec_ctx[] = { + (cmdline_parse_inst_t *)&cfg_sp_add_rule, + (cmdline_parse_inst_t *)&cfg_sa_add_rule, + (cmdline_parse_inst_t *)&cfg_rt_add_rule, + (cmdline_parse_inst_t *)&cfg_neigh_add_rule, + NULL, +}; + +int +parse_cfg_file(const char *cfg_filename) +{ + struct cmdline *cl = cmdline_stdin_new(ipsec_ctx, ""); + FILE *f = fopen(cfg_filename, "r"); + char str[1024] = {0}, *get_s = NULL; + uint32_t line_num = 0; + struct parse_status status = {0}; + + if (f == NULL) { + rte_panic("Error: invalid file descriptor %s\n", cfg_filename); + goto error_exit; + } + + if (cl == NULL) { + rte_panic("Error: cannot create cmdline instance\n"); + goto error_exit; + } + + cfg_sp_add_rule.data = &status; + cfg_sa_add_rule.data = &status; + cfg_rt_add_rule.data = &status; + cfg_neigh_add_rule.data = &status; + + do { + char oneline[1024]; + char *pos; + get_s = fgets(oneline, 1024, f); + + if (!get_s) + break; + + line_num++; + + if (strlen(oneline) > 1022) { + rte_panic("%s:%u: error: " + "the line contains more characters the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + + /* process comment char '#' */ + if (oneline[0] == '#') + continue; + + pos = strchr(oneline, '#'); + if (pos != NULL) + *pos = '\0'; + + /* process line concatenator '\' */ + pos = strchr(oneline, 92); + if (pos != 
NULL) { + if (pos != oneline+strlen(oneline) - 2) { + rte_panic("%s:%u: error: " + "no character should exist after '\\'\n", + cfg_filename, line_num); + goto error_exit; + } + + *pos = '\0'; + + if (strlen(oneline) + strlen(str) > 1022) { + rte_panic("%s:%u: error: " + "the concatenated line contains more characters the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + + strcpy(str + strlen(str), oneline); + continue; + } + + /* copy the line to str and process */ + if (strlen(oneline) + strlen(str) > 1022) { + rte_panic("%s:%u: error: " + "the line contains more characters the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + strcpy(str + strlen(str), oneline); + + str[strlen(str)] = '\n'; + if (cmdline_parse(cl, str) < 0) { + rte_panic("%s:%u: error: parsing \"%s\" failed\n", + cfg_filename, line_num, str); + goto error_exit; + } + + if (status.status < 0) { + rte_panic("%s:%u: error: %s", cfg_filename, + line_num, status.parse_msg); + goto error_exit; + } + + memset(str, 0, 1024); + } while (1); + + cmdline_stdin_exit(cl); + fclose(f); + + sa_sort_arr(); + sp4_sort_arr(); + sp6_sort_arr(); + + return 0; + +error_exit: + if (cl) + cmdline_stdin_exit(cl); + if (f) + fclose(f); + + return -1; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/parser.h b/src/spdk/dpdk/examples/ipsec-secgw/parser.h new file mode 100644 index 000000000..6e764fe92 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/parser.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> + +#ifndef __PARSER_H +#define __PARSER_H + +struct parse_status { + int status; + char parse_msg[256]; +}; + +#define APP_CHECK(exp, st, fmt, ...) 
\ +do { \ + if (!(exp)) { \ + sprintf((st)->parse_msg, fmt "\n", \ + ## __VA_ARGS__); \ + (st)->status = -1; \ + } else \ + (st)->status = 0; \ +} while (0) + +#define APP_CHECK_PRESENCE(val, str, status) \ + APP_CHECK(val == 0, status, \ + "item \"%s\" already present", str) + +#define APP_CHECK_TOKEN_EQUAL(tokens, index, ref, status) \ + APP_CHECK(strcmp(tokens[index], ref) == 0, status, \ + "unrecognized input \"%s\": expect \"%s\"\n", \ + tokens[index], ref) + +static inline int +is_str_num(const char *str) +{ + uint32_t i; + + for (i = 0; i < strlen(str); i++) + if (!isdigit(str[i])) + return -1; + + return 0; +} + +#define APP_CHECK_TOKEN_IS_NUM(tokens, index, status) \ + APP_CHECK(is_str_num(tokens[index]) == 0, status, \ + "input \"%s\" is not valid number string", tokens[index]) + + +#define INCREMENT_TOKEN_INDEX(index, max_num, status) \ +do { \ + APP_CHECK(index + 1 < max_num, status, "reaching the end of " \ + "the token array"); \ + index++; \ +} while (0) + +int +parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask); + +int +parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask); + +int +parse_range(const char *token, uint16_t *low, uint16_t *high); + +void +sp4_sort_arr(void); + +void +parse_sp4_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +sp6_sort_arr(void); + +void +parse_sp6_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +sa_sort_arr(void); + +void +parse_sa_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +parse_rt_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +int +parse_cfg_file(const char *cfg_filename); + +#endif diff --git a/src/spdk/dpdk/examples/ipsec-secgw/rt.c b/src/spdk/dpdk/examples/ipsec-secgw/rt.c new file mode 100644 index 000000000..ec3a375f0 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/rt.c @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +/* + * Routing Table (RT) + */ +#include <sys/types.h> +#include <rte_lpm.h> +#include <rte_lpm6.h> +#include <rte_errno.h> +#include <rte_ip.h> + +#include "ipsec.h" +#include "parser.h" + +#define RT_IPV4_MAX_RULES 1024 +#define RT_IPV6_MAX_RULES 1024 + +struct ip4_route { + uint32_t ip; + uint8_t depth; + uint8_t if_out; +}; + +struct ip6_route { + uint8_t ip[16]; + uint8_t depth; + uint8_t if_out; +}; + +struct ip4_route rt_ip4[RT_IPV4_MAX_RULES]; +uint32_t nb_rt_ip4; + +struct ip6_route rt_ip6[RT_IPV4_MAX_RULES]; +uint32_t nb_rt_ip6; + +void +parse_rt_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + uint32_t ti; + uint32_t *n_rts = NULL; + struct ip4_route *route_ipv4 = NULL; + struct ip6_route *route_ipv6 = NULL; + + if (strcmp(tokens[0], "ipv4") == 0) { + n_rts = &nb_rt_ip4; + route_ipv4 = &rt_ip4[*n_rts]; + + APP_CHECK(*n_rts <= RT_IPV4_MAX_RULES - 1, status, + "too many rt rules, abort insertion\n"); + if (status->status < 0) + return; + + } else if (strcmp(tokens[0], "ipv6") == 0) { + n_rts = &nb_rt_ip6; + route_ipv6 = &rt_ip6[*n_rts]; + + APP_CHECK(*n_rts <= RT_IPV6_MAX_RULES - 1, status, + "too many rt rules, abort insertion\n"); + if (status->status < 0) + return; + } else { + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[0]); + return; + } + + for (ti = 1; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "dst") == 0) { + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (route_ipv4 != NULL) { + 
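				/*
				 * parse_ipv4_addr() returns the prefix length through its
				 * mask argument, so an illustrative token such as
				 * "10.0.0.0/24" ends up here as ip = 10.0.0.0, depth = 24.
				 */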
struct in_addr ip; + uint32_t depth = 0; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, &depth) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + route_ipv4->ip = rte_bswap32( + (uint32_t)ip.s_addr); + route_ipv4->depth = (uint8_t)depth; + } else { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK(parse_ipv6_addr(tokens[ti], + &ip, &depth) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 address", + tokens[ti]); + if (status->status < 0) + return; + memcpy(route_ipv6->ip, ip.s6_addr, 16); + route_ipv6->depth = (uint8_t)depth; + } + } + + if (strcmp(tokens[ti], "port") == 0) { + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + if (route_ipv4 != NULL) + route_ipv4->if_out = atoi(tokens[ti]); + else + route_ipv6->if_out = atoi(tokens[ti]); + } + } + + *n_rts = *n_rts + 1; +} + +void +rt_init(struct socket_ctx *ctx, int32_t socket_id) +{ + char name[PATH_MAX]; + uint32_t i; + int32_t ret; + struct rte_lpm *lpm; + struct rte_lpm6 *lpm6; + char a, b, c, d; + struct rte_lpm_config conf = { 0 }; + struct rte_lpm6_config conf6 = { 0 }; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->rt_ip4 != NULL) + rte_exit(EXIT_FAILURE, "IPv4 Routing Table for socket %u " + "already initialized\n", socket_id); + + if (ctx->rt_ip6 != NULL) + rte_exit(EXIT_FAILURE, "IPv6 Routing Table for socket %u " + "already initialized\n", socket_id); + + if (nb_rt_ip4 == 0 && nb_rt_ip6 == 0) + RTE_LOG(WARNING, IPSEC, "No Routing rule specified\n"); + + printf("Creating IPv4 Routing Table (RT) context with %u max routes\n", + RT_IPV4_MAX_RULES); + + /* create the LPM table */ + snprintf(name, sizeof(name), "%s_%u", "rt_ip4", socket_id); + conf.max_rules = RT_IPV4_MAX_RULES; + conf.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES; + lpm = rte_lpm_create(name, socket_id, &conf); + if (lpm == NULL) + rte_exit(EXIT_FAILURE, "Unable to create %s LPM table " + "on socket %d\n", name, socket_id); + + /* populate the LPM table */ + for (i = 0; i < nb_rt_ip4; i++) { + ret = rte_lpm_add(lpm, rt_ip4[i].ip, rt_ip4[i].depth, + rt_ip4[i].if_out); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s " + "LPM table on socket %d\n", i, name, socket_id); + + uint32_t_to_char(rt_ip4[i].ip, &a, &b, &c, &d); + printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n", + a, b, c, d, rt_ip4[i].depth, + rt_ip4[i].if_out); + } + + snprintf(name, sizeof(name), "%s_%u", "rt_ip6", socket_id); + conf6.max_rules = RT_IPV6_MAX_RULES; + conf6.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES; + lpm6 = rte_lpm6_create(name, socket_id, &conf6); + if (lpm6 == NULL) + rte_exit(EXIT_FAILURE, "Unable to create %s LPM table " + "on socket %d\n", name, socket_id); + + /* populate the LPM table */ + for (i = 0; i < nb_rt_ip6; i++) { + ret = rte_lpm6_add(lpm6, rt_ip6[i].ip, rt_ip6[i].depth, + rt_ip6[i].if_out); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s " + "LPM table on socket %d\n", i, name, socket_id); + + printf("LPM6: Adding route " + " %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n", + (uint16_t)((rt_ip6[i].ip[0] << 8) | rt_ip6[i].ip[1]), + (uint16_t)((rt_ip6[i].ip[2] << 8) | rt_ip6[i].ip[3]), + (uint16_t)((rt_ip6[i].ip[4] << 8) | rt_ip6[i].ip[5]), + (uint16_t)((rt_ip6[i].ip[6] << 8) | rt_ip6[i].ip[7]), + (uint16_t)((rt_ip6[i].ip[8] << 8) | rt_ip6[i].ip[9]), + (uint16_t)((rt_ip6[i].ip[10] 
<< 8) | rt_ip6[i].ip[11]), + (uint16_t)((rt_ip6[i].ip[12] << 8) | rt_ip6[i].ip[13]), + (uint16_t)((rt_ip6[i].ip[14] << 8) | rt_ip6[i].ip[15]), + rt_ip6[i].depth, rt_ip6[i].if_out); + } + + ctx->rt_ip4 = (struct rt_ctx *)lpm; + ctx->rt_ip6 = (struct rt_ctx *)lpm6; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/sa.c b/src/spdk/dpdk/examples/ipsec-secgw/sa.c new file mode 100644 index 000000000..cd1397531 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/sa.c @@ -0,0 +1,1635 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2020 Intel Corporation + */ + +/* + * Security Associations + */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> + +#include <rte_memzone.h> +#include <rte_crypto.h> +#include <rte_security.h> +#include <rte_cryptodev.h> +#include <rte_byteorder.h> +#include <rte_errno.h> +#include <rte_ip.h> +#include <rte_random.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> + +#include "ipsec.h" +#include "esp.h" +#include "parser.h" +#include "sad.h" + +#define IPDEFTTL 64 + +#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT) + +#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT) + +#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0) + +struct supported_cipher_algo { + const char *keyword; + enum rte_crypto_cipher_algorithm algo; + uint16_t iv_len; + uint16_t block_size; + uint16_t key_len; +}; + +struct supported_auth_algo { + const char *keyword; + enum rte_crypto_auth_algorithm algo; + uint16_t digest_len; + uint16_t key_len; + uint8_t key_not_req; +}; + +struct supported_aead_algo { + const char *keyword; + enum rte_crypto_aead_algorithm algo; + uint16_t iv_len; + uint16_t block_size; + uint16_t digest_len; + uint16_t key_len; + uint8_t aad_len; +}; + + +const struct supported_cipher_algo cipher_algos[] = { + { + .keyword = "null", + .algo = RTE_CRYPTO_CIPHER_NULL, + .iv_len = 0, + .block_size = 4, + .key_len = 0 + }, + { + .keyword = "aes-128-cbc", + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .iv_len = 16, + .block_size = 16, + .key_len = 16 + }, + { + .keyword = "aes-192-cbc", + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .iv_len = 16, + .block_size = 16, + .key_len = 24 + }, + { + .keyword = "aes-256-cbc", + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .iv_len = 16, + .block_size = 16, + .key_len = 32 + }, + { + .keyword = "aes-128-ctr", + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .iv_len = 8, + .block_size = 4, + .key_len = 20 + }, + { + .keyword = "3des-cbc", + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, + .iv_len = 8, + .block_size = 8, + .key_len = 24 + } +}; + +const struct supported_auth_algo auth_algos[] = { + { + .keyword = "null", + .algo = RTE_CRYPTO_AUTH_NULL, + .digest_len = 0, + .key_len = 0, + .key_not_req = 1 + }, + { + .keyword = "sha1-hmac", + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .digest_len = 12, + .key_len = 20 + }, + { + .keyword = "sha256-hmac", + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, + .digest_len = 16, + .key_len = 32 + } +}; + +const struct supported_aead_algo aead_algos[] = { + { + .keyword = "aes-128-gcm", + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .iv_len = 8, + .block_size = 4, + .key_len = 20, + .digest_len = 16, + .aad_len = 8, + }, + { + .keyword = "aes-192-gcm", + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .iv_len = 8, + .block_size = 4, + .key_len = 28, + .digest_len = 16, + .aad_len = 8, + }, + { + .keyword = "aes-256-gcm", + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .iv_len = 8, + .block_size = 4, + .key_len = 36, + .digest_len = 16, + .aad_len 
= 8, + } +}; + +#define SA_INIT_NB 128 + +static uint32_t nb_crypto_sessions; +struct ipsec_sa *sa_out; +uint32_t nb_sa_out; +static uint32_t sa_out_sz; +static struct ipsec_sa_cnt sa_out_cnt; + +struct ipsec_sa *sa_in; +uint32_t nb_sa_in; +static uint32_t sa_in_sz; +static struct ipsec_sa_cnt sa_in_cnt; + +static const struct supported_cipher_algo * +find_match_cipher_algo(const char *cipher_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(cipher_algos); i++) { + const struct supported_cipher_algo *algo = + &cipher_algos[i]; + + if (strcmp(cipher_keyword, algo->keyword) == 0) + return algo; + } + + return NULL; +} + +static const struct supported_auth_algo * +find_match_auth_algo(const char *auth_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(auth_algos); i++) { + const struct supported_auth_algo *algo = + &auth_algos[i]; + + if (strcmp(auth_keyword, algo->keyword) == 0) + return algo; + } + + return NULL; +} + +static const struct supported_aead_algo * +find_match_aead_algo(const char *aead_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(aead_algos); i++) { + const struct supported_aead_algo *algo = + &aead_algos[i]; + + if (strcmp(aead_keyword, algo->keyword) == 0) + return algo; + } + + return NULL; +} + +/** parse_key_string + * parse x:x:x:x.... hex number key string into uint8_t *key + * return: + * > 0: number of bytes parsed + * 0: failed + */ +static uint32_t +parse_key_string(const char *key_str, uint8_t *key) +{ + const char *pt_start = key_str, *pt_end = key_str; + uint32_t nb_bytes = 0; + + while (pt_end != NULL) { + char sub_str[3] = {0}; + + pt_end = strchr(pt_start, ':'); + + if (pt_end == NULL) { + if (strlen(pt_start) > 2) + return 0; + strncpy(sub_str, pt_start, 2); + } else { + if (pt_end - pt_start > 2) + return 0; + + strncpy(sub_str, pt_start, pt_end - pt_start); + pt_start = pt_end + 1; + } + + key[nb_bytes++] = strtol(sub_str, NULL, 16); + } + + return nb_bytes; +} + +static int +extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz) +{ + if (*sa_tbl == NULL) { + *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa)); + if (*sa_tbl == NULL) + return -1; + *cur_sz = SA_INIT_NB; + return 0; + } + + if (cur_cnt >= *cur_sz) { + *sa_tbl = realloc(*sa_tbl, + *cur_sz * sizeof(struct ipsec_sa) * 2); + if (*sa_tbl == NULL) + return -1; + /* clean reallocated extra space */ + memset(&(*sa_tbl)[*cur_sz], 0, + *cur_sz * sizeof(struct ipsec_sa)); + *cur_sz *= 2; + } + + return 0; +} + +void +parse_sa_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct ipsec_sa *rule = NULL; + struct rte_ipsec_session *ips; + uint32_t ti; /*token index*/ + uint32_t *ri /*rule index*/; + struct ipsec_sa_cnt *sa_cnt; + uint32_t cipher_algo_p = 0; + uint32_t auth_algo_p = 0; + uint32_t aead_algo_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t mode_p = 0; + uint32_t type_p = 0; + uint32_t portid_p = 0; + uint32_t fallback_p = 0; + int16_t status_p = 0; + + if (strcmp(tokens[0], "in") == 0) { + ri = &nb_sa_in; + sa_cnt = &sa_in_cnt; + if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0) + return; + rule = &sa_in[*ri]; + rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; + } else { + ri = &nb_sa_out; + sa_cnt = &sa_out_cnt; + if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0) + return; + rule = &sa_out[*ri]; + rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS; + } + + /* spi number */ + APP_CHECK_TOKEN_IS_NUM(tokens, 1, status); + if (status->status < 0) + return; + if (atoi(tokens[1]) == INVALID_SPI) + return; + 
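	/*
	 * tokens[0] selected the direction above; tokens[1] is the SPI in
	 * decimal, so an illustrative rule starting with "sa out 5 ..." stores
	 * spi = 5 below. Rules carrying INVALID_SPI are silently skipped.
	 */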
rule->spi = atoi(tokens[1]); + rule->portid = UINT16_MAX; + ips = ipsec_get_primary_session(rule); + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "mode") == 0) { + APP_CHECK_PRESENCE(mode_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (strcmp(tokens[ti], "ipv4-tunnel") == 0) { + sa_cnt->nb_v4++; + rule->flags = IP4_TUNNEL; + } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) { + sa_cnt->nb_v6++; + rule->flags = IP6_TUNNEL; + } else if (strcmp(tokens[ti], "transport") == 0) { + sa_cnt->nb_v4++; + sa_cnt->nb_v6++; + rule->flags = TRANSPORT; + } else { + APP_CHECK(0, status, "unrecognized " + "input \"%s\"", tokens[ti]); + return; + } + + mode_p = 1; + continue; + } + + if (strcmp(tokens[ti], "cipher_algo") == 0) { + const struct supported_cipher_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_cipher_algo(tokens[ti]); + + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + if (status->status < 0) + return; + + rule->cipher_algo = algo->algo; + rule->block_size = algo->block_size; + rule->iv_len = algo->iv_len; + rule->cipher_key_len = algo->key_len; + + /* for NULL algorithm, no cipher key required */ + if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { + cipher_algo_p = 1; + continue; + } + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"cipher_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->cipher_key); + APP_CHECK(key_len == rule->cipher_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC || + algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC) + rule->salt = (uint32_t)rte_rand(); + + if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) { + key_len -= 4; + rule->cipher_key_len = key_len; + memcpy(&rule->salt, + &rule->cipher_key[key_len], 4); + } + + cipher_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "auth_algo") == 0) { + const struct supported_auth_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(auth_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_auth_algo(tokens[ti]); + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + if (status->status < 0) + return; + + rule->auth_algo = algo->algo; + rule->auth_key_len = algo->key_len; + rule->digest_len = algo->digest_len; + + /* NULL algorithm and combined algos do not + * require auth key + */ + if (algo->key_not_req) { + auth_algo_p = 1; + continue; + } + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "auth_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"auth_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->auth_key); + APP_CHECK(key_len == 
rule->auth_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + auth_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "aead_algo") == 0) { + const struct supported_aead_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(aead_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_aead_algo(tokens[ti]); + + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + if (status->status < 0) + return; + + rule->aead_algo = algo->algo; + rule->cipher_key_len = algo->key_len; + rule->digest_len = algo->digest_len; + rule->aad_len = algo->aad_len; + rule->block_size = algo->block_size; + rule->iv_len = algo->iv_len; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "aead_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"aead_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->cipher_key); + APP_CHECK(key_len == rule->cipher_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + key_len -= 4; + rule->cipher_key_len = key_len; + memcpy(&rule->salt, + &rule->cipher_key[key_len], 4); + + aead_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (IS_IP4_TUNNEL(rule->flags)) { + struct in_addr ip; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + rule->src.ip.ip4 = rte_bswap32( + (uint32_t)ip.s_addr); + } else if (IS_IP6_TUNNEL(rule->flags)) { + struct in6_addr ip; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 addr", + tokens[ti]); + if (status->status < 0) + return; + memcpy(rule->src.ip.ip6.ip6_b, + ip.s6_addr, 16); + } else if (IS_TRANSPORT(rule->flags)) { + APP_CHECK(0, status, "unrecognized input " + "\"%s\"", tokens[ti]); + return; + } + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (IS_IP4_TUNNEL(rule->flags)) { + struct in_addr ip; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + rule->dst.ip.ip4 = rte_bswap32( + (uint32_t)ip.s_addr); + } else if (IS_IP6_TUNNEL(rule->flags)) { + struct in6_addr ip; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 addr", + tokens[ti]); + if (status->status < 0) + return; + memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16); + } else if (IS_TRANSPORT(rule->flags)) { + APP_CHECK(0, status, "unrecognized " + "input \"%s\"", tokens[ti]); + return; + } + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "type") == 0) { + APP_CHECK_PRESENCE(type_p, tokens[ti], status); + if (status->status < 0) + return; + + 
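			/*
			 * "type" selects the rte_security action type used for the
			 * primary session (ips); the accepted keywords are matched one
			 * by one below, e.g. "type inline-crypto-offload" or
			 * "type cpu-crypto".
			 */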
INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (strcmp(tokens[ti], "inline-crypto-offload") == 0) + ips->type = + RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO; + else if (strcmp(tokens[ti], + "inline-protocol-offload") == 0) + ips->type = + RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL; + else if (strcmp(tokens[ti], + "lookaside-protocol-offload") == 0) + ips->type = + RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL; + else if (strcmp(tokens[ti], "no-offload") == 0) + ips->type = RTE_SECURITY_ACTION_TYPE_NONE; + else if (strcmp(tokens[ti], "cpu-crypto") == 0) + ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO; + else { + APP_CHECK(0, status, "Invalid input \"%s\"", + tokens[ti]); + return; + } + + type_p = 1; + continue; + } + + if (strcmp(tokens[ti], "port_id") == 0) { + APP_CHECK_PRESENCE(portid_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + if (rule->portid == UINT16_MAX) + rule->portid = atoi(tokens[ti]); + else if (rule->portid != atoi(tokens[ti])) { + APP_CHECK(0, status, + "portid %s not matching with already assigned portid %u", + tokens[ti], rule->portid); + return; + } + portid_p = 1; + continue; + } + + if (strcmp(tokens[ti], "fallback") == 0) { + struct rte_ipsec_session *fb; + + APP_CHECK(app_sa_prm.enable, status, "Fallback session " + "not allowed for legacy mode."); + if (status->status < 0) + return; + APP_CHECK(ips->type == + RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status, + "Fallback session allowed if primary session " + "is of type inline-crypto-offload only."); + if (status->status < 0) + return; + APP_CHECK(rule->direction == + RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status, + "Fallback session not allowed for egress " + "rule"); + if (status->status < 0) + return; + APP_CHECK_PRESENCE(fallback_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + fb = ipsec_get_fallback_session(rule); + if (strcmp(tokens[ti], "lookaside-none") == 0) + fb->type = RTE_SECURITY_ACTION_TYPE_NONE; + else if (strcmp(tokens[ti], "cpu-crypto") == 0) + fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO; + else { + APP_CHECK(0, status, "unrecognized fallback " + "type %s.", tokens[ti]); + return; + } + + rule->fallback_sessions = 1; + nb_crypto_sessions++; + fallback_p = 1; + continue; + } + if (strcmp(tokens[ti], "flow-direction") == 0) { + switch (ips->type) { + case RTE_SECURITY_ACTION_TYPE_NONE: + case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO: + rule->fdir_flag = 1; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + if (rule->portid == UINT16_MAX) + rule->portid = atoi(tokens[ti]); + else if (rule->portid != atoi(tokens[ti])) { + APP_CHECK(0, status, + "portid %s not matching with already assigned portid %u", + tokens[ti], rule->portid); + return; + } + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + rule->fdir_qid = atoi(tokens[ti]); + /* validating portid and queueid */ + status_p = check_flow_params(rule->portid, + rule->fdir_qid); + if (status_p < 0) { + printf("port id %u / queue id %u is " + "not valid\n", rule->portid, + rule->fdir_qid); + } + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL: + default: + APP_CHECK(0, status, + "flow director not supported for security session type %d", + ips->type); + return; + } 
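			/*
			 * flow-direction is only honoured for SAs processed in
			 * software (no-offload / cpu-crypto); the switch above rejects
			 * it for inline and lookaside offload session types.
			 */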
+ continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; + } + + if (aead_algo_p) { + APP_CHECK(cipher_algo_p == 0, status, + "AEAD used, no need for cipher options"); + if (status->status < 0) + return; + + APP_CHECK(auth_algo_p == 0, status, + "AEAD used, no need for auth options"); + if (status->status < 0) + return; + } else { + APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options"); + if (status->status < 0) + return; + + APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options"); + if (status->status < 0) + return; + } + + APP_CHECK(mode_p == 1, status, "missing mode option"); + if (status->status < 0) + return; + + if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type != + RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0)) + printf("Missing portid option, falling back to non-offload\n"); + + if (!type_p || (!portid_p && ips->type != + RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) { + ips->type = RTE_SECURITY_ACTION_TYPE_NONE; + } + + nb_crypto_sessions++; + *ri = *ri + 1; +} + +static void +print_one_sa_rule(const struct ipsec_sa *sa, int inbound) +{ + uint32_t i; + uint8_t a, b, c, d; + const struct rte_ipsec_session *ips; + const struct rte_ipsec_session *fallback_ips; + + printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi); + + for (i = 0; i < RTE_DIM(cipher_algos); i++) { + if (cipher_algos[i].algo == sa->cipher_algo && + cipher_algos[i].key_len == sa->cipher_key_len) { + printf("%s ", cipher_algos[i].keyword); + break; + } + } + + for (i = 0; i < RTE_DIM(auth_algos); i++) { + if (auth_algos[i].algo == sa->auth_algo) { + printf("%s ", auth_algos[i].keyword); + break; + } + } + + for (i = 0; i < RTE_DIM(aead_algos); i++) { + if (aead_algos[i].algo == sa->aead_algo && + aead_algos[i].key_len-4 == sa->cipher_key_len) { + printf("%s ", aead_algos[i].keyword); + break; + } + } + + printf("mode:"); + + switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { + case IP4_TUNNEL: + printf("IP4Tunnel "); + uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a); + uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a); + break; + case IP6_TUNNEL: + printf("IP6Tunnel "); + for (i = 0; i < 16; i++) { + if (i % 2 && i != 15) + printf("%.2x:", sa->src.ip.ip6.ip6_b[i]); + else + printf("%.2x", sa->src.ip.ip6.ip6_b[i]); + } + printf(" "); + for (i = 0; i < 16; i++) { + if (i % 2 && i != 15) + printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]); + else + printf("%.2x", sa->dst.ip.ip6.ip6_b[i]); + } + break; + case TRANSPORT: + printf("Transport "); + break; + } + + ips = &sa->sessions[IPSEC_SESSION_PRIMARY]; + printf(" type:"); + switch (ips->type) { + case RTE_SECURITY_ACTION_TYPE_NONE: + printf("no-offload "); + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + printf("inline-crypto-offload "); + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + printf("inline-protocol-offload "); + break; + case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL: + printf("lookaside-protocol-offload "); + break; + case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO: + printf("cpu-crypto-accelerated "); + break; + } + + fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK]; + if (fallback_ips != NULL && sa->fallback_sessions > 0) { + printf("inline fallback: "); + switch (fallback_ips->type) { + case RTE_SECURITY_ACTION_TYPE_NONE: + printf("lookaside-none"); + break; + case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO: + printf("cpu-crypto-accelerated"); + break; + 
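		/* any other fallback type is unexpected and reported as invalid */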
default: + printf("invalid"); + break; + } + } + if (sa->fdir_flag == 1) + printf("flow-direction port %d queue %d", sa->portid, + sa->fdir_qid); + + printf("\n"); +} + +static struct sa_ctx * +sa_create(const char *name, int32_t socket_id, uint32_t nb_sa) +{ + char s[PATH_MAX]; + struct sa_ctx *sa_ctx; + uint32_t mz_size; + const struct rte_memzone *mz; + + snprintf(s, sizeof(s), "%s_%u", name, socket_id); + + /* Create SA context */ + printf("Creating SA context with %u maximum entries on socket %d\n", + nb_sa, socket_id); + + mz_size = sizeof(struct ipsec_xf) * nb_sa; + mz = rte_memzone_reserve(s, mz_size, socket_id, + RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) { + printf("Failed to allocate SA XFORM memory\n"); + rte_errno = ENOMEM; + return NULL; + } + + sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) + + sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE); + + if (sa_ctx == NULL) { + printf("Failed to allocate SA CTX memory\n"); + rte_errno = ENOMEM; + rte_memzone_free(mz); + return NULL; + } + + sa_ctx->xf = (struct ipsec_xf *)mz->addr; + sa_ctx->nb_sa = nb_sa; + + return sa_ctx; +} + +static int +check_eth_dev_caps(uint16_t portid, uint32_t inbound) +{ + struct rte_eth_dev_info dev_info; + int retval; + + retval = rte_eth_dev_info_get(portid, &dev_info); + if (retval != 0) { + RTE_LOG(ERR, IPSEC, + "Error during getting device (port %u) info: %s\n", + portid, strerror(-retval)); + + return retval; + } + + if (inbound) { + if ((dev_info.rx_offload_capa & + DEV_RX_OFFLOAD_SECURITY) == 0) { + RTE_LOG(WARNING, PORT, + "hardware RX IPSec offload is not supported\n"); + return -EINVAL; + } + + } else { /* outbound */ + if ((dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_SECURITY) == 0) { + RTE_LOG(WARNING, PORT, + "hardware TX IPSec offload is not supported\n"); + return -EINVAL; + } + } + return 0; +} + +/* + * Helper function, tries to determine next_proto for SPI + * by searching though SP rules. + */ +static int +get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir, + struct ip_addr ip_addr[2], uint32_t mask[2]) +{ + int32_t rc4, rc6; + + rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + ip_addr, mask); + rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + ip_addr, mask); + + if (rc4 >= 0) { + if (rc6 >= 0) { + RTE_LOG(ERR, IPSEC, + "%s: SPI %u used simultaeously by " + "IPv4(%d) and IPv6 (%d) SP rules\n", + __func__, spi, rc4, rc6); + return -EINVAL; + } else + return IPPROTO_IPIP; + } else if (rc6 < 0) { + RTE_LOG(ERR, IPSEC, + "%s: SPI %u is not used by any SP rule\n", + __func__, spi); + return -EINVAL; + } else + return IPPROTO_IPV6; +} + +/* + * Helper function for getting source and destination IP addresses + * from SP. Needed for inline crypto transport mode, as addresses are not + * provided in config file for that mode. It checks if SP for current SA exists, + * and based on what type of protocol is returned, it stores appropriate + * addresses got from SP into SA. 
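 * For example (illustrative): an IPv4 SP rule for this SPI written with /32
 * source and destination addresses provides both endpoints, which are copied
 * into sa->src / sa->dst below; less specific masks or zero addresses are
 * rejected with -EINVAL (see the IP4_FULL_MASK / IP6_FULL_MASK checks).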
+ */ +static int +sa_add_address_inline_crypto(struct ipsec_sa *sa) +{ + int protocol; + struct ip_addr ip_addr[2]; + uint32_t mask[2]; + + protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask); + if (protocol < 0) + return protocol; + else if (protocol == IPPROTO_IPIP) { + sa->flags |= IP4_TRANSPORT; + if (mask[0] == IP4_FULL_MASK && + mask[1] == IP4_FULL_MASK && + ip_addr[0].ip.ip4 != 0 && + ip_addr[1].ip.ip4 != 0) { + + sa->src.ip.ip4 = ip_addr[0].ip.ip4; + sa->dst.ip.ip4 = ip_addr[1].ip.ip4; + } else { + RTE_LOG(ERR, IPSEC, + "%s: No valid address or mask entry in" + " IPv4 SP rule for SPI %u\n", + __func__, sa->spi); + return -EINVAL; + } + } else if (protocol == IPPROTO_IPV6) { + sa->flags |= IP6_TRANSPORT; + if (mask[0] == IP6_FULL_MASK && + mask[1] == IP6_FULL_MASK && + (ip_addr[0].ip.ip6.ip6[0] != 0 || + ip_addr[0].ip.ip6.ip6[1] != 0) && + (ip_addr[1].ip.ip6.ip6[0] != 0 || + ip_addr[1].ip.ip6.ip6[1] != 0)) { + + sa->src.ip.ip6 = ip_addr[0].ip.ip6; + sa->dst.ip.ip6 = ip_addr[1].ip.ip6; + } else { + RTE_LOG(ERR, IPSEC, + "%s: No valid address or mask entry in" + " IPv6 SP rule for SPI %u\n", + __func__, sa->spi); + return -EINVAL; + } + } + return 0; +} + +static int +sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries, uint32_t inbound, + struct socket_ctx *skt_ctx) +{ + struct ipsec_sa *sa; + uint32_t i, idx; + uint16_t iv_length, aad_length; + int inline_status; + int32_t rc; + struct rte_ipsec_session *ips; + + /* for ESN upper 32 bits of SQN also need to be part of AAD */ + aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0; + + for (i = 0; i < nb_entries; i++) { + idx = i; + sa = &sa_ctx->sa[idx]; + if (sa->spi != 0) { + printf("Index %u already in use by SPI %u\n", + idx, sa->spi); + return -EINVAL; + } + *sa = entries[i]; + + if (inbound) { + rc = ipsec_sad_add(&sa_ctx->sad, sa); + if (rc != 0) + return rc; + } + + sa->seq = 0; + ips = ipsec_get_primary_session(sa); + + if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL || + ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + if (check_eth_dev_caps(sa->portid, inbound)) + return -EINVAL; + } + + switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { + case IP4_TUNNEL: + sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4); + sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4); + break; + case TRANSPORT: + if (ips->type == + RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + inline_status = + sa_add_address_inline_crypto(sa); + if (inline_status < 0) + return inline_status; + } + break; + } + + if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { + iv_length = 12; + + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD; + sa_ctx->xf[idx].a.aead.algo = sa->aead_algo; + sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key; + sa_ctx->xf[idx].a.aead.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].a.aead.op = (inbound == 1) ? 
+ RTE_CRYPTO_AEAD_OP_DECRYPT : + RTE_CRYPTO_AEAD_OP_ENCRYPT; + sa_ctx->xf[idx].a.next = NULL; + sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET; + sa_ctx->xf[idx].a.aead.iv.length = iv_length; + sa_ctx->xf[idx].a.aead.aad_length = + sa->aad_len + aad_length; + sa_ctx->xf[idx].a.aead.digest_length = + sa->digest_len; + + sa->xforms = &sa_ctx->xf[idx].a; + } else { + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_3DES_CBC: + case RTE_CRYPTO_CIPHER_AES_CBC: + iv_length = sa->iv_len; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + iv_length = 16; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, + "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + + if (inbound) { + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo; + sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key; + sa_ctx->xf[idx].b.cipher.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].b.cipher.op = + RTE_CRYPTO_CIPHER_OP_DECRYPT; + sa_ctx->xf[idx].b.next = NULL; + sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET; + sa_ctx->xf[idx].b.cipher.iv.length = iv_length; + + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH; + sa_ctx->xf[idx].a.auth.algo = sa->auth_algo; + sa_ctx->xf[idx].a.auth.key.data = sa->auth_key; + sa_ctx->xf[idx].a.auth.key.length = + sa->auth_key_len; + sa_ctx->xf[idx].a.auth.digest_length = + sa->digest_len; + sa_ctx->xf[idx].a.auth.op = + RTE_CRYPTO_AUTH_OP_VERIFY; + } else { /* outbound */ + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo; + sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key; + sa_ctx->xf[idx].a.cipher.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].a.cipher.op = + RTE_CRYPTO_CIPHER_OP_ENCRYPT; + sa_ctx->xf[idx].a.next = NULL; + sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET; + sa_ctx->xf[idx].a.cipher.iv.length = iv_length; + + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH; + sa_ctx->xf[idx].b.auth.algo = sa->auth_algo; + sa_ctx->xf[idx].b.auth.key.data = sa->auth_key; + sa_ctx->xf[idx].b.auth.key.length = + sa->auth_key_len; + sa_ctx->xf[idx].b.auth.digest_length = + sa->digest_len; + sa_ctx->xf[idx].b.auth.op = + RTE_CRYPTO_AUTH_OP_GENERATE; + } + + sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b; + sa_ctx->xf[idx].b.next = NULL; + sa->xforms = &sa_ctx->xf[idx].a; + } + + if (ips->type == + RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL || + ips->type == + RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + rc = create_inline_session(skt_ctx, sa, ips); + if (rc != 0) { + RTE_LOG(ERR, IPSEC_ESP, + "create_inline_session() failed\n"); + return -EINVAL; + } + } + + if (sa->fdir_flag && inbound) { + rc = create_ipsec_esp_flow(sa); + if (rc != 0) + RTE_LOG(ERR, IPSEC_ESP, + "create_ipsec_esp_flow() failed\n"); + } + print_one_sa_rule(sa, inbound); + } + + return 0; +} + +static inline int +sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries, struct socket_ctx *skt_ctx) +{ + return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx); +} + +static inline int +sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries, struct socket_ctx *skt_ctx) +{ + return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx); +} + +/* + * helper function, fills parameters that are identical for all SAs + */ +static void +fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm, + const struct app_sa_prm *app_prm) +{ + memset(prm, 0, sizeof(*prm)); + + prm->flags = app_prm->flags; + 
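	/*
	 * Only application-wide knobs are copied here (SA flags, ESN enable,
	 * replay window size); per-SA fields such as SPI, salt, mode and the
	 * crypto xform are filled in by fill_ipsec_sa_prm() below.
	 */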
prm->ipsec_xform.options.esn = app_prm->enable_esn; + prm->ipsec_xform.replay_win_sz = app_prm->window_size; +} + +static int +fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss, + const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6) +{ + int32_t rc; + + /* + * Try to get SPI next proto by searching that SPI in SPD. + * probably not the optimal way, but there seems nothing + * better right now. + */ + rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL); + if (rc < 0) + return rc; + + fill_ipsec_app_sa_prm(prm, &app_sa_prm); + prm->userdata = (uintptr_t)ss; + + /* setup ipsec xform */ + prm->ipsec_xform.spi = ss->spi; + prm->ipsec_xform.salt = ss->salt; + prm->ipsec_xform.direction = ss->direction; + prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP; + prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ? + RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT : + RTE_SECURITY_IPSEC_SA_MODE_TUNNEL; + prm->ipsec_xform.options.ecn = 1; + prm->ipsec_xform.options.copy_dscp = 1; + + if (IS_IP4_TUNNEL(ss->flags)) { + prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4; + prm->tun.hdr_len = sizeof(*v4); + prm->tun.next_proto = rc; + prm->tun.hdr = v4; + } else if (IS_IP6_TUNNEL(ss->flags)) { + prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6; + prm->tun.hdr_len = sizeof(*v6); + prm->tun.next_proto = rc; + prm->tun.hdr = v6; + } else { + /* transport mode */ + prm->trs.proto = rc; + } + + /* setup crypto section */ + prm->crypto_xform = ss->xforms; + return 0; +} + +static int +fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa) +{ + int32_t rc = 0; + + ss->sa = sa; + + if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || + ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) { + if (ss->security.ses != NULL) { + rc = rte_ipsec_session_prepare(ss); + if (rc != 0) + memset(ss, 0, sizeof(*ss)); + } + } + + return rc; +} + +/* + * Initialise related rte_ipsec_sa object. + */ +static int +ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size) +{ + int rc; + struct rte_ipsec_sa_prm prm; + struct rte_ipsec_session *ips; + struct rte_ipv4_hdr v4 = { + .version_ihl = IPVERSION << 4 | + sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER, + .time_to_live = IPDEFTTL, + .next_proto_id = IPPROTO_ESP, + .src_addr = lsa->src.ip.ip4, + .dst_addr = lsa->dst.ip.ip4, + }; + struct rte_ipv6_hdr v6 = { + .vtc_flow = htonl(IP6_VERSION << 28), + .proto = IPPROTO_ESP, + }; + + if (IS_IP6_TUNNEL(lsa->flags)) { + memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr)); + memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr)); + } + + rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6); + if (rc == 0) + rc = rte_ipsec_sa_init(sa, &prm, sa_size); + if (rc < 0) + return rc; + + /* init primary processing session */ + ips = ipsec_get_primary_session(lsa); + rc = fill_ipsec_session(ips, sa); + if (rc != 0) + return rc; + + /* init inline fallback processing session */ + if (lsa->fallback_sessions == 1) + rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa); + + return rc; +} + +/* + * Allocate space and init rte_ipsec_sa strcutures, + * one per session. 
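 * The per-SA object size is queried once with rte_ipsec_sa_size(), using the
 * first configured entry as a template, and the whole table is carved out of
 * a single rte_zmalloc_socket() allocation: entry i lives at satbl + i * sz
 * (see the loop below).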
+ */ +static int +ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket) +{ + int32_t rc, sz; + uint32_t i, idx; + size_t tsz; + struct rte_ipsec_sa *sa; + struct ipsec_sa *lsa; + struct rte_ipsec_sa_prm prm; + + /* determine SA size */ + idx = 0; + fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL); + sz = rte_ipsec_sa_size(&prm); + if (sz < 0) { + RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): " + "failed to determine SA size, error code: %d\n", + __func__, ctx, nb_ent, socket, sz); + return sz; + } + + tsz = sz * nb_ent; + + ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket); + if (ctx->satbl == NULL) { + RTE_LOG(ERR, IPSEC, + "%s(%p, %u, %d): failed to allocate %zu bytes\n", + __func__, ctx, nb_ent, socket, tsz); + return -ENOMEM; + } + + rc = 0; + for (i = 0; i != nb_ent && rc == 0; i++) { + + idx = i; + + sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i); + lsa = ctx->sa + idx; + + rc = ipsec_sa_init(lsa, sa, sz); + } + + return rc; +} + +static int +sa_cmp(const void *p, const void *q) +{ + uint32_t spi1 = ((const struct ipsec_sa *)p)->spi; + uint32_t spi2 = ((const struct ipsec_sa *)q)->spi; + + return (int)(spi1 - spi2); +} + +/* + * Walk through all SA rules to find an SA with given SPI + */ +int +sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound) +{ + uint32_t num; + struct ipsec_sa *sa; + struct ipsec_sa tmpl; + const struct ipsec_sa *sar; + + sar = sa_ctx->sa; + if (inbound != 0) + num = nb_sa_in; + else + num = nb_sa_out; + + tmpl.spi = spi; + + sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp); + if (sa != NULL) + return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa); + + return -ENOENT; +} + +void +sa_init(struct socket_ctx *ctx, int32_t socket_id) +{ + int32_t rc; + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sa_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already " + "initialized\n", socket_id); + + if (ctx->sa_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already " + "initialized\n", socket_id); + + if (nb_sa_in > 0) { + name = "sa_in"; + ctx->sa_in = sa_create(name, socket_id, nb_sa_in); + if (ctx->sa_in == NULL) + rte_exit(EXIT_FAILURE, "Error [%d] creating SA " + "context %s in socket %d\n", rte_errno, + name, socket_id); + + rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id, + &sa_in_cnt); + if (rc != 0) + rte_exit(EXIT_FAILURE, "failed to init SAD\n"); + + sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx); + + if (app_sa_prm.enable != 0) { + rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in, + socket_id); + if (rc != 0) + rte_exit(EXIT_FAILURE, + "failed to init inbound SAs\n"); + } + } else + RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n"); + + if (nb_sa_out > 0) { + name = "sa_out"; + ctx->sa_out = sa_create(name, socket_id, nb_sa_out); + if (ctx->sa_out == NULL) + rte_exit(EXIT_FAILURE, "Error [%d] creating SA " + "context %s in socket %d\n", rte_errno, + name, socket_id); + + sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx); + + if (app_sa_prm.enable != 0) { + rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out, + socket_id); + if (rc != 0) + rte_exit(EXIT_FAILURE, + "failed to init outbound SAs\n"); + } + } else + RTE_LOG(WARNING, IPSEC, "No SA Outbound rule " + "specified\n"); +} + +int +inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx) +{ + struct ipsec_mbuf_metadata *priv; + struct ipsec_sa *sa; + + priv = get_priv(m); + sa = priv->sa; + if (sa != NULL) + return 
(sa_ctx->sa[sa_idx].spi == sa->spi); + + RTE_LOG(ERR, IPSEC, "SA not saved in private data\n"); + return 0; +} + +void +inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[], + void *sa_arr[], uint16_t nb_pkts) +{ + uint32_t i; + void *result_sa; + struct ipsec_sa *sa; + + sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts); + + /* + * Mark need for inline offload fallback on the LSB of SA pointer. + * Thanks to packet grouping mechanism which ipsec_process is using + * packets marked for fallback processing will form separate group. + * + * Because it is not safe to use SA pointer it is casted to generic + * pointer to prevent from unintentional use. Use ipsec_mask_saptr + * to get valid struct pointer. + */ + for (i = 0; i < nb_pkts; i++) { + if (sa_arr[i] == NULL) + continue; + + result_sa = sa = sa_arr[i]; + if (MBUF_NO_SEC_OFFLOAD(pkts[i]) && + sa->fallback_sessions > 0) { + uintptr_t intsa = (uintptr_t)sa; + intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG; + result_sa = (void *)intsa; + } + sa_arr[i] = result_sa; + } +} + +void +outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], + void *sa[], uint16_t nb_pkts) +{ + uint32_t i; + + for (i = 0; i < nb_pkts; i++) + sa[i] = &sa_ctx->sa[sa_idx[i]]; +} + +/* + * Select HW offloads to be used. + */ +int +sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads, + uint64_t *tx_offloads) +{ + struct ipsec_sa *rule; + uint32_t idx_sa; + enum rte_security_session_action_type rule_type; + + *rx_offloads = 0; + *tx_offloads = 0; + + /* Check for inbound rules that use offloads and use this port */ + for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) { + rule = &sa_in[idx_sa]; + rule_type = ipsec_get_action_type(rule); + if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || + rule_type == + RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) + && rule->portid == port_id) + *rx_offloads |= DEV_RX_OFFLOAD_SECURITY; + } + + /* Check for outbound rules that use offloads and use this port */ + for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) { + rule = &sa_out[idx_sa]; + rule_type = ipsec_get_action_type(rule); + if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || + rule_type == + RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) + && rule->portid == port_id) + *tx_offloads |= DEV_TX_OFFLOAD_SECURITY; + } + return 0; +} + +void +sa_sort_arr(void) +{ + qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp); + qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp); +} + +uint32_t +get_nb_crypto_sessions(void) +{ + return nb_crypto_sessions; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/sad.c b/src/spdk/dpdk/examples/ipsec-secgw/sad.c new file mode 100644 index 000000000..5b2c0e679 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/sad.c @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include <rte_errno.h> +#include <rte_malloc.h> + +#include "ipsec.h" +#include "sad.h" + +RTE_DEFINE_PER_LCORE(struct ipsec_sad_cache, sad_cache) = { + .v4 = NULL, + .v6 = NULL, + .mask = 0, +}; + +int +ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa) +{ + int ret; + void *tmp = NULL; + union rte_ipsec_sad_key key = { {0} }; + const union rte_ipsec_sad_key *lookup_key[1]; + + /* spi field is common for ipv4 and ipv6 key types */ + key.v4.spi = rte_cpu_to_be_32(sa->spi); + lookup_key[0] = &key; + switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { + case IP4_TUNNEL: + rte_ipsec_sad_lookup(sad->sad_v4, lookup_key, &tmp, 1); + if (tmp != NULL) + return -EEXIST; + + ret = rte_ipsec_sad_add(sad->sad_v4, 
&key, + RTE_IPSEC_SAD_SPI_ONLY, sa); + if (ret != 0) + return ret; + break; + case IP6_TUNNEL: + rte_ipsec_sad_lookup(sad->sad_v6, lookup_key, &tmp, 1); + if (tmp != NULL) + return -EEXIST; + + ret = rte_ipsec_sad_add(sad->sad_v6, &key, + RTE_IPSEC_SAD_SPI_ONLY, sa); + if (ret != 0) + return ret; + break; + case TRANSPORT: + if (sp4_spi_present(sa->spi, 1, NULL, NULL) >= 0) { + rte_ipsec_sad_lookup(sad->sad_v4, lookup_key, &tmp, 1); + if (tmp != NULL) + return -EEXIST; + + ret = rte_ipsec_sad_add(sad->sad_v4, &key, + RTE_IPSEC_SAD_SPI_ONLY, sa); + if (ret != 0) + return ret; + } + if (sp6_spi_present(sa->spi, 1, NULL, NULL) >= 0) { + rte_ipsec_sad_lookup(sad->sad_v6, lookup_key, &tmp, 1); + if (tmp != NULL) + return -EEXIST; + + ret = rte_ipsec_sad_add(sad->sad_v6, &key, + RTE_IPSEC_SAD_SPI_ONLY, sa); + if (ret != 0) + return ret; + } + } + + return 0; +} + +/* + * Init per lcore SAD cache. + * Must be called by every processing lcore. + */ +int +ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent) +{ + uint32_t cache_elem; + size_t cache_mem_sz; + struct ipsec_sad_cache *cache; + + cache = &RTE_PER_LCORE(sad_cache); + + cache_elem = rte_align32pow2(nb_cache_ent); + cache_mem_sz = sizeof(struct ipsec_sa *) * cache_elem; + + if (cache_mem_sz != 0) { + cache->v4 = rte_zmalloc_socket(NULL, cache_mem_sz, + RTE_CACHE_LINE_SIZE, rte_socket_id()); + if (cache->v4 == NULL) + return -rte_errno; + + cache->v6 = rte_zmalloc_socket(NULL, cache_mem_sz, + RTE_CACHE_LINE_SIZE, rte_socket_id()); + if (cache->v6 == NULL) + return -rte_errno; + + cache->mask = cache_elem - 1; + } + + return 0; +} + +int +ipsec_sad_create(const char *name, struct ipsec_sad *sad, + int socket_id, struct ipsec_sa_cnt *sa_cnt) +{ + int ret; + struct rte_ipsec_sad_conf sad_conf; + char sad_name[RTE_IPSEC_SAD_NAMESIZE]; + + if ((name == NULL) || (sad == NULL) || (sa_cnt == NULL)) + return -EINVAL; + + ret = snprintf(sad_name, RTE_IPSEC_SAD_NAMESIZE, "%s_v4", name); + if (ret < 0 || ret >= RTE_IPSEC_SAD_NAMESIZE) + return -ENAMETOOLONG; + + sad_conf.socket_id = socket_id; + sad_conf.flags = 0; + /* Make SAD have extra 25% of required number of entries */ + sad_conf.max_sa[RTE_IPSEC_SAD_SPI_ONLY] = sa_cnt->nb_v4 * 5 / 4; + sad_conf.max_sa[RTE_IPSEC_SAD_SPI_DIP] = 0; + sad_conf.max_sa[RTE_IPSEC_SAD_SPI_DIP_SIP] = 0; + + if (sa_cnt->nb_v4 != 0) { + sad->sad_v4 = rte_ipsec_sad_create(sad_name, &sad_conf); + if (sad->sad_v4 == NULL) + return -rte_errno; + } + + ret = snprintf(sad_name, RTE_IPSEC_SAD_NAMESIZE, "%s_v6", name); + if (ret < 0 || ret >= RTE_IPSEC_SAD_NAMESIZE) + return -ENAMETOOLONG; + sad_conf.flags = RTE_IPSEC_SAD_FLAG_IPV6; + sad_conf.max_sa[RTE_IPSEC_SAD_SPI_ONLY] = sa_cnt->nb_v6 * 5 / 4; + + if (sa_cnt->nb_v6 != 0) { + sad->sad_v6 = rte_ipsec_sad_create(name, &sad_conf); + if (sad->sad_v6 == NULL) + return -rte_errno; + } + + return 0; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/sad.h b/src/spdk/dpdk/examples/ipsec-secgw/sad.h new file mode 100644 index 000000000..473aaa938 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/sad.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef __SAD_H__ +#define __SAD_H__ + +#include <rte_ipsec_sad.h> + +#define SA_CACHE_SZ 128 +#define SPI2IDX(spi, mask) ((spi) & (mask)) + +struct ipsec_sad_cache { + struct ipsec_sa **v4; + struct ipsec_sa **v6; + uint32_t mask; +}; + +RTE_DECLARE_PER_LCORE(struct ipsec_sad_cache, sad_cache); + +int ipsec_sad_create(const char *name, struct ipsec_sad *sad, + int socket_id, 
struct ipsec_sa_cnt *sa_cnt); + +int ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa); + +int ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent); + +static inline int +cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4, + struct rte_ipv6_hdr *ipv6) +{ + int sa_type = WITHOUT_TRANSPORT_VERSION(sa->flags); + if ((sa_type == TRANSPORT) || + /* IPv4 check */ + (is_v4 && (sa_type == IP4_TUNNEL) && + (sa->src.ip.ip4 == ipv4->src_addr) && + (sa->dst.ip.ip4 == ipv4->dst_addr)) || + /* IPv6 check */ + (!is_v4 && (sa_type == IP6_TUNNEL) && + (!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) && + (!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16)))) + return 1; + + return 0; +} + +static inline void +sa_cache_update(struct ipsec_sa **sa_cache, struct ipsec_sa *sa, uint32_t mask) +{ + uint32_t cache_idx; + + /* SAD cache is disabled */ + if (mask == 0) + return; + + cache_idx = SPI2IDX(sa->spi, mask); + sa_cache[cache_idx] = sa; +} + +static inline void +sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[], + void *sa[], uint16_t nb_pkts) +{ + uint32_t i; + uint32_t nb_v4 = 0, nb_v6 = 0; + struct rte_esp_hdr *esp; + struct rte_ipv4_hdr *ipv4; + struct rte_ipv6_hdr *ipv6; + struct rte_ipsec_sadv4_key v4[nb_pkts]; + struct rte_ipsec_sadv6_key v6[nb_pkts]; + int v4_idxes[nb_pkts]; + int v6_idxes[nb_pkts]; + const union rte_ipsec_sad_key *keys_v4[nb_pkts]; + const union rte_ipsec_sad_key *keys_v6[nb_pkts]; + void *v4_res[nb_pkts]; + void *v6_res[nb_pkts]; + uint32_t spi, cache_idx; + struct ipsec_sad_cache *cache; + struct ipsec_sa *cached_sa; + int is_ipv4; + + cache = &RTE_PER_LCORE(sad_cache); + + /* split received packets by address family into two arrays */ + for (i = 0; i < nb_pkts; i++) { + ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *); + ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *); + esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *, + pkts[i]->l3_len); + + is_ipv4 = pkts[i]->packet_type & RTE_PTYPE_L3_IPV4; + spi = rte_be_to_cpu_32(esp->spi); + cache_idx = SPI2IDX(spi, cache->mask); + + if (is_ipv4) { + cached_sa = (cache->mask != 0) ? + cache->v4[cache_idx] : NULL; + /* check SAD cache entry */ + if ((cached_sa != NULL) && (cached_sa->spi == spi)) { + if (cmp_sa_key(cached_sa, 1, ipv4, ipv6)) { + /* cache hit */ + sa[i] = cached_sa; + continue; + } + } + /* + * cache miss + * preparing sad key to proceed with sad lookup + */ + v4[nb_v4].spi = esp->spi; + v4[nb_v4].dip = ipv4->dst_addr; + v4[nb_v4].sip = ipv4->src_addr; + keys_v4[nb_v4] = (const union rte_ipsec_sad_key *) + &v4[nb_v4]; + v4_idxes[nb_v4++] = i; + } else { + cached_sa = (cache->mask != 0) ? 
+ cache->v6[cache_idx] : NULL; + if ((cached_sa != NULL) && (cached_sa->spi == spi)) { + if (cmp_sa_key(cached_sa, 0, ipv4, ipv6)) { + sa[i] = cached_sa; + continue; + } + } + v6[nb_v6].spi = esp->spi; + memcpy(v6[nb_v6].dip, ipv6->dst_addr, + sizeof(ipv6->dst_addr)); + memcpy(v6[nb_v6].sip, ipv6->src_addr, + sizeof(ipv6->src_addr)); + keys_v6[nb_v6] = (const union rte_ipsec_sad_key *) + &v6[nb_v6]; + v6_idxes[nb_v6++] = i; + } + } + + if (nb_v4 != 0) + rte_ipsec_sad_lookup(sad->sad_v4, keys_v4, v4_res, nb_v4); + if (nb_v6 != 0) + rte_ipsec_sad_lookup(sad->sad_v6, keys_v6, v6_res, nb_v6); + + for (i = 0; i < nb_v4; i++) { + ipv4 = rte_pktmbuf_mtod(pkts[v4_idxes[i]], + struct rte_ipv4_hdr *); + if ((v4_res[i] != NULL) && + (cmp_sa_key(v4_res[i], 1, ipv4, NULL))) { + sa[v4_idxes[i]] = v4_res[i]; + sa_cache_update(cache->v4, (struct ipsec_sa *)v4_res[i], + cache->mask); + } else + sa[v4_idxes[i]] = NULL; + } + for (i = 0; i < nb_v6; i++) { + ipv6 = rte_pktmbuf_mtod(pkts[v6_idxes[i]], + struct rte_ipv6_hdr *); + if ((v6_res[i] != NULL) && + (cmp_sa_key(v6_res[i], 0, NULL, ipv6))) { + sa[v6_idxes[i]] = v6_res[i]; + sa_cache_update(cache->v6, (struct ipsec_sa *)v6_res[i], + cache->mask); + } else + sa[v6_idxes[i]] = NULL; + } +} + +#endif /* __SAD_H__ */ diff --git a/src/spdk/dpdk/examples/ipsec-secgw/sp4.c b/src/spdk/dpdk/examples/ipsec-secgw/sp4.c new file mode 100644 index 000000000..beddd7bc1 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/sp4.c @@ -0,0 +1,648 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +/* + * Security Policies + */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> + +#include <rte_acl.h> +#include <rte_ip.h> + +#include "ipsec.h" +#include "parser.h" + +#define INIT_ACL_RULE_NUM 128 + +#define IPV4_DST_FROM_SP(acr) \ + (rte_cpu_to_be_32((acr).field[DST_FIELD_IPV4].value.u32)) + +#define IPV4_SRC_FROM_SP(acr) \ + (rte_cpu_to_be_32((acr).field[SRC_FIELD_IPV4].value.u32)) + +#define IPV4_DST_MASK_FROM_SP(acr) \ + ((acr).field[DST_FIELD_IPV4].mask_range.u32) + +#define IPV4_SRC_MASK_FROM_SP(acr) \ + ((acr).field[SRC_FIELD_IPV4].mask_range.u32) + +/* + * Rule and trace formats definitions. 
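+ * All field offsets below are taken relative to the IPv4 protocol field (ip_p).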
+ */ +enum { + PROTO_FIELD_IPV4, + SRC_FIELD_IPV4, + DST_FIELD_IPV4, + SRCP_FIELD_IPV4, + DSTP_FIELD_IPV4, + NUM_FIELDS_IPV4 +}; + +/* + * That effectively defines order of IPV4 classifications: + * - PROTO + * - SRC IP ADDRESS + * - DST IP ADDRESS + * - PORTS (SRC and DST) + */ +enum { + RTE_ACL_IPV4_PROTO, + RTE_ACL_IPV4_SRC, + RTE_ACL_IPV4_DST, + RTE_ACL_IPV4_PORTS, + RTE_ACL_IPV4_NUM +}; + +static struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = { + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint8_t), + .field_index = PROTO_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PROTO, + .offset = 0, + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = SRC_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_SRC, + .offset = offsetof(struct ip, ip_src) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = DST_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_DST, + .offset = offsetof(struct ip, ip_dst) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = SRCP_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PORTS, + .offset = sizeof(struct ip) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = DSTP_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PORTS, + .offset = sizeof(struct ip) - offsetof(struct ip, ip_p) + + sizeof(uint16_t) + }, +}; + +RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ip4_defs)); + +static struct acl4_rules *acl4_rules_out; +static uint32_t nb_acl4_rules_out; +static uint32_t sp_out_sz; + +static struct acl4_rules *acl4_rules_in; +static uint32_t nb_acl4_rules_in; +static uint32_t sp_in_sz; + +static int +extend_sp_arr(struct acl4_rules **sp_tbl, uint32_t cur_cnt, uint32_t *cur_sz) +{ + if (*sp_tbl == NULL) { + *sp_tbl = calloc(INIT_ACL_RULE_NUM, sizeof(struct acl4_rules)); + if (*sp_tbl == NULL) + return -1; + *cur_sz = INIT_ACL_RULE_NUM; + return 0; + } + + if (cur_cnt >= *cur_sz) { + *sp_tbl = realloc(*sp_tbl, + *cur_sz * sizeof(struct acl4_rules) * 2); + if (*sp_tbl == NULL) + return -1; + /* clean reallocated extra space */ + memset(&(*sp_tbl)[*cur_sz], 0, + *cur_sz * sizeof(struct acl4_rules)); + *cur_sz *= 2; + } + + return 0; +} + + +void +parse_sp4_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct acl4_rules *rule_ipv4 = NULL; + + uint32_t *ri = NULL; /* rule index */ + uint32_t ti = 0; /* token index */ + uint32_t tv; + + uint32_t esp_p = 0; + uint32_t protect_p = 0; + uint32_t bypass_p = 0; + uint32_t discard_p = 0; + uint32_t pri_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t proto_p = 0; + uint32_t sport_p = 0; + uint32_t dport_p = 0; + + if (strcmp(tokens[1], "in") == 0) { + ri = &nb_acl4_rules_in; + + if (extend_sp_arr(&acl4_rules_in, nb_acl4_rules_in, + &sp_in_sz) < 0) + return; + + rule_ipv4 = &acl4_rules_in[*ri]; + + } else if (strcmp(tokens[1], "out") == 0) { + ri = &nb_acl4_rules_out; + + if (extend_sp_arr(&acl4_rules_out, nb_acl4_rules_out, + &sp_out_sz) < 0) + return; + + rule_ipv4 = &acl4_rules_out[*ri]; + } else { + APP_CHECK(0, status, "unrecognized input \"%s\", expect" + " \"in\" or \"out\"\n", tokens[ti]); + return; + } + + rule_ipv4->data.category_mask = 1; + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "esp") == 0) { + /* currently do nothing */ + APP_CHECK_PRESENCE(esp_p, tokens[ti], status); + if (status->status < 0) + return; + esp_p = 1; + continue; + } + + if 
(strcmp(tokens[ti], "protect") == 0) { + APP_CHECK_PRESENCE(protect_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "bypass"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + tv = atoi(tokens[ti]); + APP_CHECK(tv != DISCARD && tv != BYPASS, status, + "invalid SPI: %s", tokens[ti]); + if (status->status < 0) + return; + rule_ipv4->data.userdata = tv; + + protect_p = 1; + continue; + } + + if (strcmp(tokens[ti], "bypass") == 0) { + APP_CHECK_PRESENCE(bypass_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = BYPASS; + + bypass_p = 1; + continue; + } + + if (strcmp(tokens[ti], "discard") == 0) { + APP_CHECK_PRESENCE(discard_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = DISCARD; + + discard_p = 1; + continue; + } + + if (strcmp(tokens[ti], "pri") == 0) { + APP_CHECK_PRESENCE(pri_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv4->data.priority = atoi(tokens[ti]); + + pri_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + struct in_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv4_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[1].value.u32 = + rte_bswap32(ip.s_addr); + rule_ipv4->field[1].mask_range.u32 = + depth; + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + struct in_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK(parse_ipv4_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[2].value.u32 = + rte_bswap32(ip.s_addr); + rule_ipv4->field[2].mask_range.u32 = + depth; + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "proto") == 0) { + uint16_t low, high; + + APP_CHECK_PRESENCE(proto_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + 
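/* "proto" expects a low:high range; both bounds must fit in a single byte */ +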
APP_CHECK(parse_range(tokens[ti], &low, &high) + == 0, status, "unrecognized input \"%s\"" + ", expect \"from:to\"", tokens[ti]); + if (status->status < 0) + return; + APP_CHECK(low <= 0xff, status, "proto low " + "over-limit"); + if (status->status < 0) + return; + APP_CHECK(high <= 0xff, status, "proto high " + "over-limit"); + if (status->status < 0) + return; + + rule_ipv4->field[0].value.u8 = (uint8_t)low; + rule_ipv4->field[0].mask_range.u8 = (uint8_t)high; + + proto_p = 1; + continue; + } + + if (strcmp(tokens[ti], "sport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(sport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[3].value.u16 = port_low; + rule_ipv4->field[3].mask_range.u16 = port_high; + + sport_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(dport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[4].value.u16 = port_low; + rule_ipv4->field[4].mask_range.u16 = port_high; + + dport_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; + } + + /* check if argument(s) are missing */ + APP_CHECK(esp_p == 1, status, "missing argument \"esp\""); + if (status->status < 0) + return; + + APP_CHECK(protect_p | bypass_p | discard_p, status, "missing " + "argument \"protect\", \"bypass\", or \"discard\""); + if (status->status < 0) + return; + + *ri = *ri + 1; +} + +static void +print_one_ip4_rule(const struct acl4_rules *rule, int32_t extra) +{ + uint8_t a, b, c, d; + + uint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32, + &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu/%u ", a, b, c, d, + rule->field[SRC_FIELD_IPV4].mask_range.u32); + uint32_t_to_char(rule->field[DST_FIELD_IPV4].value.u32, + &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu/%u ", a, b, c, d, + rule->field[DST_FIELD_IPV4].mask_range.u32); + printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ", + rule->field[SRCP_FIELD_IPV4].value.u16, + rule->field[SRCP_FIELD_IPV4].mask_range.u16, + rule->field[DSTP_FIELD_IPV4].value.u16, + rule->field[DSTP_FIELD_IPV4].mask_range.u16, + rule->field[PROTO_FIELD_IPV4].value.u8, + rule->field[PROTO_FIELD_IPV4].mask_range.u8); + if (extra) + printf("0x%x-0x%x-0x%x ", + rule->data.category_mask, + rule->data.priority, + rule->data.userdata); +} + +static inline void +dump_ip4_rules(const struct acl4_rules *rule, int32_t num, int32_t extra) +{ + int32_t i; + + for (i = 0; i < num; i++, rule++) { + printf("\t%d:", i + 1); + print_one_ip4_rule(rule, extra); + printf("\n"); + } +} + +static struct rte_acl_ctx * +acl4_init(const char *name, int32_t socketid, const struct acl4_rules *rules, + uint32_t rules_nb) +{ + char s[PATH_MAX]; + struct rte_acl_param acl_param; + struct rte_acl_config acl_build_param; + struct rte_acl_ctx *ctx; + + printf("Creating SP context with %u rules\n", rules_nb); + + memset(&acl_param, 0, 
sizeof(acl_param)); + + /* Create ACL contexts */ + snprintf(s, sizeof(s), "%s_%d", name, socketid); + + printf("IPv4 %s entries [%u]:\n", s, rules_nb); + dump_ip4_rules(rules, rules_nb, 1); + + acl_param.name = s; + acl_param.socket_id = socketid; + acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip4_defs)); + acl_param.max_rule_num = rules_nb; + + ctx = rte_acl_create(&acl_param); + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "Failed to create ACL context\n"); + + if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules, + rules_nb) < 0) + rte_exit(EXIT_FAILURE, "add rules failed\n"); + + /* Perform builds */ + memset(&acl_build_param, 0, sizeof(acl_build_param)); + + acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES; + acl_build_param.num_fields = RTE_DIM(ip4_defs); + memcpy(&acl_build_param.defs, ip4_defs, sizeof(ip4_defs)); + + if (rte_acl_build(ctx, &acl_build_param) != 0) + rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n"); + + rte_acl_dump(ctx); + + return ctx; +} + +/* + * check that for each rule it's SPI has a correspondent entry in SAD + */ +static int +check_spi_value(struct sa_ctx *sa_ctx, int inbound) +{ + uint32_t i, num, spi; + int32_t spi_idx; + struct acl4_rules *acr; + + if (inbound != 0) { + acr = acl4_rules_in; + num = nb_acl4_rules_in; + } else { + acr = acl4_rules_out; + num = nb_acl4_rules_out; + } + + for (i = 0; i != num; i++) { + spi = acr[i].data.userdata; + if (spi != DISCARD && spi != BYPASS) { + spi_idx = sa_spi_present(sa_ctx, spi, inbound); + if (spi_idx < 0) { + RTE_LOG(ERR, IPSEC, + "SPI %u is not present in SAD\n", + spi); + return -ENOENT; + } + /* Update userdata with spi index */ + acr[i].data.userdata = spi_idx + 1; + } + } + + return 0; +} + +void +sp4_init(struct socket_ctx *ctx, int32_t socket_id) +{ + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sp_ip4_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound SP DB for socket %u already " + "initialized\n", socket_id); + + if (ctx->sp_ip4_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already " + "initialized\n", socket_id); + + if (check_spi_value(ctx->sa_in, 1) < 0) + rte_exit(EXIT_FAILURE, + "Inbound IPv4 SP DB has unmatched in SAD SPIs\n"); + + if (check_spi_value(ctx->sa_out, 0) < 0) + rte_exit(EXIT_FAILURE, + "Outbound IPv4 SP DB has unmatched in SAD SPIs\n"); + + if (nb_acl4_rules_in > 0) { + name = "sp_ip4_in"; + ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name, + socket_id, acl4_rules_in, nb_acl4_rules_in); + } else + RTE_LOG(WARNING, IPSEC, "No IPv4 SP Inbound rule " + "specified\n"); + + if (nb_acl4_rules_out > 0) { + name = "sp_ip4_out"; + ctx->sp_ip4_out = (struct sp_ctx *)acl4_init(name, + socket_id, acl4_rules_out, nb_acl4_rules_out); + } else + RTE_LOG(WARNING, IPSEC, "No IPv4 SP Outbound rule " + "specified\n"); +} + +static int +sp_cmp(const void *p, const void *q) +{ + uint32_t spi1 = ((const struct acl4_rules *)p)->data.userdata; + uint32_t spi2 = ((const struct acl4_rules *)q)->data.userdata; + + return (int)(spi1 - spi2); +} + + +/* + * Search though SP rules for given SPI. 
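+ * Note: relies on bsearch(), so the rule array must already be sorted by SPI (see sp4_sort_arr()).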
+ */ +int +sp4_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2], + uint32_t mask[2]) +{ + uint32_t num; + struct acl4_rules *rule; + const struct acl4_rules *acr; + struct acl4_rules tmpl; + + if (inbound != 0) { + acr = acl4_rules_in; + num = nb_acl4_rules_in; + } else { + acr = acl4_rules_out; + num = nb_acl4_rules_out; + } + + tmpl.data.userdata = spi; + + rule = bsearch(&tmpl, acr, num, sizeof(struct acl4_rules), sp_cmp); + if (rule != NULL) { + if (NULL != ip_addr && NULL != mask) { + ip_addr[0].ip.ip4 = IPV4_SRC_FROM_SP(*rule); + ip_addr[1].ip.ip4 = IPV4_DST_FROM_SP(*rule); + mask[0] = IPV4_SRC_MASK_FROM_SP(*rule); + mask[1] = IPV4_DST_MASK_FROM_SP(*rule); + } + return RTE_PTR_DIFF(rule, acr) / sizeof(struct acl4_rules); + } + + return -ENOENT; +} + +void +sp4_sort_arr(void) +{ + qsort(acl4_rules_in, nb_acl4_rules_in, sizeof(struct acl4_rules), + sp_cmp); + qsort(acl4_rules_out, nb_acl4_rules_out, sizeof(struct acl4_rules), + sp_cmp); +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/sp6.c b/src/spdk/dpdk/examples/ipsec-secgw/sp6.c new file mode 100644 index 000000000..328e08528 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/sp6.c @@ -0,0 +1,778 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +/* + * Security Policies + */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip6.h> + +#include <rte_acl.h> +#include <rte_ip.h> + +#include "ipsec.h" +#include "parser.h" + +#define INIT_ACL_RULE_NUM 128 + +#define IPV6_FROM_SP(acr, fidx_low, fidx_high) \ + (((uint64_t)(acr).field[(fidx_high)].value.u32 << 32) | \ + (acr).field[(fidx_low)].value.u32) + +#define IPV6_DST_FROM_SP(addr, acr) do {\ + (addr).ip.ip6.ip6[0] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \ + IP6_DST1, IP6_DST0));\ + (addr).ip.ip6.ip6[1] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \ + IP6_DST3, IP6_DST2));\ + } while (0) + +#define IPV6_SRC_FROM_SP(addr, acr) do {\ + (addr).ip.ip6.ip6[0] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \ + IP6_SRC1, IP6_SRC0));\ + (addr).ip.ip6.ip6[1] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \ + IP6_SRC3, IP6_SRC2));\ + } while (0) + +#define IPV6_DST_MASK_FROM_SP(mask, acr) \ + ((mask) = (acr).field[IP6_DST0].mask_range.u32 + \ + (acr).field[IP6_DST1].mask_range.u32 + \ + (acr).field[IP6_DST2].mask_range.u32 + \ + (acr).field[IP6_DST3].mask_range.u32) + +#define IPV6_SRC_MASK_FROM_SP(mask, acr) \ + ((mask) = (acr).field[IP6_SRC0].mask_range.u32 + \ + (acr).field[IP6_SRC1].mask_range.u32 + \ + (acr).field[IP6_SRC2].mask_range.u32 + \ + (acr).field[IP6_SRC3].mask_range.u32) + +enum { + IP6_PROTO, + IP6_SRC0, + IP6_SRC1, + IP6_SRC2, + IP6_SRC3, + IP6_DST0, + IP6_DST1, + IP6_DST2, + IP6_DST3, + IP6_SRCP, + IP6_DSTP, + IP6_NUM +}; + +#define IP6_ADDR_SIZE 16 + +static struct rte_acl_field_def ip6_defs[IP6_NUM] = { + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint8_t), + .field_index = IP6_PROTO, + .input_index = IP6_PROTO, + .offset = 0, + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC0, + .input_index = IP6_SRC0, + .offset = 2 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC1, + .input_index = IP6_SRC1, + .offset = 6 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC2, + .input_index = IP6_SRC2, + .offset = 10 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC3, + .input_index = IP6_SRC3, + .offset = 14 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST0, 
+ .input_index = IP6_DST0, + .offset = 18 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST1, + .input_index = IP6_DST1, + .offset = 22 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST2, + .input_index = IP6_DST2, + .offset = 26 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST3, + .input_index = IP6_DST3, + .offset = 30 + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = IP6_SRCP, + .input_index = IP6_SRCP, + .offset = 34 + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = IP6_DSTP, + .input_index = IP6_SRCP, + .offset = 36 + } +}; + +RTE_ACL_RULE_DEF(acl6_rules, RTE_DIM(ip6_defs)); + +static struct acl6_rules *acl6_rules_out; +static uint32_t nb_acl6_rules_out; +static uint32_t sp_out_sz; + +static struct acl6_rules *acl6_rules_in; +static uint32_t nb_acl6_rules_in; +static uint32_t sp_in_sz; + +static int +extend_sp_arr(struct acl6_rules **sp_tbl, uint32_t cur_cnt, uint32_t *cur_sz) +{ + if (*sp_tbl == NULL) { + *sp_tbl = calloc(INIT_ACL_RULE_NUM, sizeof(struct acl6_rules)); + if (*sp_tbl == NULL) + return -1; + *cur_sz = INIT_ACL_RULE_NUM; + return 0; + } + + if (cur_cnt >= *cur_sz) { + *sp_tbl = realloc(*sp_tbl, + *cur_sz * sizeof(struct acl6_rules) * 2); + if (*sp_tbl == NULL) + return -1; + /* clean reallocated extra space */ + memset(&(*sp_tbl)[*cur_sz], 0, + *cur_sz * sizeof(struct acl6_rules)); + *cur_sz *= 2; + } + + return 0; +} + +void +parse_sp6_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct acl6_rules *rule_ipv6 = NULL; + + uint32_t *ri = NULL; /* rule index */ + uint32_t ti = 0; /* token index */ + uint32_t tv; + + uint32_t esp_p = 0; + uint32_t protect_p = 0; + uint32_t bypass_p = 0; + uint32_t discard_p = 0; + uint32_t pri_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t proto_p = 0; + uint32_t sport_p = 0; + uint32_t dport_p = 0; + + if (strcmp(tokens[1], "in") == 0) { + ri = &nb_acl6_rules_in; + + if (extend_sp_arr(&acl6_rules_in, nb_acl6_rules_in, + &sp_in_sz) < 0) + return; + + rule_ipv6 = &acl6_rules_in[*ri]; + + } else if (strcmp(tokens[1], "out") == 0) { + ri = &nb_acl6_rules_out; + + if (extend_sp_arr(&acl6_rules_out, nb_acl6_rules_out, + &sp_out_sz) < 0) + return; + + rule_ipv6 = &acl6_rules_out[*ri]; + + } else { + APP_CHECK(0, status, "unrecognized input \"%s\", expect" + " \"in\" or \"out\"\n", tokens[ti]); + return; + } + + rule_ipv6->data.category_mask = 1; + + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "esp") == 0) { + /* currently do nothing */ + APP_CHECK_PRESENCE(esp_p, tokens[ti], status); + if (status->status < 0) + return; + esp_p = 1; + continue; + } + + if (strcmp(tokens[ti], "protect") == 0) { + APP_CHECK_PRESENCE(protect_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "bypass"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + tv = atoi(tokens[ti]); + APP_CHECK(tv != DISCARD && tv != BYPASS, status, + "invalid SPI: %s", tokens[ti]); + if (status->status < 0) + return; + rule_ipv6->data.userdata = tv; + + 
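/* userdata carries the SPI for now; check_spi_value() later maps it to the corresponding SA entry */ +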
protect_p = 1; + continue; + } + + if (strcmp(tokens[ti], "bypass") == 0) { + APP_CHECK_PRESENCE(bypass_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = BYPASS; + + bypass_p = 1; + continue; + } + + if (strcmp(tokens[ti], "discard") == 0) { + APP_CHECK_PRESENCE(discard_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = DISCARD; + + discard_p = 1; + continue; + } + + if (strcmp(tokens[ti], "pri") == 0) { + APP_CHECK_PRESENCE(pri_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv6->data.priority = atoi(tokens[ti]); + + pri_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv6 " + "addr", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[1].value.u32 = + (uint32_t)ip.s6_addr[0] << 24 | + (uint32_t)ip.s6_addr[1] << 16 | + (uint32_t)ip.s6_addr[2] << 8 | + (uint32_t)ip.s6_addr[3]; + rule_ipv6->field[1].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[2].value.u32 = + (uint32_t)ip.s6_addr[4] << 24 | + (uint32_t)ip.s6_addr[5] << 16 | + (uint32_t)ip.s6_addr[6] << 8 | + (uint32_t)ip.s6_addr[7]; + rule_ipv6->field[2].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[3].value.u32 = + (uint32_t)ip.s6_addr[8] << 24 | + (uint32_t)ip.s6_addr[9] << 16 | + (uint32_t)ip.s6_addr[10] << 8 | + (uint32_t)ip.s6_addr[11]; + rule_ipv6->field[3].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[4].value.u32 = + (uint32_t)ip.s6_addr[12] << 24 | + (uint32_t)ip.s6_addr[13] << 16 | + (uint32_t)ip.s6_addr[14] << 8 | + (uint32_t)ip.s6_addr[15]; + rule_ipv6->field[4].mask_range.u32 = + (depth > 32) ? 
32 : depth; + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv6 " + "addr", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[5].value.u32 = + (uint32_t)ip.s6_addr[0] << 24 | + (uint32_t)ip.s6_addr[1] << 16 | + (uint32_t)ip.s6_addr[2] << 8 | + (uint32_t)ip.s6_addr[3]; + rule_ipv6->field[5].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[6].value.u32 = + (uint32_t)ip.s6_addr[4] << 24 | + (uint32_t)ip.s6_addr[5] << 16 | + (uint32_t)ip.s6_addr[6] << 8 | + (uint32_t)ip.s6_addr[7]; + rule_ipv6->field[6].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[7].value.u32 = + (uint32_t)ip.s6_addr[8] << 24 | + (uint32_t)ip.s6_addr[9] << 16 | + (uint32_t)ip.s6_addr[10] << 8 | + (uint32_t)ip.s6_addr[11]; + rule_ipv6->field[7].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[8].value.u32 = + (uint32_t)ip.s6_addr[12] << 24 | + (uint32_t)ip.s6_addr[13] << 16 | + (uint32_t)ip.s6_addr[14] << 8 | + (uint32_t)ip.s6_addr[15]; + rule_ipv6->field[8].mask_range.u32 = + (depth > 32) ? 32 : depth; + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "proto") == 0) { + uint16_t low, high; + + APP_CHECK_PRESENCE(proto_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &low, &high) + == 0, status, "unrecognized input \"%s\"" + ", expect \"from:to\"", tokens[ti]); + if (status->status < 0) + return; + APP_CHECK(low <= 0xff, status, "proto low " + "over-limit"); + if (status->status < 0) + return; + APP_CHECK(high <= 0xff, status, "proto high " + "over-limit"); + if (status->status < 0) + return; + + rule_ipv6->field[0].value.u8 = (uint8_t)low; + rule_ipv6->field[0].mask_range.u8 = (uint8_t)high; + + proto_p = 1; + continue; + } + + if (strcmp(tokens[ti], "sport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(sport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[9].value.u16 = port_low; + rule_ipv6->field[9].mask_range.u16 = port_high; + + sport_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(dport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[10].value.u16 = port_low; + rule_ipv6->field[10].mask_range.u16 = port_high; + + dport_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); 
+ return; + } + + /* check if argument(s) are missing */ + APP_CHECK(esp_p == 1, status, "missing argument \"esp\""); + if (status->status < 0) + return; + + APP_CHECK(protect_p | bypass_p | discard_p, status, "missing " + "argument \"protect\", \"bypass\", or \"discard\""); + if (status->status < 0) + return; + + *ri = *ri + 1; +} + +static inline void +print_one_ip6_rule(const struct acl6_rules *rule, int32_t extra) +{ + uint8_t a, b, c, d; + + uint32_t_to_char(rule->field[IP6_SRC0].value.u32, + &a, &b, &c, &d); + printf("%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC1].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC2].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC3].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d, + rule->field[IP6_SRC0].mask_range.u32 + + rule->field[IP6_SRC1].mask_range.u32 + + rule->field[IP6_SRC2].mask_range.u32 + + rule->field[IP6_SRC3].mask_range.u32); + + uint32_t_to_char(rule->field[IP6_DST0].value.u32, + &a, &b, &c, &d); + printf("%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST1].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST2].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST3].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d, + rule->field[IP6_DST0].mask_range.u32 + + rule->field[IP6_DST1].mask_range.u32 + + rule->field[IP6_DST2].mask_range.u32 + + rule->field[IP6_DST3].mask_range.u32); + + printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ", + rule->field[IP6_SRCP].value.u16, + rule->field[IP6_SRCP].mask_range.u16, + rule->field[IP6_DSTP].value.u16, + rule->field[IP6_DSTP].mask_range.u16, + rule->field[IP6_PROTO].value.u8, + rule->field[IP6_PROTO].mask_range.u8); + if (extra) + printf("0x%x-0x%x-0x%x ", + rule->data.category_mask, + rule->data.priority, + rule->data.userdata); +} + +static inline void +dump_ip6_rules(const struct acl6_rules *rule, int32_t num, int32_t extra) +{ + int32_t i; + + for (i = 0; i < num; i++, rule++) { + printf("\t%d:", i + 1); + print_one_ip6_rule(rule, extra); + printf("\n"); + } +} + +static struct rte_acl_ctx * +acl6_init(const char *name, int32_t socketid, const struct acl6_rules *rules, + uint32_t rules_nb) +{ + char s[PATH_MAX]; + struct rte_acl_param acl_param; + struct rte_acl_config acl_build_param; + struct rte_acl_ctx *ctx; + + printf("Creating SP context with %u rules\n", rules_nb); + + memset(&acl_param, 0, sizeof(acl_param)); + + /* Create ACL contexts */ + snprintf(s, sizeof(s), "%s_%d", name, socketid); + + printf("IPv4 %s entries [%u]:\n", s, rules_nb); + dump_ip6_rules(rules, rules_nb, 1); + + acl_param.name = s; + acl_param.socket_id = socketid; + acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip6_defs)); + acl_param.max_rule_num = rules_nb; + + ctx = rte_acl_create(&acl_param); + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "Failed to create ACL context\n"); + + if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules, + rules_nb) < 0) + rte_exit(EXIT_FAILURE, "add rules failed\n"); + + /* Perform builds */ + memset(&acl_build_param, 0, sizeof(acl_build_param)); + + acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES; + acl_build_param.num_fields = RTE_DIM(ip6_defs); + memcpy(&acl_build_param.defs, ip6_defs, sizeof(ip6_defs)); + + if (rte_acl_build(ctx, 
&acl_build_param) != 0) + rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n"); + + rte_acl_dump(ctx); + + return ctx; +} + +/* + * check that for each rule it's SPI has a correspondent entry in SAD + */ +static int +check_spi_value(struct sa_ctx *sa_ctx, int inbound) +{ + uint32_t i, num, spi; + int32_t spi_idx; + struct acl6_rules *acr; + + if (inbound != 0) { + acr = acl6_rules_in; + num = nb_acl6_rules_in; + } else { + acr = acl6_rules_out; + num = nb_acl6_rules_out; + } + + for (i = 0; i != num; i++) { + spi = acr[i].data.userdata; + if (spi != DISCARD && spi != BYPASS) { + spi_idx = sa_spi_present(sa_ctx, spi, inbound); + if (spi_idx < 0) { + RTE_LOG(ERR, IPSEC, + "SPI %u is not present in SAD\n", + spi); + return -ENOENT; + } + /* Update userdata with spi index */ + acr[i].data.userdata = spi_idx + 1; + } + } + + return 0; +} + +void +sp6_init(struct socket_ctx *ctx, int32_t socket_id) +{ + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sp_ip6_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound IPv6 SP DB for socket %u " + "already initialized\n", socket_id); + + if (ctx->sp_ip6_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound IPv6 SP DB for socket %u " + "already initialized\n", socket_id); + + if (check_spi_value(ctx->sa_in, 1) < 0) + rte_exit(EXIT_FAILURE, + "Inbound IPv6 SP DB has unmatched in SAD SPIs\n"); + + if (check_spi_value(ctx->sa_out, 0) < 0) + rte_exit(EXIT_FAILURE, + "Outbound IPv6 SP DB has unmatched in SAD SPIs\n"); + + if (nb_acl6_rules_in > 0) { + name = "sp_ip6_in"; + ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name, + socket_id, acl6_rules_in, nb_acl6_rules_in); + } else + RTE_LOG(WARNING, IPSEC, "No IPv6 SP Inbound rule " + "specified\n"); + + if (nb_acl6_rules_out > 0) { + name = "sp_ip6_out"; + ctx->sp_ip6_out = (struct sp_ctx *)acl6_init(name, + socket_id, acl6_rules_out, nb_acl6_rules_out); + } else + RTE_LOG(WARNING, IPSEC, "No IPv6 SP Outbound rule " + "specified\n"); +} + +static int +sp_cmp(const void *p, const void *q) +{ + uint32_t spi1 = ((const struct acl6_rules *)p)->data.userdata; + uint32_t spi2 = ((const struct acl6_rules *)q)->data.userdata; + + return (int)(spi1 - spi2); +} + +/* + * Search though SP rules for given SPI. + */ +int +sp6_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2], + uint32_t mask[2]) +{ + uint32_t num; + struct acl6_rules *rule; + const struct acl6_rules *acr; + struct acl6_rules tmpl; + + if (inbound != 0) { + acr = acl6_rules_in; + num = nb_acl6_rules_in; + } else { + acr = acl6_rules_out; + num = nb_acl6_rules_out; + } + + tmpl.data.userdata = spi; + + rule = bsearch(&tmpl, acr, num, sizeof(struct acl6_rules), sp_cmp); + if (rule != NULL) { + if (NULL != ip_addr && NULL != mask) { + IPV6_SRC_FROM_SP(ip_addr[0], *rule); + IPV6_DST_FROM_SP(ip_addr[1], *rule); + IPV6_SRC_MASK_FROM_SP(mask[0], *rule); + IPV6_DST_MASK_FROM_SP(mask[1], *rule); + } + return RTE_PTR_DIFF(rule, acr) / sizeof(struct acl6_rules); + } + + return -ENOENT; +} + +void +sp6_sort_arr(void) +{ + qsort(acl6_rules_in, nb_acl6_rules_in, sizeof(struct acl6_rules), + sp_cmp); + qsort(acl6_rules_out, nb_acl6_rules_out, sizeof(struct acl6_rules), + sp_cmp); +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/bypass_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/bypass_defs.sh new file mode 100644 index 000000000..e553635b9 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/bypass_defs.sh @@ -0,0 +1,46 @@ +#! 
/bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_null0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} + +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} + +SGW_CMD_XPRM='-w 300 -l' + +config_remote_xfrm() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config6_remote_xfrm() +{ + config_remote_xfrm +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/common_defs.sh new file mode 100644 index 000000000..df680805b --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/common_defs.sh @@ -0,0 +1,231 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +# check ETH_DEV +if [[ -z "${ETH_DEV}" ]]; then + echo "ETH_DEV is invalid" + exit 127 +fi + +# check that REMOTE_HOST is reachable +ssh ${REMOTE_HOST} echo +st=$? +if [[ $st -ne 0 ]]; then + echo "host ${REMOTE_HOST} is not reachable" + exit $st +fi + +# get ether addr of REMOTE_HOST +REMOTE_MAC=`ssh ${REMOTE_HOST} ip addr show dev ${REMOTE_IFACE}` +st=$? +REMOTE_MAC=`echo ${REMOTE_MAC} | sed -e 's/^.*ether //' -e 's/ brd.*$//'` +if [[ $st -ne 0 || -z "${REMOTE_MAC}" ]]; then + echo "coouldn't retrieve ether addr from ${REMOTE_IFACE}" + exit 127 +fi + +LOCAL_IFACE=dtap0 + +LOCAL_MAC="00:64:74:61:70:30" + +REMOTE_IPV4=192.168.31.14 +LOCAL_IPV4=192.168.31.92 + +REMOTE_IPV6=fd12:3456:789a:0031:0000:0000:0000:0014 +LOCAL_IPV6=fd12:3456:789a:0031:0000:0000:0000:0092 + +DPDK_PATH=${RTE_SDK:-${PWD}} +DPDK_BUILD=${RTE_TARGET:-x86_64-native-linux-gcc} +DPDK_VARS="" + +# by default ipsec-secgw can't deal with multi-segment packets +# make sure our local/remote host wouldn't generate fragmented packets +# if reassmebly option is not enabled +DEF_MTU_LEN=1400 +DEF_PING_LEN=1200 + +# set operation mode based on environment variables values +select_mode() +{ + echo "Test environment configuration:" + # check which mode to be enabled (library/legacy) + if [[ -n "${SGW_MODE}" && "${SGW_MODE}" == "library" ]]; then + DPDK_MODE="-w 300 -l" + echo "[enabled] library mode" + else + DPDK_MODE="" + echo "[enabled] legacy mode" + fi + + # check if esn is demanded + if [[ -n "${SGW_ESN}" && "${SGW_ESN}" == "esn-on" ]]; then + DPDK_VARS="${DPDK_VARS} -e" + XFRM_ESN="flag esn" + echo "[enabled] extended sequence number" + else + XFRM_ESN="" + echo "[disabled] extended sequence number" + fi + + # check if atom is demanded + if [[ -n "${SGW_ATOM}" && "${SGW_ATOM}" == "atom-on" ]]; then + DPDK_VARS="${DPDK_VARS} -a" + echo "[enabled] sequence number atomic behavior" + else + echo "[disabled] sequence number atomic behavior" + fi + + # check if inline should be enabled + if [[ -n "${SGW_CRYPTO}" && "${SGW_CRYPTO}" == "inline" ]]; then + CRYPTO_DEV='--vdev="crypto_null0"' + SGW_CFG_XPRM_IN="port_id 0 type inline-crypto-offload" + SGW_CFG_XPRM_OUT="port_id 0 type inline-crypto-offload" + echo "[enabled] inline crypto mode" + else + 
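# inline crypto not requested - leave the extra config parameters empty +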
SGW_CFG_XPRM_IN="" + SGW_CFG_XPRM_OUT="" + echo "[disabled] inline crypto mode" + fi + + # check if fallback should be enabled + if [[ -n "${SGW_CRYPTO_FLBK}" ]] && [[ -n ${SGW_CFG_XPRM_IN} ]] \ + && [[ "${SGW_MODE}" == "library" ]] \ + && [[ "${SGW_CRYPTO_FLBK}" == "cpu-crypto" \ + || "${SGW_CRYPTO_FLBK}" == "lookaside-none" ]]; then + CRYPTO_DEV="" + SGW_CFG_XPRM_IN="${SGW_CFG_XPRM_IN} fallback ${SGW_CRYPTO_FLBK}" + SGW_CFG_XPRM_OUT="" + echo "[enabled] crypto fallback ${SGW_CRYPTO_FLBK} mode" + else + if [[ -n "${SGW_CRYPTO_FLBK}" \ + && "${SGW_CRYPTO}" != "inline" ]]; then + echo "SGW_CRYPTO variable needs to be set to \ +\"inline\" for ${SGW_CRYPTO_FLBK} fallback setting" + exit 127 + elif [[ -n "${SGW_CRYPTO_FLBK}" \ + && "${SGW_MODE}" != "library" ]]; then + echo "SGW_MODE variable needs to be set to \ +\"library\" for ${SGW_CRYPTO_FLBK} fallback setting" + exit 127 + fi + echo "[disabled] crypto fallback mode" + fi + + # select sync/async mode + if [[ -n "${CRYPTO_PRIM_TYPE}" && -n "${DPDK_MODE}" ]]; then + echo "[enabled] crypto primary type - ${CRYPTO_PRIM_TYPE}" + SGW_CFG_XPRM_IN="${SGW_CFG_XPRM_IN} type ${CRYPTO_PRIM_TYPE}" + SGW_CFG_XPRM_OUT="${SGW_CFG_XPRM_OUT} type ${CRYPTO_PRIM_TYPE}" + else + if [[ -n "${CRYPTO_PRIM_TYPE}" \ + && "${SGW_MODE}" != "library" ]]; then + echo "SGW_MODE variable needs to be set to \ +\"library\" for ${CRYPTO_PRIM_TYPE} crypto primary type setting" + exit 127 + fi + fi + + + # make linux to generate fragmented packets + if [[ -n "${SGW_MULTI_SEG}" && -n "${DPDK_MODE}" ]]; then + echo -e "[enabled] multi-segment test is enabled\n" + SGW_CMD_XPRM="--reassemble ${SGW_MULTI_SEG}" + PING_LEN=5000 + MTU_LEN=1500 + else + if [[ -z "${SGW_MULTI_SEG}" \ + && "${SGW_CFG_XPRM_IN}" == *fallback* ]]; then + echo "SGW_MULTI_SEG environment variable needs \ +to be set for ${SGW_CRYPTO_FLBK} fallback test" + exit 127 + elif [[ -n "${SGW_MULTI_SEG}" \ + && "${SGW_MODE}" != "library" ]]; then + echo "SGW_MODE variable needs to be set to \ +\"library\" for multiple segment reassemble setting" + exit 127 + fi + + echo -e "[disabled] multi-segment test\n" + PING_LEN=${DEF_PING_LEN} + MTU_LEN=${DEF_MTU_LEN} + fi +} + +# setup mtu on local iface +set_local_mtu() +{ + mtu=$1 + ifconfig ${LOCAL_IFACE} mtu ${mtu} + sysctl -w net.ipv6.conf.${LOCAL_IFACE}.mtu=${mtu} +} + +# configure local host/ifaces +config_local_iface() +{ + ifconfig ${LOCAL_IFACE} ${LOCAL_IPV4}/24 up + ifconfig ${LOCAL_IFACE} + + ip neigh flush dev ${LOCAL_IFACE} + ip neigh add ${REMOTE_IPV4} dev ${LOCAL_IFACE} lladdr ${REMOTE_MAC} + ip neigh show dev ${LOCAL_IFACE} +} + +config6_local_iface() +{ + config_local_iface + + sysctl -w net.ipv6.conf.${LOCAL_IFACE}.disable_ipv6=0 + ip addr add ${LOCAL_IPV6}/64 dev ${LOCAL_IFACE} + + ip -6 neigh add ${REMOTE_IPV6} dev ${LOCAL_IFACE} lladdr ${REMOTE_MAC} + ip neigh show dev ${LOCAL_IFACE} +} + +# configure remote host/iface +config_remote_iface() +{ + ssh ${REMOTE_HOST} ifconfig ${REMOTE_IFACE} down + ssh ${REMOTE_HOST} ifconfig ${REMOTE_IFACE} ${REMOTE_IPV4}/24 up + ssh ${REMOTE_HOST} ifconfig ${REMOTE_IFACE} + + ssh ${REMOTE_HOST} ip neigh flush dev ${REMOTE_IFACE} + + ssh ${REMOTE_HOST} ip neigh add ${LOCAL_IPV4} \ + dev ${REMOTE_IFACE} lladdr ${LOCAL_MAC} + ssh ${REMOTE_HOST} ip neigh show dev ${REMOTE_IFACE} + + ssh ${REMOTE_HOST} iptables --flush +} + +config6_remote_iface() +{ + config_remote_iface + + ssh ${REMOTE_HOST} sysctl -w \ + net.ipv6.conf.${REMOTE_IFACE}.disable_ipv6=0 + ssh ${REMOTE_HOST} ip addr add ${REMOTE_IPV6}/64 dev ${REMOTE_IFACE} + + 
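# add a static IPv6 neighbour entry for the SUT address on the remote side +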
ssh ${REMOTE_HOST} ip -6 neigh add ${LOCAL_IPV6} \ + dev ${REMOTE_IFACE} lladdr ${LOCAL_MAC} + ssh ${REMOTE_HOST} ip neigh show dev ${REMOTE_IFACE} + + ssh ${REMOTE_HOST} ip6tables --flush +} + +# configure remote and local host/iface +config_iface() +{ + config_local_iface + config_remote_iface +} + +config6_iface() +{ + config6_local_iface + config6_remote_iface +} + +# secgw application parameters setup +SGW_PORT_CFG="--vdev=\"net_tap0,mac=fixed\" ${ETH_DEV}" +SGW_WAIT_DEV="${LOCAL_IFACE}" +. ${DIR}/common_defs_secgw.sh diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/common_defs_secgw.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/common_defs_secgw.sh new file mode 100644 index 000000000..e431c8ee3 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/common_defs_secgw.sh @@ -0,0 +1,66 @@ +#!/bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +# check required parameters +SGW_REQ_VARS="SGW_PATH SGW_PORT_CFG SGW_WAIT_DEV" +for reqvar in ${SGW_REQ_VARS} +do + if [[ -z "${!reqvar}" ]]; then + echo "Required parameter ${reqvar} is empty" + exit 127 + fi +done + +# check if SGW_PATH point to an executable +if [[ ! -x ${SGW_PATH} ]]; then + echo "${SGW_PATH} is not executable" + exit 127 +fi + +# setup SGW_LCORE +SGW_LCORE=${SGW_LCORE:-0} + +# setup config and output filenames +SGW_OUT_FILE=./ipsec-secgw.out1 +SGW_CFG_FILE=$(mktemp) + +# setup secgw parameters +SGW_CMD_EAL_PRM="--lcores=${SGW_LCORE} -n 4" +SGW_CMD_CFG="(0,0,${SGW_LCORE}),(1,0,${SGW_LCORE})" +SGW_CMD_PRM="-p 0x3 -u 1 -P --config=\"${SGW_CMD_CFG}\"" + +# start ipsec-secgw +secgw_start() +{ + SGW_EXEC_FILE=$(mktemp) + cat <<EOF > ${SGW_EXEC_FILE} +stdbuf -o0 ${SGW_PATH} ${SGW_CMD_EAL_PRM} ${CRYPTO_DEV} \ +${SGW_PORT_CFG} ${SGW_EAL_XPRM} \ +-- ${SGW_CMD_PRM} ${SGW_CMD_XPRM} -f ${SGW_CFG_FILE} > \ +${SGW_OUT_FILE} 2>&1 & +p=\$! +echo \$p +EOF + + cat ${SGW_EXEC_FILE} + cat ${SGW_CFG_FILE} + SGW_PID=`/bin/bash -x ${SGW_EXEC_FILE}` + + # wait till ipsec-secgw start properly + i=0 + st=1 + while [[ $i -ne 10 && $st -ne 0 ]]; do + sleep 1 + ifconfig ${SGW_WAIT_DEV} + st=$? + let i++ + done +} + +# stop ipsec-secgw and cleanup +secgw_stop() +{ + kill ${SGW_PID} + rm -f ${SGW_EXEC_FILE} + rm -f ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/data_rxtx.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/data_rxtx.sh new file mode 100644 index 000000000..05090e344 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/data_rxtx.sh @@ -0,0 +1,65 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +TCP_PORT=22222 + +ping_test1() +{ + dst=$1 + i=${2:-0} + end=${3:-1200} + + st=0 + while [[ $i -ne $end && $st -eq 0 ]]; + do + ping -c 1 -s ${i} -M dont ${dst} + st=$? + let i++ + done + + if [[ $st -ne 0 ]]; then + echo "ERROR: $0 failed for dst=${dst}, sz=${i}" + fi + return $st; +} + +ping6_test1() +{ + dst=$1 + i=${2:-0} + end=${3:-1200} + + st=0 + while [[ $i -ne $end && $st -eq 0 ]]; + do + ping6 -c 1 -s ${i} -M dont ${dst} + st=$? + let i++ + done + + if [[ $st -ne 0 ]]; then + echo "ERROR: $0 failed for dst=${dst}, sz=${i}" + fi + return $st; +} + +scp_test1() +{ + dst=$1 + + for sz in 1234 23456 345678 4567890 56789102 ; do + x=`basename $0`.${sz} + dd if=/dev/urandom of=${x} bs=${sz} count=1 + scp ${x} [${dst}]:${x} + scp [${dst}]:${x} ${x}.copy1 + diff -u ${x} ${x}.copy1 + st=$? 
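+ # clean up the generated files on both hosts before checking the result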
+ rm -f ${x} ${x}.copy1 + ssh ${REMOTE_HOST} rm -f ${x} + if [[ $st -ne 0 ]]; then + return $st + fi + done + + return 0; +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/linux_test.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/linux_test.sh new file mode 100644 index 000000000..85dbf7e8a --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/linux_test.sh @@ -0,0 +1,141 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +# Usage: /bin/bash linux_test.sh <ip_protocol> <ipsec_mode> +# <ip_protocol> can be set to: +# ipv4-ipv4 - only IPv4 traffic +# ipv4-ipv6 - IPv4 traffic over IPv6 ipsec tunnel (only for tunnel mode) +# ipv6-ipv4 - IPv6 traffic over IPv4 ipsec tunnel (only for tunnel mode) +# ipv6-ipv6 - only IPv6 traffic +# For list of available modes please refer to run_test.sh. +# +# Note that most of them require appropriate crypto PMD/device to be available. +# Also user has to setup properly the following environment variables: +# SGW_PATH - path to the ipsec-secgw binary to test +# REMOTE_HOST - ip/hostname of the DUT +# REMOTE_IFACE - iface name for the test-port on DUT +# ETH_DEV - ethernet device to be used on SUT by DPDK ('-w <pci-id>') +# Also user can optionally setup: +# SGW_LCORE - lcore to run ipsec-secgw on (default value is 0) +# SGW_MODE - run ipsec-secgw in legacy mode or with use of library +# values: legacy/library (legacy on default) +# SGW_ESN - run ipsec-secgw with extended sequence number +# values: esn-on/esn-off (esn-off on default) +# SGW_ATOM - run ipsec-secgw with sequence number atomic behavior +# values: atom-on/atom-off (atom-off on default) +# SGW_CRYPTO - run ipsec-secgw with use of inline crypto +# values: inline (unset on default) +# SGW_CRYPTO_FLBK - run ipsec-secgw with crypto fallback configured +# values: cpu-crypto/lookaside-none (unset on default) +# CRYPTO_PRIM_TYPE - run ipsec-secgw with crypto primary type set +# values: cpu-crypto (unset on default) +# CRYPTO_DEV - crypto device to be used ('-w <pci-id>') +# if none specified appropriate vdevs will be created by the script +# SGW_MULTI_SEG - ipsec-secgw option to enable reassembly support and +# specify size of reassembly table (i.e. SGW_MULTI_SEG=128) +# +# The purpose of the script is to automate ipsec-secgw testing +# using another system running linux as a DUT. +# It expects that SUT and DUT are connected through at least 2 NICs. +# One NIC is expected to be managed by linux both machines, +# and will be used as a control path +# Make sure user from SUT can ssh to DUT without entering password. +# Second NIC (test-port) should be reserved for DPDK on SUT, +# and should be managed by linux on DUT. +# The script starts ipsec-secgw with 2 NIC devices: test-port and tap vdev. +# Then configures local tap iface and remote iface and ipsec policies +# in the following way: +# traffic going over test-port in both directions has to be +# protected by ipsec. +# Traffic going over TAP in both directions doesn't have to be protected. +# I.E: +# DUT OS(NIC1)--(ipsec)-->(NIC1)ipsec-secgw(TAP)--(plain)-->(TAP)SUT OS +# SUT OS(TAP)--(plain)-->(TAP)psec-secgw(NIC1)--(ipsec)-->(NIC1)DUT OS +# Then tries to perform some data transfer using the scheme described above. +# + +DIR=`dirname $0` +PROTO=$1 +MODE=$2 + + . ${DIR}/common_defs.sh + +select_mode + + . ${DIR}/${MODE}_defs.sh + +if [[ "${PROTO}" == "ipv4-ipv4" ]] || [[ "${PROTO}" == "ipv6-ipv6" ]]; then + config_secgw +else + config_secgw_mixed +fi + +secgw_start + + . 
${DIR}/data_rxtx.sh + +if [[ "${PROTO}" == "ipv4-ipv4" ]]; then + config_iface + config_remote_xfrm_44 + set_local_mtu ${MTU_LEN} + ping_test1 ${REMOTE_IPV4} 0 ${PING_LEN} + + st=$? + if [[ $st -eq 0 ]]; then + set_local_mtu ${DEF_MTU_LEN} + scp_test1 ${REMOTE_IPV4} + st=$? + fi +elif [[ "${PROTO}" == "ipv4-ipv6" ]]; then + if [[ "${MODE}" == trs* ]]; then + echo "Cannot mix protocols in transport mode" + secgw_stop + exit 1 + fi + config6_iface + config_remote_xfrm_46 + set_local_mtu ${MTU_LEN} + ping_test1 ${REMOTE_IPV4} 0 ${PING_LEN} + + st=$? + if [[ $st -eq 0 ]]; then + set_local_mtu ${DEF_MTU_LEN} + scp_test1 ${REMOTE_IPV4} + st=$? + fi +elif [[ "${PROTO}" == "ipv6-ipv4" ]]; then + if [[ "${MODE}" == trs* ]]; then + echo "Cannot mix protocols in transport mode" + secgw_stop + exit 1 + fi + config6_iface + config_remote_xfrm_64 + + set_local_mtu ${MTU_LEN} + ping6_test1 ${REMOTE_IPV6} 0 ${PING_LEN} + st=$? + if [[ $st -eq 0 ]]; then + set_local_mtu ${DEF_MTU_LEN} + scp_test1 ${REMOTE_IPV6} + st=$? + fi +elif [[ "${PROTO}" == "ipv6-ipv6" ]]; then + config6_iface + config_remote_xfrm_66 + set_local_mtu ${MTU_LEN} + ping6_test1 ${REMOTE_IPV6} 0 ${PING_LEN} + + st=$? + if [[ $st -eq 0 ]]; then + set_local_mtu ${DEF_MTU_LEN} + scp_test1 ${REMOTE_IPV6} + st=$? + fi +else + echo "Invalid <proto>" + st=128 +fi + +secgw_stop +exit $st diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/load_env.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/load_env.sh new file mode 100644 index 000000000..fff9176fb --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/load_env.sh @@ -0,0 +1,121 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +DIR=`dirname $0` + +regular=0 +inline_on=0 +fallback_on=0 +legacy_only=0 +fallback_val="lookaside-none" +crypto_prim="" +multi_seg_val="" +while getopts ":iflsrc" opt +do + case $opt in + i) + inline_on=1 + ;; + f) + fallback_on=1 + ;; + l) + legacy_only=1 + ;; + s) + multi_seg_val="SGW_MULTI_SEG=128" + ;; + r) + regular=1 + ;; + c) + crypto_prim="CRYPTO_PRIM_TYPE=cpu-crypto" + fallback_val="cpu-crypto" + ;; + esac +done +shift $((OPTIND -1)) + +PROTO=$1 +MODE=$2 + +# test scenarios to set up for regular test +TEST_MODES_REGULAR="legacy \ +library \ +library_esn \ +library_esn_atom" + +# test scenarios to set up for inline test +TEST_MODES_INLINE="legacy_inline \ +library_inline" + +# test scenarios to set up for fallback test +TEST_MODES_FALLBACK="library_fallback" + +# env variables to export for specific test scenarios +default="SGW_MODE=legacy SGW_ESN=esn-off SGW_ATOM=atom-off SGW_CRYPTO=regular \ +SGW_CRYPTO_FLBK= ${multi_seg_val}" +legacy="${default} CRYPTO_PRIM_TYPE=" +library="${default} SGW_MODE=library ${crypto_prim}" +library_esn="${default} SGW_MODE=library SGW_ESN=esn-on ${crypto_prim}" +library_esn_atom="${default} SGW_MODE=library SGW_ESN=esn-on SGW_ATOM=atom-on \ +${crypto_prim}" +legacy_inline="${default} SGW_CRYPTO=inline CRYPTO_PRIM_TYPE=" +library_inline="${default} SGW_MODE=library SGW_CRYPTO=inline CRYPTO_PRIM_TYPE=" +library_fallback="${default} SGW_MODE=library SGW_CRYPTO=inline \ +SGW_CRYPTO_FLBK=${fallback_val} SGW_MULTI_SEG=128 CRYPTO_PRIM_TYPE=" + +# export needed env variables and run tests +if [[ ${regular} -eq 1 ]]; then + for i in ${TEST_MODES_REGULAR}; do + if [[ ${legacy_only} -eq 1 && "${i}" != *legacy* ]]; then + continue + elif [[ ${legacy_only} -eq 0 && "${i}" == *legacy* ]]; then + continue + fi + for x in ${!i}; do + export ${x} + done + + /bin/bash ${DIR}/linux_test.sh ${PROTO} ${MODE} + st=$? 
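+ # stop at the first failing scenario and propagate its exit code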
+ if [[ ${st} -ne 0 ]]; then + exit ${st} + fi + done +elif [[ ${inline_on} -eq 1 || ${fallback_on} -eq 1 ]]; then + if [[ ${inline_on} -eq 1 ]]; then + for i in ${TEST_MODES_INLINE}; do + if [[ ${legacy_only} -eq 1 && "${i}" != *legacy* ]] + then + continue + elif [[ ${legacy_only} -eq 0 && "${i}" == *legacy* ]] + then + continue + fi + for x in ${!i}; do + export ${x} + done + + /bin/bash ${DIR}/linux_test.sh ${PROTO} ${MODE} + st=$? + if [[ ${st} -ne 0 ]]; then + exit ${st} + fi + done + fi + if [[ ${fallback_on} -eq 1 ]]; then + for i in ${TEST_MODES_FALLBACK}; do + for x in ${!i}; do + export ${x} + done + + /bin/bash ${DIR}/linux_test.sh ${PROTO} ${MODE} + st=$? + if [[ ${st} -ne 0 ]]; then + exit ${st} + fi + done + fi +fi +exit 0 diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/pkttest.py b/src/spdk/dpdk/examples/ipsec-secgw/test/pkttest.py new file mode 100755 index 000000000..785b2fb88 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/pkttest.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: BSD-3-Clause + +import fcntl +import pkg_resources +import socket +import struct +import sys +import unittest + + +if sys.version_info < (3, 0): + print("Python3 is required to run this script") + sys.exit(1) + + +try: + from scapy.all import Ether +except ImportError: + print("Scapy module is required") + sys.exit(1) + + +PKTTEST_REQ = [ + "scapy>=2.4.3", +] + + +def assert_requirements(req): + """ + assert requirement is met + req can hold a string or a list of strings + """ + try: + pkg_resources.require(req) + except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict) as e: + print("Requirement assertion: " + str(e)) + sys.exit(1) + + +TAP_UNPROTECTED = "dtap1" +TAP_PROTECTED = "dtap0" + + +class Interface(object): + ETH_P_ALL = 3 + MAX_PACKET_SIZE = 1280 + IOCTL_GET_INFO = 0x8927 + SOCKET_TIMEOUT = 0.5 + def __init__(self, ifname): + self.name = ifname + + # create and bind socket to specified interface + self.s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(Interface.ETH_P_ALL)) + self.s.settimeout(Interface.SOCKET_TIMEOUT) + self.s.bind((self.name, 0, socket.PACKET_OTHERHOST)) + + # get interface MAC address + info = fcntl.ioctl(self.s.fileno(), Interface.IOCTL_GET_INFO, struct.pack('256s', bytes(ifname[:15], encoding='ascii'))) + self.mac = ':'.join(['%02x' % i for i in info[18:24]]) + + def __del__(self): + self.s.close() + + def send_l3packet(self, pkt, mac): + e = Ether(src=self.mac, dst=mac) + self.send_packet(e/pkt) + + def send_packet(self, pkt): + self.send_bytes(bytes(pkt)) + + def send_bytes(self, bytedata): + self.s.send(bytedata) + + def recv_packet(self): + return Ether(self.recv_bytes()) + + def recv_bytes(self): + return self.s.recv(Interface.MAX_PACKET_SIZE) + + def get_mac(self): + return self.mac + + +class PacketXfer(object): + def __init__(self, protected_iface=TAP_PROTECTED, unprotected_iface=TAP_UNPROTECTED): + self.protected_port = Interface(protected_iface) + self.unprotected_port = Interface(unprotected_iface) + + def send_to_protected_port(self, pkt, remote_mac=None): + if remote_mac is None: + remote_mac = self.unprotected_port.get_mac() + self.protected_port.send_l3packet(pkt, remote_mac) + + def send_to_unprotected_port(self, pkt, remote_mac=None): + if remote_mac is None: + remote_mac = self.protected_port.get_mac() + self.unprotected_port.send_l3packet(pkt, remote_mac) + + def xfer_unprotected(self, pkt): + self.send_to_unprotected_port(pkt) + return self.protected_port.recv_packet() + 
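+ # send on the protected port and read back whatever is forwarded to the unprotected one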
+ def xfer_protected(self, pkt): + self.send_to_protected_port(pkt) + return self.unprotected_port.recv_packet() + + +def pkttest(): + if len(sys.argv) == 1: + sys.exit(unittest.main(verbosity=2)) + elif len(sys.argv) == 2: + if sys.argv[1] == "config": + module = __import__('__main__') + try: + print(module.config()) + except AttributeError: + sys.stderr.write("Cannot find \"config()\" in a test") + sys.exit(1) + else: + sys.exit(1) + + +if __name__ == "__main__": + if len(sys.argv) == 2 and sys.argv[1] == "check_reqs": + assert_requirements(PKTTEST_REQ) + else: + print("Usage: " + sys.argv[0] + " check_reqs") diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/pkttest.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/pkttest.sh new file mode 100755 index 000000000..f19247254 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/pkttest.sh @@ -0,0 +1,66 @@ +#!/bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +DIR=$(dirname $0) + +if [ $(id -u) -ne 0 ]; then + echo "Run as root" + exit 1 +fi + +# check python requirements +python3 ${DIR}/pkttest.py check_reqs +if [ $? -ne 0 ]; then + echo "Requirements for Python not met, exiting" + exit 1 +fi + +# secgw application parameters setup +CRYPTO_DEV="--vdev=crypto_null0" +SGW_PORT_CFG="--vdev=net_tap0,mac=fixed --vdev=net_tap1,mac=fixed" +SGW_EAL_XPRM="--no-pci" +SGW_CMD_XPRM=-l +SGW_WAIT_DEV="dtap0" +. ${DIR}/common_defs_secgw.sh + +echo "Running tests: $*" +for testcase in $* +do + # check test file presence + testfile="${DIR}/${testcase}.py" + if [ ! -f ${testfile} ]; then + echo "Invalid test ${testcase}" + continue + fi + + # prepare test config + python3 ${testfile} config > ${SGW_CFG_FILE} + if [ $? -ne 0 ]; then + rm -f ${SGW_CFG_FILE} + echo "Cannot get secgw configuration for test ${testcase}" + exit 1 + fi + + # start the application + secgw_start + + # setup interfaces + ifconfig dtap0 up + ifconfig dtap1 up + + # run the test + echo "Running test case: ${testcase}" + python3 ${testfile} + st=$? + + # stop the application + secgw_stop + + # report test result and exit on failure + if [ $st -eq 0 ]; then + echo "Test case ${testcase} succeeded" + else + echo "Test case ${testcase} failed!" + exit $st + fi +done diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/run_test.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/run_test.sh new file mode 100755 index 000000000..1222308bb --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/run_test.sh @@ -0,0 +1,242 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +# Usage: /bin/bash run_test.sh [-46miflscph] <ipsec_mode> +# Run all defined linux_test.sh test-cases one by one +# If <ipsec_mode> is specified, run only that test case +# User has to setup properly the following environment variables: +# SGW_PATH - path to the ipsec-secgw binary to test +# REMOTE_HOST - ip/hostname of the DUT +# REMOTE_IFACE - iface name for the test-port on DUT +# ETH_DEV - ethernet device to be used on SUT by DPDK ('-w <pci-id>') +# Also user can optionally setup: +# SGW_LCORE - lcore to run ipsec-secgw on (default value is 0) +# CRYPTO_DEV - crypto device to be used ('-w <pci-id>') +# if none specified appropriate vdevs will be created by the script +# SGW_MULTI_SEG - ipsec-secgw option to enable reassembly support and +# specify size of reassembly table (i.e. 
SGW_MULTI_SEG=128) +# Refer to linux_test.sh for more information + +# All supported modes to test: +# trs_3descbc_sha1 +# trs_aescbc_sha1 +# trs_aesctr_sha1 +# trs_aesgcm +# tun_3descbc_sha1 +# tun_aescbc_sha1 +# tun_aesctr_sha1 +# tun_aesgcm +# Naming convention: +# 'tun/trs' refer to tunnel/transport mode respectively + +usage() +{ + echo "Usage:" + echo -e "\t$0 -[46miflscph] <ipsec_mode>" + echo -e "\t\t-4 Perform Linux IPv4 network tests" + echo -e "\t\t-6 Perform Linux IPv6 network tests" + echo -e "\t\t-m Add mixed IP protocol tests to IPv4/IPv6 \ +(only with option [-46])" + echo -e "\t\t-i Run inline tests (only with option [-46])" + echo -e "\t\t-f Run fallback tests (only with option [-46])" + echo -e "\t\t-l Run tests in legacy mode" + echo -e "\t\t-s Run all tests with reassembly support \ +(on default only fallback tests use reassembly support)" + echo -e "\t\t-c Run tests with use of cpu-crypto \ +(on default lookaside-none is used)" + echo -e "\t\t-p Perform packet validation tests" + echo -e "\t\t-h Display this help" + echo -e "\t\t<ipsec_mode> Run only specified test case i.e. tun_aesgcm" +} + +LINUX_TEST="trs_3descbc_sha1 \ +trs_aescbc_sha1 \ +trs_aesctr_sha1 \ +trs_aesgcm \ +tun_3descbc_sha1 \ +tun_aescbc_sha1 \ +tun_aesctr_sha1 \ +tun_aesgcm" + +LINUX_TEST_INLINE_FALLBACK="trs_aesgcm \ +tun_aesgcm" + +LINUX_TEST_RUN="" + +PKT_TESTS="trs_ipv6opts \ +tun_null_header_reconstruct" + +DIR=$(dirname $0) + +# get input options +run4=0 +run6=0 +runpkt=0 +mixed=0 +inline=0 +fallback=0 +legacy=0 +multi_seg=0 +cpu_crypto=0 +options="" +while getopts ":46miflscph" opt +do + case $opt in + 4) + run4=1 + ;; + 6) + run6=1 + ;; + m) + mixed=1 + ;; + i) + inline=1 + ;; + f) + fallback=1 + ;; + l) + legacy=1 + options="${options} -l" + ;; + s) + multi_seg=1 + options="${options} -s" + ;; + c) + cpu_crypto=1 + options="${options} -c" + ;; + p) + runpkt=1 + ;; + h) + usage + exit 0 + ;; + ?) + echo "Invalid option" + usage + exit 127 + ;; + esac +done + +shift $((OPTIND -1)) +LINUX_TEST_RUN=$* + +# no test suite has been selected +if [[ ${run4} -eq 0 && ${run6} -eq 0 && ${runpkt} -eq 0 ]]; then + usage + exit 127 +fi + +# check parameters +if [[ ${legacy} -eq 1 ]] && [[ ${multi_seg} -eq 1 || ${fallback} -eq 1 \ + || ${cpu_crypto} -eq 1 ]]; then + echo "Fallback/reassembly/cpu-crypto cannot be used with legacy mode" + exit 127 +fi + +if [[ ${cpu_crypto} -eq 1 && ${inline} -eq 1 && ${fallback} -eq 0 ]]; then + echo "cpu-crypto cannot be used with inline mode" + exit 127 +fi + +# perform packet processing validation tests +st=0 +if [ $runpkt -eq 1 ]; then + echo "Performing packet validation tests" + /bin/bash ${DIR}/pkttest.sh ${PKT_TESTS} + st=$? 
+ + echo "pkttests finished with status ${st}" + if [[ ${st} -ne 0 ]]; then + echo "ERROR pkttests FAILED" + exit ${st} + fi +fi + +desc="" + +# set inline/fallback tests if needed +if [[ ${inline} -eq 1 || ${fallback} -eq 1 ]]; then + + # add inline option if needed + if [[ ${inline} -eq 1 ]]; then + options="${options} -i" + desc="inline" + fi + # add fallback option if needed + if [[ ${fallback} -eq 1 ]]; then + options="${options} -f" + if [[ "${desc}" == "inline" ]]; then + desc="${desc} and fallback" + else + desc="fallback" + fi + fi + + # select tests to run + if [[ -z "${LINUX_TEST_RUN}" ]]; then + LINUX_TEST_RUN="${LINUX_TEST_INLINE_FALLBACK}" + fi +else + options="${options} -r" +fi + +# select tests to run +if [[ -z "${LINUX_TEST_RUN}" ]]; then + LINUX_TEST_RUN="${LINUX_TEST}" +fi + +# perform selected tests +if [[ ${run4} -eq 1 || ${run6} -eq 1 ]] ; then + + for i in ${LINUX_TEST_RUN}; do + + echo "starting ${desc} test ${i}" + + st4=0 + st4m=0 + if [[ ${run4} -ne 0 ]]; then + /bin/bash ${DIR}/load_env.sh ${options} ipv4-ipv4 ${i} + st4=$? + echo "${desc} test IPv4 ${i} finished with status \ +${st4}" + if [[ ${mixed} -ne 0 ]] && [[ "${i}" == tun* ]]; then + /bin/bash ${DIR}/load_env.sh ${options} \ + ipv4-ipv6 ${i} + st4m=$? + echo "${desc} test IPv4-IPv6 ${i} finished with\ + status ${st4m}" + fi + fi + + st6=0 + st6m=0 + if [[ ${run6} -ne 0 ]]; then + /bin/bash ${DIR}/load_env.sh ${options} ipv6-ipv6 ${i} + st6=$? + echo "${desc} test IPv6 ${i} finished with status \ +${st6}" + if [[ ${mixed} -ne 0 ]] && [[ "${i}" == tun* ]]; then + /bin/bash ${DIR}/load_env.sh ${options} \ + ipv6-ipv4 ${i} + st6m=$? + echo "${desc} test IPv6-IPv4 ${i} finished with\ + status ${st6m}" + fi + fi + + let "st = st4 + st6 + st4m + st6m" + if [[ $st -ne 0 ]]; then + echo "ERROR ${desc} test ${i} FAILED" + exit $st + fi + done +fi + +echo "All tests have ended successfully" diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_3descbc_sha1_common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_3descbc_sha1_common_defs.sh new file mode 100644 index 000000000..a66b0ec1e --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_3descbc_sha1_common_defs.sh @@ -0,0 +1,74 @@ +#! 
/bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_aesni_mb0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} +#SP in IPv4 rules +sp ipv4 in esp protect 7 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 7 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 9 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 9 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 7 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_IN} + +sa in 9 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 7 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_OUT} + +#SA out rules +sa out 9 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_3descbc_sha1_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_3descbc_sha1_defs.sh new file mode 100644 index 000000000..bbee6a1da --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_3descbc_sha1_defs.sh @@ -0,0 +1,69 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +. 
${DIR}/trs_3descbc_sha1_common_defs.sh + +SGW_CMD_XPRM="${DPDK_VARS} ${DPDK_MODE} ${SGW_CMD_XPRM}" + +config_remote_xfrm_44() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl proto esp mode transport reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl proto esp mode transport reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 7 reqid 1 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 7 reqid 2 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_66() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl proto esp mode transport reqid 3 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl proto esp mode transport reqid 4 + + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 9 reqid 3 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 9 reqid 4 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aescbc_sha1_common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aescbc_sha1_common_defs.sh new file mode 100644 index 000000000..d92292452 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aescbc_sha1_common_defs.sh @@ -0,0 +1,70 @@ +#! 
/bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_aesni_mb0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} +#SP in IPv4 rules +sp ipv4 in esp protect 7 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 7 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 9 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 9 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 7 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_IN} + +sa in 9 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 7 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_OUT} + +#SA out rules +sa out 9 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aescbc_sha1_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aescbc_sha1_defs.sh new file mode 100644 index 000000000..0665a0bc6 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aescbc_sha1_defs.sh @@ -0,0 +1,69 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +. 
${DIR}/trs_aescbc_sha1_common_defs.sh + +SGW_CMD_XPRM="${DPDK_VARS} ${DPDK_MODE} ${SGW_CMD_XPRM}" + +config_remote_xfrm_44() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl proto esp mode transport reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl proto esp mode transport reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 7 reqid 1 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 7 reqid 2 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_66() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl proto esp mode transport reqid 3 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl proto esp mode transport reqid 4 + + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 9 reqid 3 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 9 reqid 4 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesctr_sha1_common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesctr_sha1_common_defs.sh new file mode 100644 index 000000000..7d2db073b --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesctr_sha1_common_defs.sh @@ -0,0 +1,70 @@ +#! 
/bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_aesni_mb0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} +#SP in IPv4 rules +sp ipv4 in esp protect 7 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 7 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 9 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 9 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 7 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_IN} + +sa in 9 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 7 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_OUT} + +#SA out rules +sa out 9 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesctr_sha1_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesctr_sha1_defs.sh new file mode 100644 index 000000000..3390055db --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesctr_sha1_defs.sh @@ -0,0 +1,69 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +. 
${DIR}/trs_aesctr_sha1_common_defs.sh + +SGW_CMD_XPRM="${DPDK_VARS} ${DPDK_MODE} ${SGW_CMD_XPRM}" + +config_remote_xfrm_44() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl proto esp mode transport reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl proto esp mode transport reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 7 reqid 1 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 7 reqid 2 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_66() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl proto esp mode transport reqid 3 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl proto esp mode transport reqid 4 + + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 9 reqid 3 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 9 reqid 4 mode transport replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesgcm_common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesgcm_common_defs.sh new file mode 100644 index 000000000..47eef4d9b --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesgcm_common_defs.sh @@ -0,0 +1,61 @@ +#! 
/bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_aesni_gcm0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} +#SP in IPv4 rules +sp ipv4 in esp protect 7 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 7 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP in IPv6 rules +sp ipv6 in esp protect 9 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 9 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 7 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_IN} + +sa in 9 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 7 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_OUT} + +sa out 9 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode transport ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesgcm_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesgcm_defs.sh new file mode 100644 index 000000000..48c2687d0 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_aesgcm_defs.sh @@ -0,0 +1,68 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +. 
${DIR}/trs_aesgcm_common_defs.sh + +SGW_CMD_XPRM="${DPDK_VARS} ${DPDK_MODE} ${SGW_CMD_XPRM}" + +config_remote_xfrm_44() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl proto esp mode transport reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl proto esp mode transport reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 7 reqid 1 mode transport replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 7 reqid 2 mode transport replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_66() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl proto esp mode transport reqid 3 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl proto esp mode transport reqid 4 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 9 reqid 3 mode transport replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 9 reqid 4 mode transport replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/trs_ipv6opts.py b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_ipv6opts.py new file mode 100755 index 000000000..95011861e --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/trs_ipv6opts.py @@ -0,0 +1,182 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: BSD-3-Clause + +from scapy.all import * +import unittest +import pkttest + + +SRC_ADDR = "1111:0000:0000:0000:0000:0000:0000:0001" +DST_ADDR = "2222:0000:0000:0000:0000:0000:0000:0001" +SRC_NET = "1111:0000:0000:0000:0000:0000:0000:0000/64" +DST_NET = "2222:0000:0000:0000:0000:0000:0000:0000/64" + + +def config(): + return """ +sp ipv6 out esp protect 5 pri 1 \\ +src {0} \\ +dst {1} \\ +sport 0:65535 dport 0:65535 + +sp ipv6 in esp protect 6 pri 1 \\ +src {1} \\ +dst {0} \\ +sport 0:65535 dport 0:65535 + +sa out 5 cipher_algo null auth_algo null mode transport +sa in 6 cipher_algo null auth_algo null mode transport + +rt ipv6 dst {0} port 1 +rt ipv6 dst {1} port 0 +""".format(SRC_NET, DST_NET) + + +class TestTransportWithIPv6Ext(unittest.TestCase): + # There is a bug in the IPsec Scapy implementation + # which causes invalid packet reconstruction after + # successful decryption. This method is a workaround. 
+ @staticmethod + def decrypt(pkt, sa): + esp = pkt[ESP] + + # decrypt dummy packet with no extensions + d = sa.decrypt(IPv6()/esp) + + # fix 'next header' in the preceding header of the original + # packet and remove ESP + pkt[ESP].underlayer.nh = d[IPv6].nh + pkt[ESP].underlayer.remove_payload() + + # combine L3 header with decrypted payload + npkt = pkt/d[IPv6].payload + + # fix length + npkt[IPv6].plen = d[IPv6].plen + len(pkt[IPv6].payload) + + return npkt + + def setUp(self): + self.px = pkttest.PacketXfer() + self.outb_sa = SecurityAssociation(ESP, spi=5) + self.inb_sa = SecurityAssociation(ESP, spi=6) + + def test_outb_ipv6_noopt(self): + pkt = IPv6(src=SRC_ADDR, dst=DST_ADDR) + pkt /= UDP(sport=123,dport=456)/Raw(load="abc") + + # send and check response + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(resp[ESP].spi, 5) + + # decrypt response, check packet after decryption + d = TestTransportWithIPv6Ext.decrypt(resp[IPv6], self.outb_sa) + self.assertEqual(d[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(d[UDP].sport, 123) + self.assertEqual(d[UDP].dport, 456) + self.assertEqual(bytes(d[UDP].payload), b'abc') + + def test_outb_ipv6_opt(self): + hoptions = [] + hoptions.append(RouterAlert(value=2)) + hoptions.append(Jumbo(jumboplen=5000)) + hoptions.append(Pad1()) + + doptions = [] + doptions.append(HAO(hoa="1234::4321")) + + pkt = IPv6(src=SRC_ADDR, dst=DST_ADDR) + pkt /= IPv6ExtHdrHopByHop(options=hoptions) + pkt /= IPv6ExtHdrRouting(addresses=["3333::3","4444::4"]) + pkt /= IPv6ExtHdrDestOpt(options=doptions) + pkt /= UDP(sport=123,dport=456)/Raw(load="abc") + + # send and check response + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_HOPOPTS) + + # check extensions + self.assertEqual(resp[IPv6ExtHdrHopByHop].nh, socket.IPPROTO_ROUTING) + self.assertEqual(resp[IPv6ExtHdrRouting].nh, socket.IPPROTO_DSTOPTS) + self.assertEqual(resp[IPv6ExtHdrDestOpt].nh, socket.IPPROTO_ESP) + + # check ESP + self.assertEqual(resp[ESP].spi, 5) + + # decrypt response, check packet after decryption + d = TestTransportWithIPv6Ext.decrypt(resp[IPv6], self.outb_sa) + self.assertEqual(d[IPv6].nh, socket.IPPROTO_HOPOPTS) + self.assertEqual(d[IPv6ExtHdrHopByHop].nh, socket.IPPROTO_ROUTING) + self.assertEqual(d[IPv6ExtHdrRouting].nh, socket.IPPROTO_DSTOPTS) + self.assertEqual(d[IPv6ExtHdrDestOpt].nh, socket.IPPROTO_UDP) + + # check UDP + self.assertEqual(d[UDP].sport, 123) + self.assertEqual(d[UDP].dport, 456) + self.assertEqual(bytes(d[UDP].payload), b'abc') + + def test_inb_ipv6_noopt(self): + # encrypt and send raw UDP packet + pkt = IPv6(src=DST_ADDR, dst=SRC_ADDR) + pkt /= UDP(sport=123,dport=456)/Raw(load="abc") + e = self.inb_sa.encrypt(pkt) + + # send and check response + resp = self.px.xfer_protected(e) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + + # check UDP packet + self.assertEqual(resp[UDP].sport, 123) + self.assertEqual(resp[UDP].dport, 456) + self.assertEqual(bytes(resp[UDP].payload), b'abc') + + def test_inb_ipv6_opt(self): + hoptions = [] + hoptions.append(RouterAlert(value=2)) + hoptions.append(Jumbo(jumboplen=5000)) + hoptions.append(Pad1()) + + doptions = [] + doptions.append(HAO(hoa="1234::4321")) + + # prepare packet with options + pkt = IPv6(src=DST_ADDR, dst=SRC_ADDR) + pkt /= IPv6ExtHdrHopByHop(options=hoptions) + pkt /= IPv6ExtHdrRouting(addresses=["3333::3","4444::4"]) + pkt /= IPv6ExtHdrDestOpt(options=doptions) + pkt /= UDP(sport=123,dport=456)/Raw(load="abc") + e 
= self.inb_sa.encrypt(pkt) + + # self encrypted packet and check response + resp = self.px.xfer_protected(e) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_HOPOPTS) + self.assertEqual(resp[IPv6ExtHdrHopByHop].nh, socket.IPPROTO_ROUTING) + self.assertEqual(resp[IPv6ExtHdrRouting].nh, socket.IPPROTO_DSTOPTS) + self.assertEqual(resp[IPv6ExtHdrDestOpt].nh, socket.IPPROTO_UDP) + + # check UDP + self.assertEqual(resp[UDP].sport, 123) + self.assertEqual(resp[UDP].dport, 456) + self.assertEqual(bytes(resp[UDP].payload), b'abc') + + def test_inb_ipv6_frag(self): + # prepare ESP payload + pkt = IPv6()/UDP(sport=123,dport=456)/Raw(load="abc") + e = self.inb_sa.encrypt(pkt) + + # craft and send inbound packet + e = IPv6(src=DST_ADDR, dst=SRC_ADDR)/IPv6ExtHdrFragment()/e[IPv6].payload + resp = self.px.xfer_protected(e) + + # check response + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_FRAGMENT) + self.assertEqual(resp[IPv6ExtHdrFragment].nh, socket.IPPROTO_UDP) + + # check UDP + self.assertEqual(resp[UDP].sport, 123) + self.assertEqual(resp[UDP].dport, 456) + self.assertEqual(bytes(resp[UDP].payload), b'abc') + + +pkttest.pkttest() diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_3descbc_sha1_common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_3descbc_sha1_common_defs.sh new file mode 100644 index 000000000..8804139df --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_3descbc_sha1_common_defs.sh @@ -0,0 +1,141 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_aesni_mb0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} +#sp in IPv4 rules +sp ipv4 in esp protect 7 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 7 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 9 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 9 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 7 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} ${SGW_CFG_XPRM_IN} + +sa in 9 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 7 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} ${SGW_CFG_XPRM_OUT} + +sa out 9 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src 
${LOCAL_IPV6} dst ${REMOTE_IPV6} ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} + +config_secgw_mixed() +{ + cat <<EOF > ${SGW_CFG_FILE} +#sp in IPv4 rules +sp ipv4 in esp protect 6 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 6 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 8 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 8 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 8 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} + +sa in 6 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} + +#SA out rules +sa out 8 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} + +sa out 6 cipher_algo 3des-cbc \ +cipher_key \ +de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_3descbc_sha1_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_3descbc_sha1_defs.sh new file mode 100644 index 000000000..7c2d065ab --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_3descbc_sha1_defs.sh @@ -0,0 +1,142 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +. 
${DIR}/tun_3descbc_sha1_common_defs.sh + +SGW_CMD_XPRM="${DPDK_VARS} ${DPDK_MODE} ${SGW_CMD_XPRM}" + +config_remote_xfrm_44() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 7 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 7 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_46() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 6 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 6 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_64() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 8 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 8 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 
0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_66() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp mode tunnel reqid 3 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp mode tunnel reqid 4 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 9 reqid 3 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 9 reqid 4 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aescbc_sha1_common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aescbc_sha1_common_defs.sh new file mode 100644 index 000000000..9e2276997 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aescbc_sha1_common_defs.sh @@ -0,0 +1,133 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_aesni_mb0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} +#sp in IPv4 rules +sp ipv4 in esp protect 7 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 7 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 9 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 9 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 7 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} ${SGW_CFG_XPRM_IN} + +sa in 9 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 7 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} ${SGW_CFG_XPRM_OUT} + +sa out 9 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ 
+auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} + +config_secgw_mixed() +{ + cat <<EOF > ${SGW_CFG_FILE} +#sp in IPv4 rules +sp ipv4 in esp protect 6 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 6 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 8 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 8 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 8 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} + +sa in 6 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} + +#SA out rules +sa out 8 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} + +sa out 6 cipher_algo aes-128-cbc \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aescbc_sha1_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aescbc_sha1_defs.sh new file mode 100644 index 000000000..b95d81458 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aescbc_sha1_defs.sh @@ -0,0 +1,142 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +. 
${DIR}/tun_aescbc_sha1_common_defs.sh + +SGW_CMD_XPRM="${DPDK_VARS} ${DPDK_MODE} ${SGW_CMD_XPRM}" + +config_remote_xfrm_44() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 7 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 7 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_46() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 6 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 6 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_64() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 8 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 8 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + 
+config_remote_xfrm_66() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp mode tunnel reqid 3 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp mode tunnel reqid 4 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 9 reqid 3 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 9 reqid 4 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesctr_sha1_common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesctr_sha1_common_defs.sh new file mode 100644 index 000000000..0f0111d84 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesctr_sha1_common_defs.sh @@ -0,0 +1,133 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_aesni_mb0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} +#sp in IPv4 rules +sp ipv4 in esp protect 7 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 7 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 9 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 9 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 7 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} ${SGW_CFG_XPRM_IN} + +sa in 9 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 7 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} ${SGW_CFG_XPRM_OUT} + +sa out 9 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 
+rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} + +config_secgw_mixed() +{ + cat <<EOF > ${SGW_CFG_FILE} +#sp in IPv4 rules +sp ipv4 in esp protect 6 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 6 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 8 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 8 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 8 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} + +sa in 6 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} + +#SA out rules +sa out 8 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} + +sa out 6 cipher_algo aes-128-ctr \ +cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +auth_algo sha1-hmac \ +auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesctr_sha1_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesctr_sha1_defs.sh new file mode 100644 index 000000000..fd92f8769 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesctr_sha1_defs.sh @@ -0,0 +1,142 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +. 
${DIR}/tun_aesctr_sha1_common_defs.sh + +SGW_CMD_XPRM="${DPDK_VARS} ${DPDK_MODE} ${SGW_CMD_XPRM}" + +config_remote_xfrm_44() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 7 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 7 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_46() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 6 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 6 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_64() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 8 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 8 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 
0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +sel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_66() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp mode tunnel reqid 3 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp mode tunnel reqid 4 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 9 reqid 3 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 9 reqid 4 mode tunnel replay-window 64 ${XFRM_ESN} \ +auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \ +enc "rfc3686\(ctr\(aes\)\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesgcm_common_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesgcm_common_defs.sh new file mode 100644 index 000000000..bf4956293 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesgcm_common_defs.sh @@ -0,0 +1,117 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +CRYPTO_DEV=${CRYPTO_DEV:-'--vdev="crypto_aesni_gcm0"'} + +#generate cfg file for ipsec-secgw +config_secgw() +{ + cat <<EOF > ${SGW_CFG_FILE} +#sp in IPv4 rules +sp ipv4 in esp protect 7 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 7 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 9 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 9 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 7 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} ${SGW_CFG_XPRM_IN} + +sa in 9 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 7 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} ${SGW_CFG_XPRM_OUT} + +sa out 9 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 
${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} + +config_secgw_mixed() +{ + cat <<EOF > ${SGW_CFG_FILE} +#sp in IPv4 rules +sp ipv4 in esp protect 6 pri 2 src ${REMOTE_IPV4}/32 dst ${LOCAL_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv4 rules +sp ipv4 out esp protect 6 pri 2 src ${LOCAL_IPV4}/32 dst ${REMOTE_IPV4}/32 \ +sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#sp in IPv6 rules +sp ipv6 in esp protect 8 pri 2 src ${REMOTE_IPV6}/128 dst ${LOCAL_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SP out IPv6 rules +sp ipv6 out esp protect 8 pri 2 src ${LOCAL_IPV6}/128 dst ${REMOTE_IPV6}/128 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp bypass pri 1 sport 0:65535 dport 0:65535 + +#SA in rules +sa in 8 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} ${SGW_CFG_XPRM_IN} + +sa in 6 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} ${SGW_CFG_XPRM_IN} + +#SA out rules +sa out 8 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv4-tunnel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} ${SGW_CFG_XPRM_OUT} + +sa out 6 aead_algo aes-128-gcm \ +aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \ +mode ipv6-tunnel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} ${SGW_CFG_XPRM_OUT} + +#Routing rules +rt ipv4 dst ${REMOTE_IPV4}/32 port 0 +rt ipv4 dst ${LOCAL_IPV4}/32 port 1 + +rt ipv6 dst ${REMOTE_IPV6}/128 port 0 +rt ipv6 dst ${LOCAL_IPV6}/128 port 1 + +#neighbours +neigh port 0 ${REMOTE_MAC} +neigh port 1 ${LOCAL_MAC} +EOF + + cat ${SGW_CFG_FILE} +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesgcm_defs.sh b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesgcm_defs.sh new file mode 100644 index 000000000..2528d02c8 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_aesgcm_defs.sh @@ -0,0 +1,142 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +. 
${DIR}/tun_aesgcm_common_defs.sh + +SGW_CMD_XPRM="${DPDK_VARS} ${DPDK_MODE} ${SGW_CMD_XPRM}" + +config_remote_xfrm_44() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 7 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 7 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_46() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 6 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 \ +sel src ${REMOTE_IPV4} dst ${LOCAL_IPV4} + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 6 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 \ +sel src ${LOCAL_IPV4} dst ${REMOTE_IPV4} + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_64() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp mode tunnel reqid 1 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp mode tunnel reqid 2 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \ +proto esp spi 8 reqid 1 mode tunnel replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 \ +sel src ${REMOTE_IPV6} dst ${LOCAL_IPV6} + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \ +proto esp spi 8 reqid 2 mode tunnel replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 \ +sel src ${LOCAL_IPV6} dst ${REMOTE_IPV6} + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} + +config_remote_xfrm_66() +{ + ssh ${REMOTE_HOST} ip xfrm policy flush + ssh ${REMOTE_HOST} ip xfrm state flush + + ssh ${REMOTE_HOST} 
ip xfrm policy add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +dir out ptype main action allow \ +tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp mode tunnel reqid 3 + + ssh ${REMOTE_HOST} ip xfrm policy add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +dir in ptype main action allow \ +tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp mode tunnel reqid 4 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \ +proto esp spi 9 reqid 3 mode tunnel replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 + + ssh ${REMOTE_HOST} ip xfrm state add \ +src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \ +proto esp spi 9 reqid 4 mode tunnel replay-window 64 ${XFRM_ESN} \ +aead "rfc4106\(gcm\(aes\)\)" \ +0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 128 + + ssh ${REMOTE_HOST} ip xfrm policy list + ssh ${REMOTE_HOST} ip xfrm state list +} diff --git a/src/spdk/dpdk/examples/ipsec-secgw/test/tun_null_header_reconstruct.py b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_null_header_reconstruct.py new file mode 100755 index 000000000..d4f42dfc0 --- /dev/null +++ b/src/spdk/dpdk/examples/ipsec-secgw/test/tun_null_header_reconstruct.py @@ -0,0 +1,479 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019 Intel Corporation + +from scapy.all import * +import unittest +import pkttest + +#{ipv4{ipv4}} test +SRC_ADDR_IPV4_1 = "192.168.1.1" +DST_ADDR_IPV4_1 = "192.168.2.1" + +#{ipv6{ipv6}} test +SRC_ADDR_IPV6_1 = "1111:0000:0000:0000:0000:0000:0000:0001" +DST_ADDR_IPV6_1 = "2222:0000:0000:0000:0000:0000:0000:0001" + +#{ipv4{ipv6}} test +SRC_ADDR_IPV4_2 = "192.168.11.1" +DST_ADDR_IPV4_2 = "192.168.12.1" +SRC_ADDR_IPV6_2 = "1111:0000:0000:0000:0000:0000:0001:0001" +DST_ADDR_IPV6_2 = "2222:0000:0000:0000:0000:0000:0001:0001" + +#{ipv6{ipv4}} test +SRC_ADDR_IPV4_3 = "192.168.21.1" +DST_ADDR_IPV4_3 = "192.168.22.1" +SRC_ADDR_IPV6_3 = "1111:0000:0000:0000:0000:0001:0001:0001" +DST_ADDR_IPV6_3 = "2222:0000:0000:0000:0000:0001:0001:0001" + +def config(): + return """ +#outter-ipv4 inner-ipv4 tunnel mode test +sp ipv4 out esp protect 5 pri 1 \\ +src {0}/32 \\ +dst {1}/32 \\ +sport 0:65535 dport 0:65535 + +sp ipv4 in esp protect 6 pri 1 \\ +src {1}/32 \\ +dst {0}/32 \\ +sport 0:65535 dport 0:65535 + +sa out 5 cipher_algo null auth_algo null mode ipv4-tunnel \\ +src {0} dst {1} +sa in 6 cipher_algo null auth_algo null mode ipv4-tunnel \\ +src {1} dst {0} + +rt ipv4 dst {0}/32 port 1 +rt ipv4 dst {1}/32 port 0 + +#outter-ipv6 inner-ipv6 tunnel mode test +sp ipv6 out esp protect 7 pri 1 \\ +src {2}/128 \\ +dst {3}/128 \\ +sport 0:65535 dport 0:65535 + +sp ipv6 in esp protect 8 pri 1 \\ +src {3}/128 \\ +dst {2}/128 \\ +sport 0:65535 dport 0:65535 + +sa out 7 cipher_algo null auth_algo null mode ipv6-tunnel \\ +src {2} dst {3} +sa in 8 cipher_algo null auth_algo null mode ipv6-tunnel \\ +src {3} dst {2} + +rt ipv6 dst {2}/128 port 1 +rt ipv6 dst {3}/128 port 0 + +#outter-ipv4 inner-ipv6 tunnel mode test +sp ipv6 out esp protect 9 pri 1 \\ +src {4}/128 \\ +dst {5}/128 \\ +sport 0:65535 dport 0:65535 + +sp ipv6 in esp protect 10 pri 1 \\ +src {5}/128 \\ +dst {4}/128 \\ +sport 0:65535 dport 0:65535 + +sa out 9 cipher_algo null auth_algo null mode ipv4-tunnel \\ +src {6} dst {7} +sa in 10 cipher_algo null auth_algo null mode ipv4-tunnel \\ +src {7} dst {6} + +rt ipv6 dst {4}/128 port 1 +rt ipv4 dst {7}/32 port 0 + +#outter-ipv6 inner-ipv4 tunnel mode test +sp ipv4 out esp protect 11 pri 1 \\ +src {8}/32 \\ +dst {9}/32 \\ 
+sport 0:65535 dport 0:65535 + +sp ipv4 in esp protect 12 pri 1 \\ +src {9}/32 \\ +dst {8}/32 \\ +sport 0:65535 dport 0:65535 + +sa out 11 cipher_algo null auth_algo null mode ipv6-tunnel \\ +src {10} dst {11} +sa in 12 cipher_algo null auth_algo null mode ipv6-tunnel \\ +src {11} dst {10} + +rt ipv4 dst {8}/32 port 1 +rt ipv6 dst {11}/128 port 0 +""".format(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1, + SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1, + SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2, SRC_ADDR_IPV4_2, DST_ADDR_IPV4_2, + SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3, SRC_ADDR_IPV6_3, DST_ADDR_IPV6_3) + +ECN_ECT0 = 0x02 +ECN_ECT1 = 0x01 +ECN_CE = 0x03 +DSCP_1 = 0x04 +DSCP_3F = 0xFC + +class TestTunnelHeaderReconstruct(unittest.TestCase): + def setUp(self): + self.px = pkttest.PacketXfer() + th = IP(src=DST_ADDR_IPV4_1, dst=SRC_ADDR_IPV4_1) + self.sa_ipv4v4 = SecurityAssociation(ESP, spi=6, tunnel_header = th) + + th = IPv6(src=DST_ADDR_IPV6_1, dst=SRC_ADDR_IPV6_1) + self.sa_ipv6v6 = SecurityAssociation(ESP, spi=8, tunnel_header = th) + + th = IP(src=DST_ADDR_IPV4_2, dst=SRC_ADDR_IPV4_2) + self.sa_ipv4v6 = SecurityAssociation(ESP, spi=10, tunnel_header = th) + + th = IPv6(src=DST_ADDR_IPV6_3, dst=SRC_ADDR_IPV6_3) + self.sa_ipv6v4 = SecurityAssociation(ESP, spi=12, tunnel_header = th) + + def gen_pkt_plain_ipv4(self, src, dst, tos): + pkt = IP(src=src, dst=dst, tos=tos) + pkt /= UDP(sport=123,dport=456)/Raw(load="abc") + return pkt + + def gen_pkt_plain_ipv6(self, src, dst, tc): + pkt = IPv6(src=src, dst=dst, tc=tc) + pkt /= UDP(sport=123,dport=456)/Raw(load="abc") + return pkt + + def gen_pkt_tun_ipv4v4(self, tos_outter, tos_inner): + pkt = self.gen_pkt_plain_ipv4(DST_ADDR_IPV4_1, SRC_ADDR_IPV4_1, + tos_inner) + pkt = self.sa_ipv4v4.encrypt(pkt) + self.assertEqual(pkt[IP].proto, socket.IPPROTO_ESP) + self.assertEqual(pkt[ESP].spi, 6) + pkt[IP].tos = tos_outter + return pkt + + def gen_pkt_tun_ipv6v6(self, tc_outter, tc_inner): + pkt = self.gen_pkt_plain_ipv6(DST_ADDR_IPV6_1, SRC_ADDR_IPV6_1, + tc_inner) + pkt = self.sa_ipv6v6.encrypt(pkt) + self.assertEqual(pkt[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(pkt[ESP].spi, 8) + pkt[IPv6].tc = tc_outter + return pkt + + def gen_pkt_tun_ipv4v6(self, tos_outter, tc_inner): + pkt = self.gen_pkt_plain_ipv6(DST_ADDR_IPV6_2, SRC_ADDR_IPV6_2, + tc_inner) + pkt = self.sa_ipv4v6.encrypt(pkt) + self.assertEqual(pkt[IP].proto, socket.IPPROTO_ESP) + self.assertEqual(pkt[ESP].spi, 10) + pkt[IP].tos = tos_outter + return pkt + + def gen_pkt_tun_ipv6v4(self, tc_outter, tos_inner): + pkt = self.gen_pkt_plain_ipv4(DST_ADDR_IPV4_3, SRC_ADDR_IPV4_3, + tos_inner) + pkt = self.sa_ipv6v4.encrypt(pkt) + self.assertEqual(pkt[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(pkt[ESP].spi, 12) + pkt[IPv6].tc = tc_outter + return pkt + +#RFC4301 5.1.2.1 & 5.1.2.2, outbound packets shall be copied ECN field + def test_outb_ipv4v4_ecn(self): + pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1, + ECN_ECT1) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP) + self.assertEqual(resp[ESP].spi, 5) + self.assertEqual(resp[IP].tos, ECN_ECT1) + + pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1, + ECN_ECT0) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP) + self.assertEqual(resp[ESP].spi, 5) + self.assertEqual(resp[IP].tos, ECN_ECT0) + + pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1, + ECN_CE) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP) + 
self.assertEqual(resp[ESP].spi, 5) + self.assertEqual(resp[IP].tos, ECN_CE) + + def test_outb_ipv6v6_ecn(self): + pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1, + ECN_ECT1) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(resp[IPv6].tc, ECN_ECT1) + + pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1, + ECN_ECT0) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(resp[ESP].spi, 7) + self.assertEqual(resp[IPv6].tc, ECN_ECT0) + + pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1, + ECN_CE) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(resp[ESP].spi, 7) + self.assertEqual(resp[IPv6].tc, ECN_CE) + + def test_outb_ipv4v6_ecn(self): + pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2, + ECN_ECT1) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP) + self.assertEqual(resp[IP].tos, ECN_ECT1) + + pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2, + ECN_ECT0) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP) + self.assertEqual(resp[IP].tos, ECN_ECT0) + + pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2, + ECN_CE) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP) + self.assertEqual(resp[IP].tos, ECN_CE) + + def test_outb_ipv6v4_ecn(self): + pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3, + ECN_ECT1) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(resp[IPv6].tc, ECN_ECT1) + + pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3, + ECN_ECT0) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(resp[IPv6].tc, ECN_ECT0) + + pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3, + ECN_CE) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP) + self.assertEqual(resp[IPv6].tc, ECN_CE) + +#RFC4301 5.1.2.1 & 5.1.2.2, if outbound packets ECN is CE (0x3), inbound packets +#ECN is overwritten to CE, otherwise no change + +#Outter header not CE, Inner header should be no change + def test_inb_ipv4v4_ecn_inner_no_change(self): + pkt = self.gen_pkt_tun_ipv4v4(ECN_ECT1, ECN_ECT0) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_ECT0) + + pkt = self.gen_pkt_tun_ipv4v4(ECN_ECT0, ECN_ECT1) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_ECT1) + + pkt = self.gen_pkt_tun_ipv4v4(ECN_ECT1, ECN_CE) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_CE) + + def test_inb_ipv6v6_ecn_inner_no_change(self): + pkt = self.gen_pkt_tun_ipv6v6(ECN_ECT1, ECN_ECT0) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_ECT0) + + pkt = self.gen_pkt_tun_ipv6v6(ECN_ECT0, ECN_ECT1) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_ECT1) + + pkt = self.gen_pkt_tun_ipv6v6(ECN_ECT1, ECN_CE) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, 
socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_CE) + + def test_inb_ipv4v6_ecn_inner_no_change(self): + pkt = self.gen_pkt_tun_ipv4v6(ECN_ECT1, ECN_ECT0) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_ECT0) + + pkt = self.gen_pkt_tun_ipv4v6(ECN_ECT0, ECN_ECT1) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_ECT1) + + pkt = self.gen_pkt_tun_ipv4v6(ECN_ECT1, ECN_CE) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_CE) + + def test_inb_ipv6v4_ecn_inner_no_change(self): + pkt = self.gen_pkt_tun_ipv6v4(ECN_ECT1, ECN_ECT0) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_ECT0) + + pkt = self.gen_pkt_tun_ipv6v4(ECN_ECT0, ECN_ECT1) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_ECT1) + + pkt = self.gen_pkt_tun_ipv6v4(ECN_ECT1, ECN_CE) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_CE) + +#Outter header CE, Inner header should be changed to CE + def test_inb_ipv4v4_ecn_inner_change(self): + pkt = self.gen_pkt_tun_ipv4v4(ECN_CE, ECN_ECT0) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_CE) + + pkt = self.gen_pkt_tun_ipv4v4(ECN_CE, ECN_ECT1) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_CE) + + def test_inb_ipv6v6_ecn_inner_change(self): + pkt = self.gen_pkt_tun_ipv6v6(ECN_CE, ECN_ECT0) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_CE) + + pkt = self.gen_pkt_tun_ipv6v6(ECN_CE, ECN_ECT1) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_CE) + + def test_inb_ipv4v6_ecn_inner_change(self): + pkt = self.gen_pkt_tun_ipv4v6(ECN_CE, ECN_ECT0) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_CE) + + pkt = self.gen_pkt_tun_ipv4v6(ECN_CE, ECN_ECT1) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP) + self.assertEqual(resp[IPv6].tc, ECN_CE) + + def test_inb_ipv6v4_ecn_inner_change(self): + pkt = self.gen_pkt_tun_ipv6v4(ECN_CE, ECN_ECT0) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_CE) + + pkt = self.gen_pkt_tun_ipv6v4(ECN_CE, ECN_ECT1) + resp = self.px.xfer_protected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP) + self.assertEqual(resp[IP].tos, ECN_CE) + +#RFC4301 5.1.2.1.5 Outer DS field should be copied from Inner DS field + def test_outb_ipv4v4_dscp(self): + pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1, + DSCP_1) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP) + self.assertEqual(resp[ESP].spi, 5) + self.assertEqual(resp[IP].tos, DSCP_1) + + pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_1, DST_ADDR_IPV4_1, + DSCP_3F) + resp = self.px.xfer_unprotected(pkt) + self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP) + 
self.assertEqual(resp[ESP].spi, 5)
+        self.assertEqual(resp[IP].tos, DSCP_3F)
+
+    def test_outb_ipv6v6_dscp(self):
+        pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1,
+                DSCP_1)
+        resp = self.px.xfer_unprotected(pkt)
+        self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
+        self.assertEqual(resp[ESP].spi, 7)
+        self.assertEqual(resp[IPv6].tc, DSCP_1)
+
+        pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_1, DST_ADDR_IPV6_1,
+                DSCP_3F)
+        resp = self.px.xfer_unprotected(pkt)
+        self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
+        self.assertEqual(resp[ESP].spi, 7)
+        self.assertEqual(resp[IPv6].tc, DSCP_3F)
+
+    def test_outb_ipv4v6_dscp(self):
+        pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2,
+                DSCP_1)
+        resp = self.px.xfer_unprotected(pkt)
+        self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
+        self.assertEqual(resp[ESP].spi, 9)
+        self.assertEqual(resp[IP].tos, DSCP_1)
+
+        pkt = self.gen_pkt_plain_ipv6(SRC_ADDR_IPV6_2, DST_ADDR_IPV6_2,
+                DSCP_3F)
+        resp = self.px.xfer_unprotected(pkt)
+        self.assertEqual(resp[IP].proto, socket.IPPROTO_ESP)
+        self.assertEqual(resp[ESP].spi, 9)
+        self.assertEqual(resp[IP].tos, DSCP_3F)
+
+    def test_outb_ipv6v4_dscp(self):
+        pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3,
+                DSCP_1)
+        resp = self.px.xfer_unprotected(pkt)
+        self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
+        self.assertEqual(resp[ESP].spi, 11)
+        self.assertEqual(resp[IPv6].tc, DSCP_1)
+
+        pkt = self.gen_pkt_plain_ipv4(SRC_ADDR_IPV4_3, DST_ADDR_IPV4_3,
+                DSCP_3F)
+        resp = self.px.xfer_unprotected(pkt)
+        self.assertEqual(resp[IPv6].nh, socket.IPPROTO_ESP)
+        self.assertEqual(resp[ESP].spi, 11)
+        self.assertEqual(resp[IPv6].tc, DSCP_3F)
+
+#RFC4301 5.1.2.1.5 Inner DS field should not be affected by Outer DS field
+    def test_inb_ipv4v4_dscp(self):
+        pkt = self.gen_pkt_tun_ipv4v4(DSCP_3F, DSCP_1)
+        resp = self.px.xfer_protected(pkt)
+        self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
+        self.assertEqual(resp[IP].tos, DSCP_1)
+
+        pkt = self.gen_pkt_tun_ipv4v4(DSCP_1, DSCP_3F)
+        resp = self.px.xfer_protected(pkt)
+        self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
+        self.assertEqual(resp[IP].tos, DSCP_3F)
+
+    def test_inb_ipv6v6_dscp(self):
+        pkt = self.gen_pkt_tun_ipv6v6(DSCP_3F, DSCP_1)
+        resp = self.px.xfer_protected(pkt)
+        self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
+        self.assertEqual(resp[IPv6].tc, DSCP_1)
+
+        pkt = self.gen_pkt_tun_ipv6v6(DSCP_1, DSCP_3F)
+        resp = self.px.xfer_protected(pkt)
+        self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
+        self.assertEqual(resp[IPv6].tc, DSCP_3F)
+
+    def test_inb_ipv4v6_dscp(self):
+        pkt = self.gen_pkt_tun_ipv4v6(DSCP_3F, DSCP_1)
+        resp = self.px.xfer_protected(pkt)
+        self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
+        self.assertEqual(resp[IPv6].tc, DSCP_1)
+
+        pkt = self.gen_pkt_tun_ipv4v6(DSCP_1, DSCP_3F)
+        resp = self.px.xfer_protected(pkt)
+        self.assertEqual(resp[IPv6].nh, socket.IPPROTO_UDP)
+        self.assertEqual(resp[IPv6].tc, DSCP_3F)
+
+    def test_inb_ipv6v4_dscp(self):
+        pkt = self.gen_pkt_tun_ipv6v4(DSCP_3F, DSCP_1)
+        resp = self.px.xfer_protected(pkt)
+        self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
+        self.assertEqual(resp[IP].tos, DSCP_1)
+
+        pkt = self.gen_pkt_tun_ipv6v4(DSCP_1, DSCP_3F)
+        resp = self.px.xfer_protected(pkt)
+        self.assertEqual(resp[IP].proto, socket.IPPROTO_UDP)
+        self.assertEqual(resp[IP].tos, DSCP_3F)
+
+pkttest.pkttest()
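Taken together, the ECN and DSCP cases in tun_null_header_reconstruct.py check the RFC 4301 header-reconstruction rules end-to-end: pkttest pushes each packet through ipsec-secgw and the reconstructed outer or inner header is inspected with scapy. As a quick reference, the short standalone sketch below (illustrative helper names, not part of the DPDK sources or of pkttest) expresses the same decision logic in plain Python, using the same constant values as the tests.

#!/usr/bin/env python3
# Standalone sketch of the RFC 4301 ECN/DSCP propagation rules exercised by
# the test cases above. Function and constant names are illustrative only.

ECN_MASK = 0x03    # low two bits of the IPv4 TOS / IPv6 TC byte
ECN_CE = 0x03      # Congestion Experienced
DSCP_MASK = 0xFC   # high six bits carry the DSCP code point

def outer_tos_on_encap(inner_tos):
    # Outbound (RFC 4301 5.1.2.1/5.1.2.2 and 5.1.2.1.5): both the ECN bits
    # and the DSCP are copied from the inner header into the new outer header.
    return inner_tos

def inner_tos_on_decap(outer_tos, inner_tos):
    # Inbound: the inner DSCP is never taken from the outer header; the inner
    # ECN is rewritten to CE only when the outer header arrived marked CE.
    if (outer_tos & ECN_MASK) == ECN_CE:
        return (inner_tos & DSCP_MASK) | ECN_CE
    return inner_tos

if __name__ == "__main__":
    # Mirrors test_outb_*: the outer TOS/TC byte equals the inner one on encap.
    assert outer_tos_on_encap(0x04) == 0x04          # DSCP_1 copied outward
    # Mirrors test_inb_*_ecn_inner_no_change: outer not CE, inner untouched.
    assert inner_tos_on_decap(0x01, 0x02) == 0x02    # ECT1 outer, ECT0 inner
    # Mirrors test_inb_*_ecn_inner_change: outer CE forces inner ECN to CE.
    assert inner_tos_on_decap(0x03, 0x02) == 0x03
    # Mirrors test_inb_*_dscp: inner DSCP survives whatever the outer carried.
    assert inner_tos_on_decap(0xFC, 0x04) == 0x04
    print("RFC 4301 ECN/DSCP propagation checks passed")

Running the sketch simply asserts the same outcomes the unittest cases expect: on encapsulation the outer TOS/TC byte is copied from the inner header, and on decapsulation only a CE marking on the outer header is folded back into the inner ECN bits.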