author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 18:24:20 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 18:24:20 +0000
commit | 483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree | e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/seastar/dpdk/examples/ipsec-secgw
parent | Initial commit. (diff)
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/seastar/dpdk/examples/ipsec-secgw')
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/Makefile | 65
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/ep0.cfg | 160
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/ep1.cfg | 160
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/esp.c | 401
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/esp.h | 65
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/ipip.h | 180
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/ipsec-secgw.c | 1499
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/ipsec.c | 235
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/ipsec.h | 234
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/parser.c | 591
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/parser.h | 116
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/rt.c | 235
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/sa.c | 794
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/sp4.c | 535
-rw-r--r-- | src/seastar/dpdk/examples/ipsec-secgw/sp6.c | 649
15 files changed, 5919 insertions, 0 deletions
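The diffs below vendor DPDK's ipsec-secgw sample application: a security gateway that classifies packets against security policies (rte_acl), applies ESP encapsulation/decapsulation through the cryptodev API, and routes the results with LPM lookups. The least obvious arithmetic in the commit is the ESP trailer construction in esp_outbound() (esp.c below). The following standalone editorial sketch works through it with assumed values — a 16-byte block cipher such as aes-128-cbc and a 100-byte payload after the IP header — and is illustrative only, not part of the commit; ALIGN_CEIL is a stand-in for DPDK's RTE_ALIGN_CEIL.

/* Editorial sketch of esp_outbound()'s trailer math (assumed values). */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for RTE_ALIGN_CEIL(v, a) with a power-of-two alignment. */
#define ALIGN_CEIL(v, a) (((v) + (a) - 1) & ~((a) - 1))

int main(void)
{
	const uint16_t block_size = 16;  /* aes-128-cbc block size */
	const uint16_t payload = 100;    /* bytes following the IP header */

	/* "+ 2" reserves the RFC 4303 Pad Length and Next Header bytes. */
	uint16_t pad_payload_len = ALIGN_CEIL(payload + 2, block_size); /* 112 */
	uint16_t pad_len = pad_payload_len - payload;                   /* 12  */

	uint8_t trailer[16];
	int i;

	/* esp_outbound() fills the pad with the sequential scheme 1,2,3,... */
	for (i = 0; i < pad_len - 2; i++)
		trailer[i] = i + 1;                  /* ten pad bytes: 1..10 */
	trailer[pad_len - 2] = pad_len - 2;          /* Pad Length byte = 10 */
	trailer[pad_len - 1] = 4;                    /* Next Header, e.g. IPPROTO_IPIP */

	printf("pad_payload_len=%u pad_len=%u\n", pad_payload_len, pad_len);
	return 0;
}

In esp_outbound() itself, the same rte_pktmbuf_append() call that makes room for this trailer also reserves digest_len bytes for the ICV that follows it.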
diff --git a/src/seastar/dpdk/examples/ipsec-secgw/Makefile b/src/seastar/dpdk/examples/ipsec-secgw/Makefile new file mode 100644 index 00000000..17e91551 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/Makefile @@ -0,0 +1,65 @@ +# BSD LICENSE +# +# Copyright(c) 2016 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ifeq ($(RTE_SDK),) + $(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overridden by command line or environment +RTE_TARGET ?= x86_64-native-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +APP = ipsec-secgw + +CFLAGS += -O3 -gdwarf-2 +CFLAGS += $(WERROR_FLAGS) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS_sa.o += -diag-disable=vec +endif + +ifeq ($(DEBUG),1) +CFLAGS += -DIPSEC_DEBUG -fstack-protector-all -O0 +endif + +# +# all source are stored in SRCS-y +# +SRCS-y += parser.c +SRCS-y += ipsec.c +SRCS-y += esp.c +SRCS-y += sp4.c +SRCS-y += sp6.c +SRCS-y += sa.c +SRCS-y += rt.c +SRCS-y += ipsec-secgw.c + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/src/seastar/dpdk/examples/ipsec-secgw/ep0.cfg b/src/seastar/dpdk/examples/ipsec-secgw/ep0.cfg new file mode 100644 index 00000000..299aa9e0 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/ep0.cfg @@ -0,0 +1,160 @@ +########################################################################### +# IPSEC-SECGW Endpoint sample configuration +# +# The main purpose of this file is to show how to configure two systems +# back-to-back that would forward traffic through an IPsec tunnel. This +# file is the Endpoint 0 configuration. 
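# (Editorial summary, inferred from the rules below -- not part of the
# original file.) Three rule types appear in these configurations:
#   sp <ipv4|ipv6> <in|out> esp <protect <spi>|bypass> [pri <n>] \
#      dst <subnet> sport <lo:hi> dport <lo:hi>
#      -- a security policy; "protect <spi>" hands matching traffic to
#      the SA entry declared with the same number.
#   sa <in|out> <spi> cipher_algo ... cipher_key ... auth_algo ... \
#      auth_key ... mode <ipv4-tunnel|ipv6-tunnel|transport> [src .. dst ..]
#      -- a security association holding keys and tunnel endpoints.
#   rt <ipv4|ipv6> dst <prefix> port <n>
#      -- routes a destination prefix to a TX port.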
To use this configuration file, +# add the following command-line option: +# +# -f ./ep0.cfg +# +########################################################################### + +#SP IPv4 rules +sp ipv4 out esp protect 5 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 6 pri 1 dst 192.168.106.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 10 pri 1 dst 192.168.175.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 11 pri 1 dst 192.168.176.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.240.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.241.0/24 sport 0:65535 dport 0:65535 + +sp ipv4 in esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 110 pri 1 dst 192.168.185.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 111 pri 1 dst 192.168.186.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 116 pri 1 dst 192.168.211.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 126 pri 1 dst 192.168.66.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.245.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass pri 1 dst 192.168.246.0/24 sport 0:65535 dport 0:65535 + +#SP IPv6 rules +sp ipv6 out esp protect 5 pri 1 dst 0000:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 6 pri 1 dst 0000:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 10 pri 1 dst 0000:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 11 pri 1 dst 0000:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 25 pri 1 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +sp ipv6 in esp protect 15 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 16 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +#SA rules +sa out 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.1.5 dst 
172.16.2.5 + +sa out 6 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.1.6 dst 172.16.2.6 + +sa out 10 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa out 11 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa out 15 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.5 \ +dst 172.16.2.5 + +sa out 16 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.6 \ +dst 172.16.2.6 + +sa out 25 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:5555 \ +dst 2222:2222:2222:2222:2222:2222:2222:5555 + +sa out 26 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:6666 \ +dst 2222:2222:2222:2222:2222:2222:2222:6666 + +sa in 105 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 + +sa in 106 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa in 110 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa in 111 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa in 115 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.5 \ +dst 172.16.1.5 + +sa in 116 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa in 125 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:5555 \ +dst 1111:1111:1111:1111:1111:1111:1111:5555 + +sa in 126 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:6666 \ +dst 1111:1111:1111:1111:1111:1111:1111:6666 + +#Routing rules +rt ipv4 dst 172.16.2.5/32 port 0 +rt ipv4 dst 172.16.2.6/32 port 1 +rt ipv4 dst 192.168.175.0/24 port 0 +rt ipv4 dst 192.168.176.0/24 port 1 +rt ipv4 dst 192.168.240.0/24 port 0 +rt ipv4 dst 192.168.241.0/24 port 1 +rt ipv4 dst 192.168.115.0/24 port 2 +rt ipv4 dst 192.168.116.0/24 port 3 +rt ipv4 dst 192.168.65.0/24 port 2 +rt ipv4 dst 192.168.66.0/24 port 3 +rt ipv4 dst 192.168.185.0/24 port 2 +rt ipv4 dst 192.168.186.0/24 port 3 +rt 
ipv4 dst 192.168.210.0/24 port 2 +rt ipv4 dst 192.168.211.0/24 port 3 +rt ipv4 dst 192.168.245.0/24 port 2 +rt ipv4 dst 192.168.246.0/24 port 3 + +rt ipv6 dst 2222:2222:2222:2222:2222:2222:2222:5555/116 port 0 +rt ipv6 dst 2222:2222:2222:2222:2222:2222:2222:6666/116 port 1 +rt ipv6 dst 0000:0000:1111:1111:0000:0000:0000:0000/116 port 0 +rt ipv6 dst 0000:0000:1111:1111:1111:1111:0000:0000/116 port 1 +rt ipv6 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/116 port 3 +rt ipv6 dst ffff:0000:0000:0000:5555:5555:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:0000:0000:6666:6666:0000:0000/116 port 3 +rt ipv6 dst ffff:0000:1111:1111:0000:0000:0000:0000/116 port 2 +rt ipv6 dst ffff:0000:1111:1111:1111:1111:0000:0000/116 port 3 diff --git a/src/seastar/dpdk/examples/ipsec-secgw/ep1.cfg b/src/seastar/dpdk/examples/ipsec-secgw/ep1.cfg new file mode 100644 index 00000000..3f6ff811 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/ep1.cfg @@ -0,0 +1,160 @@ +########################################################################### +# IPSEC-SECGW Endpoint1 sample configuration +# +# The main purpose of this file is to show how to configure two systems +# back-to-back that would forward traffic through an IPsec tunnel. This +# file is the Endpoint1 configuration. To use this configuration file, +# add the following command-line option: +# +# -f ./ep1.cfg +# +########################################################################### + +#SP IPv4 rules +sp ipv4 in esp protect 5 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 6 pri 1 dst 192.168.106.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 10 pri 1 dst 192.168.175.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 11 pri 1 dst 192.168.176.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 15 pri 1 dst 192.168.200.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 16 pri 1 dst 192.168.201.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 25 pri 1 dst 192.168.55.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp protect 26 pri 1 dst 192.168.56.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass dst 192.168.240.0/24 sport 0:65535 dport 0:65535 +sp ipv4 in esp bypass dst 192.168.241.0/24 sport 0:65535 dport 0:65535 + +sp ipv4 out esp protect 105 pri 1 dst 192.168.115.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 106 pri 1 dst 192.168.116.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 110 pri 1 dst 192.168.185.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 111 pri 1 dst 192.168.186.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 116 pri 1 dst 192.168.211.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 115 pri 1 dst 192.168.210.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 125 pri 1 dst 192.168.65.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp protect 126 pri 1 dst 192.168.66.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.245.0/24 sport 0:65535 dport 0:65535 +sp ipv4 out esp bypass pri 1 dst 192.168.246.0/24 sport 0:65535 dport 0:65535 + +#SP IPv6 rules +sp ipv6 in esp protect 5 pri 1 dst 0000:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 6 pri 1 dst 0000:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 
0:65535 +sp ipv6 in esp protect 10 pri 1 dst 0000:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 11 pri 1 dst 0000:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 25 pri 1 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 in esp protect 26 pri 1 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +sp ipv6 out esp protect 15 pri 1 dst ffff:0000:0000:0000:5555:5555:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 16 pri 1 dst ffff:0000:0000:0000:6666:6666:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 110 pri 1 dst ffff:0000:1111:1111:0000:0000:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 111 pri 1 dst ffff:0000:1111:1111:1111:1111:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 125 pri 1 dst ffff:0000:0000:0000:aaaa:aaaa:0000:0000/96 \ +sport 0:65535 dport 0:65535 +sp ipv6 out esp protect 126 pri 1 dst ffff:0000:0000:0000:bbbb:bbbb:0000:0000/96 \ +sport 0:65535 dport 0:65535 + +#SA rules +sa in 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 + +sa in 6 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.1.6 dst 172.16.2.6 + +sa in 10 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa in 11 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa in 15 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.5 \ +dst 172.16.2.5 + +sa in 16 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.1.6 \ +dst 172.16.2.6 + +sa in 25 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:5555 \ +dst 2222:2222:2222:2222:2222:2222:2222:5555 + +sa in 26 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 1111:1111:1111:1111:1111:1111:1111:6666 \ +dst 2222:2222:2222:2222:2222:2222:2222:6666 + +sa out 105 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \ +mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 + +sa out 106 cipher_algo aes-128-cbc cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0 auth_algo sha1-hmac auth_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:\ +a0:a0:a0:a0:a0:a0:a0:a0:a0 mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa out 110 cipher_algo aes-128-cbc cipher_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1 auth_algo sha1-hmac auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:\ +a1:a1:a1:a1:a1:a1:a1:a1:a1 mode transport + +sa out 111 cipher_algo aes-128-cbc cipher_key b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2 auth_algo sha1-hmac auth_key 
b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:b2:\ +b2:b2:b2:b2:b2:b2:b2:b2:b2 mode transport + +sa out 115 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.5 \ +dst 172.16.1.5 + +sa out 116 cipher_algo null auth_algo null mode ipv4-tunnel src 172.16.2.6 dst 172.16.1.6 + +sa out 125 cipher_algo aes-128-cbc cipher_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3 auth_algo sha1-hmac auth_key c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:c3:\ +c3:c3:c3:c3:c3:c3:c3:c3:c3 mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:5555 \ +dst 1111:1111:1111:1111:1111:1111:1111:5555 + +sa out 126 cipher_algo aes-128-cbc cipher_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d auth_algo sha1-hmac auth_key 4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:4d:\ +4d:4d:4d:4d:4d:4d:4d:4d:4d mode ipv6-tunnel \ +src 2222:2222:2222:2222:2222:2222:2222:6666 \ +dst 1111:1111:1111:1111:1111:1111:1111:6666 + +#Routing rules +rt ipv4 dst 172.16.1.5/32 port 0 +rt ipv4 dst 172.16.1.6/32 port 1 +rt ipv4 dst 192.168.185.0/24 port 0 +rt ipv4 dst 192.168.186.0/24 port 1 +rt ipv4 dst 192.168.245.0/24 port 0 +rt ipv4 dst 192.168.246.0/24 port 1 +rt ipv4 dst 192.168.105.0/24 port 2 +rt ipv4 dst 192.168.106.0/24 port 3 +rt ipv4 dst 192.168.55.0/24 port 2 +rt ipv4 dst 192.168.56.0/24 port 3 +rt ipv4 dst 192.168.175.0/24 port 2 +rt ipv4 dst 192.168.176.0/24 port 3 +rt ipv4 dst 192.168.200.0/24 port 2 +rt ipv4 dst 192.168.201.0/24 port 3 +rt ipv4 dst 192.168.240.0/24 port 2 +rt ipv4 dst 192.168.241.0/24 port 3 + +rt ipv6 dst 1111:1111:1111:1111:1111:1111:1111:5555/116 port 0 +rt ipv6 dst 1111:1111:1111:1111:1111:1111:1111:6666/116 port 1 +rt ipv6 dst ffff:0000:1111:1111:0000:0000:0000:0000/116 port 0 +rt ipv6 dst ffff:0000:1111:1111:1111:1111:0000:0000/116 port 1 +rt ipv6 dst 0000:0000:0000:0000:aaaa:aaaa:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:0000:0000:bbbb:bbbb:0000:0000/116 port 3 +rt ipv6 dst 0000:0000:0000:0000:5555:5555:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:0000:0000:6666:6666:0000:0000/116 port 3 +rt ipv6 dst 0000:0000:1111:1111:0000:0000:0000:0000/116 port 2 +rt ipv6 dst 0000:0000:1111:1111:1111:1111:0000:0000/116 port 3 diff --git a/src/seastar/dpdk/examples/ipsec-secgw/esp.c b/src/seastar/dpdk/examples/ipsec-secgw/esp.c new file mode 100644 index 00000000..e77afa0e --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/esp.c @@ -0,0 +1,401 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdint.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> +#include <fcntl.h> +#include <unistd.h> + +#include <rte_common.h> +#include <rte_crypto.h> +#include <rte_cryptodev.h> +#include <rte_random.h> + +#include "ipsec.h" +#include "esp.h" +#include "ipip.h" + +int +esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4; + struct rte_crypto_sym_op *sym_cop; + int32_t payload_len, ip_hdr_len; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + RTE_ASSERT(cop != NULL); + + ip4 = rte_pktmbuf_mtod(m, struct ip *); + if (likely(ip4->ip_v == IPVERSION)) + ip_hdr_len = ip4->ip_hl * 4; + else if (ip4->ip_v == IP6_VERSION) + /* XXX No option headers supported */ + ip_hdr_len = sizeof(struct ip6_hdr); + else { + RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n", + ip4->ip_v); + return -EINVAL; + } + + payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len - + sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len; + + if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) { + RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n", + payload_len, sa->block_size); + return -EINVAL; + } + + sym_cop = get_sym_cop(cop); + + sym_cop->m_src = m; + sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) + + sa->iv_len; + sym_cop->cipher.data.length = payload_len; + + struct cnt_blk *icb; + uint8_t *aad; + uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr)); + + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_AES_CBC: + sym_cop->cipher.iv.data = iv; + sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m, + ip_hdr_len + sizeof(struct esp_hdr)); + sym_cop->cipher.iv.length = sa->iv_len; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + case RTE_CRYPTO_CIPHER_AES_GCM: + icb = get_cnt_blk(m); + icb->salt = sa->salt; + memcpy(&icb->iv, iv, 8); + icb->cnt = rte_cpu_to_be_32(1); + sym_cop->cipher.iv.data = (uint8_t *)icb; + sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m, + (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *)); + sym_cop->cipher.iv.length = 16; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + + switch (sa->auth_algo) { + case RTE_CRYPTO_AUTH_NULL: + case RTE_CRYPTO_AUTH_SHA1_HMAC: + case RTE_CRYPTO_AUTH_SHA256_HMAC: + sym_cop->auth.data.offset = ip_hdr_len; + sym_cop->auth.data.length = sizeof(struct esp_hdr) + + sa->iv_len + payload_len; + break; + case RTE_CRYPTO_AUTH_AES_GCM: + aad = get_aad(m); + memcpy(aad, iv - sizeof(struct esp_hdr), 8); + sym_cop->auth.aad.data = aad; + sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m, + aad - rte_pktmbuf_mtod(m, uint8_t *)); + sym_cop->auth.aad.length = 8; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", + sa->auth_algo); + return -EINVAL; + } + + 
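/* Editorial note, not part of the commit: the ICV occupies the final
 * digest_len bytes of the packet, so the statements below point the
 * crypto op's digest field at that trailer and the crypto device can
 * verify it in place. */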
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->auth.digest.length = sa->digest_len; + + return 0; +} + +int +esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4, *ip; + struct ip6_hdr *ip6; + uint8_t *nexthdr, *pad_len; + uint8_t *padding; + uint16_t i; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + RTE_ASSERT(cop != NULL); + + if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { + RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n"); + return -1; + } + + nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*, + rte_pktmbuf_pkt_len(m) - sa->digest_len - 1); + pad_len = nexthdr - 1; + + padding = pad_len - *pad_len; + for (i = 0; i < *pad_len; i++) { + if (padding[i] != i + 1) { + RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n"); + return -EINVAL; + } + } + + if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) { + RTE_LOG(ERR, IPSEC_ESP, + "failed to remove pad_len + digest\n"); + return -EINVAL; + } + + if (unlikely(sa->flags == TRANSPORT)) { + ip = rte_pktmbuf_mtod(m, struct ip *); + ip4 = (struct ip *)rte_pktmbuf_adj(m, + sizeof(struct esp_hdr) + sa->iv_len); + if (likely(ip->ip_v == IPVERSION)) { + memmove(ip4, ip, ip->ip_hl * 4); + ip4->ip_p = *nexthdr; + ip4->ip_len = htons(rte_pktmbuf_data_len(m)); + } else { + ip6 = (struct ip6_hdr *)ip4; + /* XXX No option headers supported */ + memmove(ip6, ip, sizeof(struct ip6_hdr)); + ip6->ip6_nxt = *nexthdr; + ip6->ip6_plen = htons(rte_pktmbuf_data_len(m)); + } + } else + ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len); + + return 0; +} + +int +esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop) +{ + struct ip *ip4; + struct ip6_hdr *ip6; + struct esp_hdr *esp = NULL; + uint8_t *padding, *new_ip, nlp; + struct rte_crypto_sym_op *sym_cop; + int32_t i; + uint16_t pad_payload_len, pad_len, ip_hdr_len; + + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + RTE_ASSERT(cop != NULL); + + ip_hdr_len = 0; + + ip4 = rte_pktmbuf_mtod(m, struct ip *); + if (likely(ip4->ip_v == IPVERSION)) { + if (unlikely(sa->flags == TRANSPORT)) { + ip_hdr_len = ip4->ip_hl * 4; + nlp = ip4->ip_p; + } else + nlp = IPPROTO_IPIP; + } else if (ip4->ip_v == IP6_VERSION) { + if (unlikely(sa->flags == TRANSPORT)) { + /* XXX No option headers supported */ + ip_hdr_len = sizeof(struct ip6_hdr); + ip6 = (struct ip6_hdr *)ip4; + nlp = ip6->ip6_nxt; + } else + nlp = IPPROTO_IPV6; + } else { + RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n", + ip4->ip_v); + return -EINVAL; + } + + /* Padded payload length */ + pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) - + ip_hdr_len + 2, sa->block_size); + pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m); + + RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL || + sa->flags == TRANSPORT); + + if (likely(sa->flags == IP4_TUNNEL)) + ip_hdr_len = sizeof(struct ip); + else if (sa->flags == IP6_TUNNEL) + ip_hdr_len = sizeof(struct ip6_hdr); + else if (sa->flags != TRANSPORT) { + RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n", + sa->flags); + return -EINVAL; + } + + /* Check maximum packet size */ + if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len + + pad_payload_len + sa->digest_len > IP_MAXPACKET)) { + RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n"); + return -EINVAL; + } + + padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + 
sa->digest_len); + if (unlikely(padding == NULL)) { + RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n"); + return -ENOSPC; + } + rte_prefetch0(padding); + + switch (sa->flags) { + case IP4_TUNNEL: + ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len, + &sa->src, &sa->dst); + esp = (struct esp_hdr *)(ip4 + 1); + break; + case IP6_TUNNEL: + ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len, + &sa->src, &sa->dst); + esp = (struct esp_hdr *)(ip6 + 1); + break; + case TRANSPORT: + new_ip = (uint8_t *)rte_pktmbuf_prepend(m, + sizeof(struct esp_hdr) + sa->iv_len); + memmove(new_ip, ip4, ip_hdr_len); + esp = (struct esp_hdr *)(new_ip + ip_hdr_len); + if (likely(ip4->ip_v == IPVERSION)) { + ip4 = (struct ip *)new_ip; + ip4->ip_p = IPPROTO_ESP; + ip4->ip_len = htons(rte_pktmbuf_data_len(m)); + } else { + ip6 = (struct ip6_hdr *)new_ip; + ip6->ip6_nxt = IPPROTO_ESP; + ip6->ip6_plen = htons(rte_pktmbuf_data_len(m)); + } + } + + sa->seq++; + esp->spi = rte_cpu_to_be_32(sa->spi); + esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq); + + uint64_t *iv = (uint64_t *)(esp + 1); + + sym_cop = get_sym_cop(cop); + sym_cop->m_src = m; + switch (sa->cipher_algo) { + case RTE_CRYPTO_CIPHER_NULL: + case RTE_CRYPTO_CIPHER_AES_CBC: + memset(iv, 0, sa->iv_len); + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct esp_hdr); + sym_cop->cipher.data.length = pad_payload_len + sa->iv_len; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + case RTE_CRYPTO_CIPHER_AES_GCM: + *iv = sa->seq; + sym_cop->cipher.data.offset = ip_hdr_len + + sizeof(struct esp_hdr) + sa->iv_len; + sym_cop->cipher.data.length = pad_payload_len; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", + sa->cipher_algo); + return -EINVAL; + } + + /* Fill pad_len using default sequential scheme */ + for (i = 0; i < pad_len - 2; i++) + padding[i] = i + 1; + padding[pad_len - 2] = pad_len - 2; + padding[pad_len - 1] = nlp; + + struct cnt_blk *icb = get_cnt_blk(m); + icb->salt = sa->salt; + icb->iv = sa->seq; + icb->cnt = rte_cpu_to_be_32(1); + sym_cop->cipher.iv.data = (uint8_t *)icb; + sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m, + (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *)); + sym_cop->cipher.iv.length = 16; + + uint8_t *aad; + + switch (sa->auth_algo) { + case RTE_CRYPTO_AUTH_NULL: + case RTE_CRYPTO_AUTH_SHA1_HMAC: + case RTE_CRYPTO_AUTH_SHA256_HMAC: + sym_cop->auth.data.offset = ip_hdr_len; + sym_cop->auth.data.length = sizeof(struct esp_hdr) + + sa->iv_len + pad_payload_len; + break; + case RTE_CRYPTO_AUTH_AES_GCM: + aad = get_aad(m); + memcpy(aad, esp, 8); + sym_cop->auth.aad.data = aad; + sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m, + aad - rte_pktmbuf_mtod(m, uint8_t *)); + sym_cop->auth.aad.length = 8; + break; + default: + RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", + sa->auth_algo); + return -EINVAL; + } + + sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m, + rte_pktmbuf_pkt_len(m) - sa->digest_len); + sym_cop->auth.digest.length = sa->digest_len; + + return 0; +} + +int +esp_outbound_post(struct rte_mbuf *m __rte_unused, + struct ipsec_sa *sa __rte_unused, + struct rte_crypto_op *cop) +{ + RTE_ASSERT(m != NULL); + RTE_ASSERT(sa != NULL); + RTE_ASSERT(cop != NULL); + + if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { + RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n"); + return -1; + } + + return 0; +} diff --git 
a/src/seastar/dpdk/examples/ipsec-secgw/esp.h b/src/seastar/dpdk/examples/ipsec-secgw/esp.h new file mode 100644 index 00000000..fa5cc8af --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/esp.h @@ -0,0 +1,65 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __RTE_IPSEC_XFORM_ESP_H__ +#define __RTE_IPSEC_XFORM_ESP_H__ + +struct mbuf; + +/* RFC4303 */ +struct esp_hdr { + uint32_t spi; + uint32_t seq; + /* Payload */ + /* Padding */ + /* Pad Length */ + /* Next Header */ + /* Integrity Check Value - ICV */ +}; + +int +esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +int +esp_outbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +#endif /* __RTE_IPSEC_XFORM_ESP_H__ */ diff --git a/src/seastar/dpdk/examples/ipsec-secgw/ipip.h b/src/seastar/dpdk/examples/ipsec-secgw/ipip.h new file mode 100644 index 00000000..ff1dccdb --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/ipip.h @@ -0,0 +1,180 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __IPIP_H__ +#define __IPIP_H__ + +#include <stdint.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> + +#include <rte_mbuf.h> + +static inline void * +ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6, + struct ip_addr *src, struct ip_addr *dst) +{ + struct ip *inip4, *outip4; + struct ip6_hdr *inip6, *outip6; + uint8_t ds_ecn; + + inip4 = rte_pktmbuf_mtod(m, struct ip *); + + RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION); + + if (inip4->ip_v == IPVERSION) { + /* XXX This should be done by the forwarding engine instead */ + inip4->ip_ttl -= 1; + ds_ecn = inip4->ip_tos; + } else { + inip6 = (struct ip6_hdr *)inip4; + /* XXX This should be done by the forwarding engine instead */ + inip6->ip6_hops -= 1; + ds_ecn = ntohl(inip6->ip6_flow) >> 20; + } + + if (is_ipv6) { + offset += sizeof(struct ip6_hdr); + outip6 = (struct ip6_hdr *)rte_pktmbuf_prepend(m, offset); + + RTE_ASSERT(outip6 != NULL); + + /* Per RFC4301 5.1.2.1 */ + outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20); + outip6->ip6_plen = htons(rte_pktmbuf_data_len(m)); + + outip6->ip6_nxt = IPPROTO_ESP; + outip6->ip6_hops = IPDEFTTL; + + memcpy(&outip6->ip6_src.s6_addr, src, 16); + memcpy(&outip6->ip6_dst.s6_addr, dst, 16); + + return outip6; + } + + offset += sizeof(struct ip); + outip4 = (struct ip *)rte_pktmbuf_prepend(m, offset); + + RTE_ASSERT(outip4 != NULL); + + /* Per RFC4301 5.1.2.1 */ + outip4->ip_v = IPVERSION; + outip4->ip_hl = 5; + outip4->ip_tos = ds_ecn; + outip4->ip_len = htons(rte_pktmbuf_data_len(m)); + + outip4->ip_id = 0; + outip4->ip_off = 0; + + outip4->ip_ttl = IPDEFTTL; + outip4->ip_p = IPPROTO_ESP; + + outip4->ip_src.s_addr = src->ip.ip4; + outip4->ip_dst.s_addr = dst->ip.ip4; + + return outip4; +} + +static inline struct ip * +ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, + struct ip_addr *src, struct ip_addr *dst) +{ + return ipip_outbound(m, offset, 0, src, dst); +} + +static inline struct ip6_hdr * +ip6ip_outbound(struct rte_mbuf *m, uint32_t offset, + struct ip_addr *src, struct ip_addr *dst) +{ + return ipip_outbound(m, offset, 1, src, dst); +} + +static inline void +ip4_ecn_setup(struct ip *ip4) +{ + if (ip4->ip_tos & IPTOS_ECN_MASK) + ip4->ip_tos |= IPTOS_ECN_CE; +} + +static inline void +ip6_ecn_setup(struct ip6_hdr *ip6) +{ + if ((ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK) + ip6->ip6_flow = htonl(ntohl(ip6->ip6_flow) | + (IPTOS_ECN_CE << 20)); +} + +static inline void +ipip_inbound(struct rte_mbuf *m, uint32_t offset) 
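/* Editorial note, not part of the commit: ipip_inbound() strips the
 * outer tunnel IP header (offset bytes of ESP data plus the outer
 * header), copies an ECN CE mark into the inner header per RFC 4301
 * 5.1.2.1 Note 6, and decrements the inner TTL/hop limit. */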
+{ + struct ip *inip4, *outip4; + struct ip6_hdr *inip6, *outip6; + uint32_t ip_len, set_ecn; + + outip4 = rte_pktmbuf_mtod(m, struct ip*); + + RTE_ASSERT(outip4->ip_v == IPVERSION || outip4->ip_v == IP6_VERSION); + + if (outip4->ip_v == IPVERSION) { + ip_len = sizeof(struct ip); + set_ecn = ((outip4->ip_tos & IPTOS_ECN_CE) == IPTOS_ECN_CE); + } else { + outip6 = (struct ip6_hdr *)outip4; + ip_len = sizeof(struct ip6_hdr); + set_ecn = ntohl(outip6->ip6_flow) >> 20; + set_ecn = ((set_ecn & IPTOS_ECN_CE) == IPTOS_ECN_CE); + } + + inip4 = (struct ip *)rte_pktmbuf_adj(m, offset + ip_len); + RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION); + + /* Check packet is still bigger than IP header (inner) */ + RTE_ASSERT(rte_pktmbuf_pkt_len(m) > ip_len); + + /* RFC4301 5.1.2.1 Note 6 */ + if (inip4->ip_v == IPVERSION) { + if (set_ecn) + ip4_ecn_setup(inip4); + /* XXX This should be done by the forwarding engine instead */ + inip4->ip_ttl -= 1; + } else { + inip6 = (struct ip6_hdr *)inip4; + if (set_ecn) + ip6_ecn_setup(inip6); + /* XXX This should be done by the forwarding engine instead */ + inip6->ip6_hops -= 1; + } +} + +#endif /* __IPIP_H__ */ diff --git a/src/seastar/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/src/seastar/dpdk/examples/ipsec-secgw/ipsec-secgw.c new file mode 100644 index 00000000..8cbf6ac4 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/ipsec-secgw.c @@ -0,0 +1,1499 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <inttypes.h> +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> +#include <string.h> +#include <sys/queue.h> +#include <stdarg.h> +#include <errno.h> +#include <getopt.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_eal.h> +#include <rte_launch.h> +#include <rte_atomic.h> +#include <rte_cycles.h> +#include <rte_prefetch.h> +#include <rte_lcore.h> +#include <rte_per_lcore.h> +#include <rte_branch_prediction.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_random.h> +#include <rte_debug.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_acl.h> +#include <rte_lpm.h> +#include <rte_lpm6.h> +#include <rte_hash.h> +#include <rte_jhash.h> +#include <rte_cryptodev.h> + +#include "ipsec.h" +#include "parser.h" + +#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 + +#define MAX_JUMBO_PKT_LEN 9600 + +#define MEMPOOL_CACHE_SIZE 256 + +#define NB_MBUF (32000) + +#define CDEV_QUEUE_DESC 2048 +#define CDEV_MAP_ENTRIES 1024 +#define CDEV_MP_NB_OBJS 2048 +#define CDEV_MP_CACHE_SZ 64 +#define MAX_QUEUE_PAIRS 1 + +#define OPTION_CONFIG "config" +#define OPTION_SINGLE_SA "single-sa" + +#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ + +#define NB_SOCKETS 4 + +/* Configure how many packets ahead to prefetch, when reading packets */ +#define PREFETCH_OFFSET 3 + +#define MAX_RX_QUEUE_PER_LCORE 16 + +#define MAX_LCORE_PARAMS 1024 + +#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) + +/* + * Configurable number of RX/TX ring descriptors + */ +#define IPSEC_SECGW_RX_DESC_DEFAULT 128 +#define IPSEC_SECGW_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; +static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; + +#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((a) & 0xff) << 56) | \ + ((uint64_t)((b) & 0xff) << 48) | \ + ((uint64_t)((c) & 0xff) << 40) | \ + ((uint64_t)((d) & 0xff) << 32) | \ + ((uint64_t)((e) & 0xff) << 24) | \ + ((uint64_t)((f) & 0xff) << 16) | \ + ((uint64_t)((g) & 0xff) << 8) | \ + ((uint64_t)(h) & 0xff)) +#else +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((h) & 0xff) << 56) | \ + ((uint64_t)((g) & 0xff) << 48) | \ + ((uint64_t)((f) & 0xff) << 40) | \ + ((uint64_t)((e) & 0xff) << 32) | \ + ((uint64_t)((d) & 0xff) << 24) | \ + ((uint64_t)((c) & 0xff) << 16) | \ + ((uint64_t)((b) & 0xff) << 8) | \ + ((uint64_t)(a) & 0xff)) +#endif +#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) + +#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ + addr.addr_bytes[0], addr.addr_bytes[1], \ + addr.addr_bytes[2], addr.addr_bytes[3], \ + addr.addr_bytes[4], addr.addr_bytes[5], \ + 0, 0) + +/* port/source ethernet addr and destination ethernet addr */ +struct ethaddr_info { + uint64_t src, dst; +}; + +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } +}; + +/* mask of enabled ports */ +static uint32_t enabled_port_mask; +static uint32_t unprotected_port_mask; +static int32_t promiscuous_on = 1; +static int32_t numa_on = 1; /**< NUMA is enabled by default. 
*/ +static uint32_t nb_lcores; +static uint32_t single_sa; +static uint32_t single_sa_idx; + +struct lcore_rx_queue { + uint8_t port_id; + uint8_t queue_id; +} __rte_cache_aligned; + +struct lcore_params { + uint8_t port_id; + uint8_t queue_id; + uint8_t lcore_id; +} __rte_cache_aligned; + +static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; + +static struct lcore_params *lcore_params; +static uint16_t nb_lcore_params; + +static struct rte_hash *cdev_map_in; +static struct rte_hash *cdev_map_out; + +struct buffer { + uint16_t len; + struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); +}; + +struct lcore_conf { + uint16_t nb_rx_queue; + struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; + struct buffer tx_mbufs[RTE_MAX_ETHPORTS]; + struct ipsec_ctx inbound; + struct ipsec_ctx outbound; + struct rt_ctx *rt4_ctx; + struct rt_ctx *rt6_ctx; +} __rte_cache_aligned; + +static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + +static struct rte_eth_conf port_conf = { + .rxmode = { + .mq_mode = ETH_MQ_RX_RSS, + .max_rx_pkt_len = ETHER_MAX_LEN, + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 1, /**< IP checksum offload enabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 1, /**< CRC stripped by hardware */ + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | + ETH_RSS_TCP | ETH_RSS_SCTP, + }, + }, + .txmode = { + .mq_mode = ETH_MQ_TX_NONE, + }, +}; + +static struct socket_ctx socket_ctx[NB_SOCKETS]; + +struct traffic_type { + const uint8_t *data[MAX_PKT_BURST * 2]; + struct rte_mbuf *pkts[MAX_PKT_BURST * 2]; + uint32_t res[MAX_PKT_BURST * 2]; + uint32_t num; +}; + +struct ipsec_traffic { + struct traffic_type ipsec; + struct traffic_type ip4; + struct traffic_type ip6; +}; + +static inline void +prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) +{ + uint8_t *nlp; + struct ether_hdr *eth; + + eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) { + nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN); + nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p)); + if (*nlp == IPPROTO_ESP) + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + else { + t->ip4.data[t->ip4.num] = nlp; + t->ip4.pkts[(t->ip4.num)++] = pkt; + } + } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) { + nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN); + nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt)); + if (*nlp == IPPROTO_ESP) + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + else { + t->ip6.data[t->ip6.num] = nlp; + t->ip6.pkts[(t->ip6.num)++] = pkt; + } + } else { + /* Unknown/Unsupported type, drop the packet */ + RTE_LOG(ERR, IPSEC, "Unsupported packet type\n"); + rte_pktmbuf_free(pkt); + } +} + +static inline void +prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t, + uint16_t nb_pkts) +{ + int32_t i; + + t->ipsec.num = 0; + t->ip4.num = 0; + t->ip6.num = 0; + + for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) { + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET], + void *)); + prepare_one_packet(pkts[i], t); + } + /* Process left packets */ + for (; i < nb_pkts; i++) + prepare_one_packet(pkts[i], t); +} + +static inline void +prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port) +{ + struct ip *ip; + struct ether_hdr *ethhdr; + + ip = rte_pktmbuf_mtod(pkt, struct ip *); + + 
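/* Editorial note, not part of the commit: the statements below re-attach
 * the Ethernet header stripped in prepare_one_packet(), set the l2/l3
 * lengths and IPv4 checksum-offload flags, and stamp the per-port
 * src/dst MACs from ethaddr_tbl[]. */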
ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN); + + if (ip->ip_v == IPVERSION) { + pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4; + pkt->l3_len = sizeof(struct ip); + pkt->l2_len = ETHER_HDR_LEN; + + ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + } else { + pkt->ol_flags |= PKT_TX_IPV6; + pkt->l3_len = sizeof(struct ip6_hdr); + pkt->l2_len = ETHER_HDR_LEN; + + ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6); + } + + memcpy(ðhdr->s_addr, ðaddr_tbl[port].src, + sizeof(struct ether_addr)); + memcpy(ðhdr->d_addr, ðaddr_tbl[port].dst, + sizeof(struct ether_addr)); +} + +static inline void +prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port) +{ + int32_t i; + const int32_t prefetch_offset = 2; + + for (i = 0; i < (nb_pkts - prefetch_offset); i++) { + rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]); + prepare_tx_pkt(pkts[i], port); + } + /* Process left packets */ + for (; i < nb_pkts; i++) + prepare_tx_pkt(pkts[i], port); +} + +/* Send burst of packets on an output interface */ +static inline int32_t +send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port) +{ + struct rte_mbuf **m_table; + int32_t ret; + uint16_t queueid; + + queueid = qconf->tx_queue_id[port]; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + prepare_tx_burst(m_table, n, port); + + ret = rte_eth_tx_burst(port, queueid, m_table, n); + if (unlikely(ret < n)) { + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +/* Enqueue a single packet, and send burst if queue is filled */ +static inline int32_t +send_single_packet(struct rte_mbuf *m, uint8_t port) +{ + uint32_t lcore_id; + uint16_t len; + struct lcore_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +static inline void +inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip, + uint16_t lim) +{ + struct rte_mbuf *m; + uint32_t i, j, res, sa_idx; + + if (ip->num == 0 || sp == NULL) + return; + + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, + ip->num, DEFAULT_MAX_CATEGORIES); + + j = 0; + for (i = 0; i < ip->num; i++) { + m = ip->pkts[i]; + res = ip->res[i]; + if (res & BYPASS) { + ip->pkts[j++] = m; + continue; + } + if (res & DISCARD || i < lim) { + rte_pktmbuf_free(m); + continue; + } + /* Only check SPI match for processed IPSec packets */ + sa_idx = ip->res[i] & PROTECT_MASK; + if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) { + rte_pktmbuf_free(m); + continue; + } + ip->pkts[j++] = m; + } + ip->num = j; +} + +static inline void +process_pkts_inbound(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6; + + nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.num, MAX_PKT_BURST); + + n_ip4 = traffic->ip4.num; + n_ip6 = traffic->ip6.num; + + /* SP/ACL Inbound check ipsec and ip4 */ + for (i = 0; i < nb_pkts_in; i++) { + m = traffic->ipsec.pkts[i]; + struct ip *ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + idx = traffic->ip4.num++; + traffic->ip4.pkts[idx] = m; + traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m, + uint8_t *, offsetof(struct ip, ip_p)); + } else if (ip->ip_v == IP6_VERSION) 
{ + idx = traffic->ip6.num++; + traffic->ip6.pkts[idx] = m; + traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m, + uint8_t *, + offsetof(struct ip6_hdr, ip6_nxt)); + } else + rte_pktmbuf_free(m); + } + + inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4, + n_ip4); + + inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6, + n_ip6); +} + +static inline void +outbound_sp(struct sp_ctx *sp, struct traffic_type *ip, + struct traffic_type *ipsec) +{ + struct rte_mbuf *m; + uint32_t i, j, sa_idx; + + if (ip->num == 0 || sp == NULL) + return; + + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, + ip->num, DEFAULT_MAX_CATEGORIES); + + j = 0; + for (i = 0; i < ip->num; i++) { + m = ip->pkts[i]; + sa_idx = ip->res[i] & PROTECT_MASK; + if ((ip->res[i] == 0) || (ip->res[i] & DISCARD)) + rte_pktmbuf_free(m); + else if (sa_idx != 0) { + ipsec->res[ipsec->num] = sa_idx; + ipsec->pkts[ipsec->num++] = m; + } else /* BYPASS */ + ip->pkts[j++] = m; + } + ip->num = j; +} + +static inline void +process_pkts_outbound(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint16_t idx, nb_pkts_out, i; + + /* Drop any IPsec traffic from protected ports */ + for (i = 0; i < traffic->ipsec.num; i++) + rte_pktmbuf_free(traffic->ipsec.pkts[i]); + + traffic->ipsec.num = 0; + + outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec); + + outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec); + + nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.res, traffic->ipsec.num, + MAX_PKT_BURST); + + for (i = 0; i < nb_pkts_out; i++) { + m = traffic->ipsec.pkts[i]; + struct ip *ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + idx = traffic->ip4.num++; + traffic->ip4.pkts[idx] = m; + } else { + idx = traffic->ip6.num++; + traffic->ip6.pkts[idx] = m; + } + } +} + +static inline void +process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint32_t nb_pkts_in, i, idx; + + /* Drop any IPv4 traffic from unprotected ports */ + for (i = 0; i < traffic->ip4.num; i++) + rte_pktmbuf_free(traffic->ip4.pkts[i]); + + traffic->ip4.num = 0; + + /* Drop any IPv6 traffic from unprotected ports */ + for (i = 0; i < traffic->ip6.num; i++) + rte_pktmbuf_free(traffic->ip6.pkts[i]); + + traffic->ip6.num = 0; + + nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts, + traffic->ipsec.num, MAX_PKT_BURST); + + for (i = 0; i < nb_pkts_in; i++) { + m = traffic->ipsec.pkts[i]; + struct ip *ip = rte_pktmbuf_mtod(m, struct ip *); + if (ip->ip_v == IPVERSION) { + idx = traffic->ip4.num++; + traffic->ip4.pkts[idx] = m; + } else { + idx = traffic->ip6.num++; + traffic->ip6.pkts[idx] = m; + } + } +} + +static inline void +process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, + struct ipsec_traffic *traffic) +{ + struct rte_mbuf *m; + uint32_t nb_pkts_out, i; + struct ip *ip; + + /* Drop any IPsec traffic from protected ports */ + for (i = 0; i < traffic->ipsec.num; i++) + rte_pktmbuf_free(traffic->ipsec.pkts[i]); + + traffic->ipsec.num = 0; + + for (i = 0; i < traffic->ip4.num; i++) + traffic->ip4.res[i] = single_sa_idx; + + for (i = 0; i < traffic->ip6.num; i++) + traffic->ip6.res[i] = single_sa_idx; + + nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts, + traffic->ip4.res, traffic->ip4.num, + MAX_PKT_BURST); + + /* They all sue the same SA (ip4 or ip6 tunnel) */ + m = traffic->ipsec.pkts[i]; + ip = rte_pktmbuf_mtod(m, struct ip *); + if 
(ip->ip_v == IPVERSION) + traffic->ip4.num = nb_pkts_out; + else + traffic->ip6.num = nb_pkts_out; +} + +static inline void +route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +{ + uint32_t hop[MAX_PKT_BURST * 2]; + uint32_t dst_ip[MAX_PKT_BURST * 2]; + uint16_t i, offset; + + if (nb_pkts == 0) + return; + + for (i = 0; i < nb_pkts; i++) { + offset = offsetof(struct ip, ip_dst); + dst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i], + uint32_t *, offset); + dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]); + } + + rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts); + + for (i = 0; i < nb_pkts; i++) { + if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) { + rte_pktmbuf_free(pkts[i]); + continue; + } + send_single_packet(pkts[i], hop[i] & 0xff); + } +} + +static inline void +route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +{ + int32_t hop[MAX_PKT_BURST * 2]; + uint8_t dst_ip[MAX_PKT_BURST * 2][16]; + uint8_t *ip6_dst; + uint16_t i, offset; + + if (nb_pkts == 0) + return; + + for (i = 0; i < nb_pkts; i++) { + offset = offsetof(struct ip6_hdr, ip6_dst); + ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset); + memcpy(&dst_ip[i][0], ip6_dst, 16); + } + + rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, + hop, nb_pkts); + + for (i = 0; i < nb_pkts; i++) { + if (hop[i] == -1) { + rte_pktmbuf_free(pkts[i]); + continue; + } + send_single_packet(pkts[i], hop[i] & 0xff); + } +} + +static inline void +process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, + uint8_t nb_pkts, uint8_t portid) +{ + struct ipsec_traffic traffic; + + prepare_traffic(pkts, &traffic, nb_pkts); + + if (unlikely(single_sa)) { + if (UNPROTECTED_PORT(portid)) + process_pkts_inbound_nosp(&qconf->inbound, &traffic); + else + process_pkts_outbound_nosp(&qconf->outbound, &traffic); + } else { + if (UNPROTECTED_PORT(portid)) + process_pkts_inbound(&qconf->inbound, &traffic); + else + process_pkts_outbound(&qconf->outbound, &traffic); + } + + route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num); + route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num); +} + +static inline void +drain_buffers(struct lcore_conf *qconf) +{ + struct buffer *buf; + uint32_t portid; + + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { + buf = &qconf->tx_mbufs[portid]; + if (buf->len == 0) + continue; + send_burst(qconf, buf->len, portid); + buf->len = 0; + } +} + +/* main processing loop */ +static int32_t +main_loop(__attribute__((unused)) void *dummy) +{ + struct rte_mbuf *pkts[MAX_PKT_BURST]; + uint32_t lcore_id; + uint64_t prev_tsc, diff_tsc, cur_tsc; + int32_t i, nb_rx; + uint8_t portid, queueid; + struct lcore_conf *qconf; + int32_t socket_id; + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) + / US_PER_S * BURST_TX_DRAIN_US; + struct lcore_rx_queue *rxql; + + prev_tsc = 0; + lcore_id = rte_lcore_id(); + qconf = &lcore_conf[lcore_id]; + rxql = qconf->rx_queue_list; + socket_id = rte_lcore_to_socket_id(lcore_id); + + qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4; + qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6; + qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in; + qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in; + qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in; + qconf->inbound.cdev_map = cdev_map_in; + qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out; + qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out; + qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out; + qconf->outbound.cdev_map = 
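/*
 * route4_pkts() and route6_pkts() above treat the low byte of a
 * successful LPM lookup result as the output port. A self-contained
 * sketch of building and querying such a table; the table name, sizes
 * and addresses here are illustrative only:
 *
 *	struct rte_lpm_config cfg = { .max_rules = 64, .number_tbl8s = 256 };
 *	struct rte_lpm *t = rte_lpm_create("rt_demo", SOCKET_ID_ANY, &cfg);
 *	uint32_t dst = (192U << 24) | (168U << 16) | (1U << 8) | 5U;
 *	uint32_t hop;
 *
 *	rte_lpm_add(t, dst & ~0xffU, 24, 1);
 *	if (rte_lpm_lookup(t, dst, &hop) == 0)
 *		printf("192.168.1.5 routed via port %u\n", hop);
 */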
cdev_map_out; + + if (qconf->nb_rx_queue == 0) { + RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id); + return 0; + } + + RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->nb_rx_queue; i++) { + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + RTE_LOG(INFO, IPSEC, + " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", + lcore_id, portid, queueid); + } + + while (1) { + cur_tsc = rte_rdtsc(); + + /* TX queue buffer drain */ + diff_tsc = cur_tsc - prev_tsc; + + if (unlikely(diff_tsc > drain_tsc)) { + drain_buffers(qconf); + prev_tsc = cur_tsc; + } + + /* Read packet from RX queues */ + for (i = 0; i < qconf->nb_rx_queue; ++i) { + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + nb_rx = rte_eth_rx_burst(portid, queueid, + pkts, MAX_PKT_BURST); + + if (nb_rx > 0) + process_pkts(qconf, pkts, nb_rx, portid); + } + } +} + +static int32_t +check_params(void) +{ + uint8_t lcore, portid, nb_ports; + uint16_t i; + int32_t socket_id; + + if (lcore_params == NULL) { + printf("Error: No port/queue/core mappings\n"); + return -1; + } + + nb_ports = rte_eth_dev_count(); + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { + printf("error: lcore %hhu is not enabled in " + "lcore mask\n", lcore); + return -1; + } + socket_id = rte_lcore_to_socket_id(lcore); + if (socket_id != 0 && numa_on == 0) { + printf("warning: lcore %hhu is on socket %d " + "with numa off\n", + lcore, socket_id); + } + portid = lcore_params[i].port_id; + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("port %u is not enabled in port mask\n", portid); + return -1; + } + if (portid >= nb_ports) { + printf("port %u is not present on the board\n", portid); + return -1; + } + } + return 0; +} + +static uint8_t +get_port_nb_rx_queues(const uint8_t port) +{ + int32_t queue = -1; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + if (lcore_params[i].port_id == port && + lcore_params[i].queue_id > queue) + queue = lcore_params[i].queue_id; + } + return (uint8_t)(++queue); +} + +static int32_t +init_lcore_rx_queues(void) +{ + uint16_t i, nb_rx_queue; + uint8_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + nb_rx_queue = lcore_conf[lcore].nb_rx_queue; + if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { + printf("error: too many queues (%u) for lcore: %u\n", + nb_rx_queue + 1, lcore); + return -1; + } + lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = + lcore_params[i].port_id; + lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id = + lcore_params[i].queue_id; + lcore_conf[lcore].nb_rx_queue++; + } + return 0; +} + +/* display usage */ +static void +print_usage(const char *prgname) +{ + printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK" + " --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore)]" + " --single-sa SAIDX -f CONFIG_FILE\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " -P : enable promiscuous mode\n" + " -u PORTMASK: hexadecimal bitmask of unprotected ports\n" + " --"OPTION_CONFIG": (port,queue,lcore): " + "rx queues configuration\n" + " --single-sa SAIDX: use single SA index for outbound, " + "bypassing the SP\n" + " -f CONFIG_FILE: Configuration file path\n", + prgname); +} + +static int32_t +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return 
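/*
 * For reference, the drain interval used by main_loop() works out as
 * drain_tsc = ceil(tsc_hz / US_PER_S) * BURST_TX_DRAIN_US, i.e. TSC
 * cycles per microsecond times the drain period. Assuming a 2 GHz TSC
 * and a 100 us drain period (the constant is defined earlier in this
 * file), that is 2000 * 100 = 200000 cycles between forced flushes of
 * partly filled TX buffers.
 */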
-1; + + if ((pm == 0) && errno) + return -1; + + return pm; +} + +static int32_t +parse_decimal(const char *str) +{ + char *end = NULL; + unsigned long num; + + num = strtoul(str, &end, 10); + if ((str[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + return num; +} + +static int32_t +parse_config(const char *q_arg) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + enum fieldnames { + FLD_PORT = 0, + FLD_QUEUE, + FLD_LCORE, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + int32_t i; + uint32_t size; + + nb_lcore_params = 0; + + while ((p = strchr(p0, '(')) != NULL) { + ++p; + p0 = strchr(p, ')'); + if (p0 == NULL) + return -1; + + size = p0 - p; + if (size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != + _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + if (nb_lcore_params >= MAX_LCORE_PARAMS) { + printf("exceeded max number of lcore params: %hu\n", + nb_lcore_params); + return -1; + } + lcore_params_array[nb_lcore_params].port_id = + (uint8_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = + (uint8_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = + (uint8_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; + return 0; +} + +#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt))) +static int32_t +parse_args_long_options(struct option *lgopts, int32_t option_index) +{ + int32_t ret = -1; + const char *optname = lgopts[option_index].name; + + if (__STRNCMP(optname, OPTION_CONFIG)) { + ret = parse_config(optarg); + if (ret) + printf("invalid config\n"); + } + + if (__STRNCMP(optname, OPTION_SINGLE_SA)) { + ret = parse_decimal(optarg); + if (ret != -1) { + single_sa = 1; + single_sa_idx = ret; + printf("Configured with single SA index %u\n", + single_sa_idx); + ret = 0; + } + } + + return ret; +} +#undef __STRNCMP + +static int32_t +parse_args(int32_t argc, char **argv) +{ + int32_t opt, ret; + char **argvopt; + int32_t option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {OPTION_CONFIG, 1, 0, 0}, + {OPTION_SINGLE_SA, 1, 0, 0}, + {NULL, 0, 0, 0} + }; + int32_t f_present = 0; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:Pu:f:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask == 0) { + printf("invalid portmask\n"); + print_usage(prgname); + return -1; + } + break; + case 'P': + printf("Promiscuous mode selected\n"); + promiscuous_on = 1; + break; + case 'u': + unprotected_port_mask = parse_portmask(optarg); + if (unprotected_port_mask == 0) { + printf("invalid unprotected portmask\n"); + print_usage(prgname); + return -1; + } + break; + case 'f': + if (f_present == 1) { + printf("\"-f\" option present more than " + "once!\n"); + print_usage(prgname); + return -1; + } + if (parse_cfg_file(optarg) < 0) { + printf("parsing file \"%s\" failed\n", + optarg); + print_usage(prgname); + return -1; + } + f_present = 1; + break; + case 0: + if (parse_args_long_options(lgopts, option_index)) { + print_usage(prgname); + return -1; + } + break; + default: + print_usage(prgname); + return -1; + } + } + + if (f_present == 0) { + printf("Mandatory option \"-f\" not present\n"); + return -1; + } + + if (optind >= 
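/*
 * Putting the options parsed above together, a typical invocation looks
 * like the following (EAL core list and port numbers are illustrative):
 *
 *	ipsec-secgw -l 20,21 -n 4 -- -p 0x3 -P -u 0x1 \
 *		--config="(0,0,20),(1,0,21)" -f ep0.cfg
 *
 * i.e. ports 0 and 1 enabled, port 0 unprotected, and RX queue 0 of
 * each port polled by lcores 20 and 21 respectively.
 */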
0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 1; /* reset getopt lib */ + return ret; +} + +static void +print_ethaddr(const char *name, const struct ether_addr *eth_addr) +{ + char buf[ETHER_ADDR_FMT_SIZE]; + ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); + printf("%s%s", name, buf); +} + +/* Check the link status of all ports in up to 9s, and print them finally */ +static void +check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) +{ +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ + uint8_t portid, count, all_ports_up, print_flag = 0; + struct rte_eth_link link; + + printf("\nChecking link status"); + fflush(stdout); + for (count = 0; count <= MAX_CHECK_TIME; count++) { + all_ports_up = 1; + for (portid = 0; portid < port_num; portid++) { + if ((port_mask & (1 << portid)) == 0) + continue; + memset(&link, 0, sizeof(link)); + rte_eth_link_get_nowait(portid, &link); + /* print link status if flag set */ + if (print_flag == 1) { + if (link.link_status) + printf("Port %d Link Up - speed %u " + "Mbps - %s\n", (uint8_t)portid, + (uint32_t)link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex")); + else + printf("Port %d Link Down\n", + (uint8_t)portid); + continue; + } + /* clear all_ports_up flag if any link down */ + if (link.link_status == ETH_LINK_DOWN) { + all_ports_up = 0; + break; + } + } + /* after finally printing all link status, get out */ + if (print_flag == 1) + break; + + if (all_ports_up == 0) { + printf("."); + fflush(stdout); + rte_delay_ms(CHECK_INTERVAL); + } + + /* set the print_flag if all ports up or timeout */ + if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { + print_flag = 1; + printf("done\n"); + } + } +} + +static int32_t +add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id, + uint16_t qp, struct lcore_params *params, + struct ipsec_ctx *ipsec_ctx, + const struct rte_cryptodev_capabilities *cipher, + const struct rte_cryptodev_capabilities *auth) +{ + int32_t ret = 0; + unsigned long i; + struct cdev_key key = { 0 }; + + key.lcore_id = params->lcore_id; + if (cipher) + key.cipher_algo = cipher->sym.cipher.algo; + if (auth) + key.auth_algo = auth->sym.auth.algo; + + ret = rte_hash_lookup(map, &key); + if (ret != -ENOENT) + return 0; + + for (i = 0; i < ipsec_ctx->nb_qps; i++) + if (ipsec_ctx->tbl[i].id == cdev_id) + break; + + if (i == ipsec_ctx->nb_qps) { + if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) { + printf("Maximum number of crypto devices assigned to " + "a core, increase MAX_QP_PER_LCORE value\n"); + return 0; + } + ipsec_ctx->tbl[i].id = cdev_id; + ipsec_ctx->tbl[i].qp = qp; + ipsec_ctx->nb_qps++; + printf("%s cdev mapping: lcore %u using cdev %u qp %u " + "(cdev_id_qp %lu)\n", str, key.lcore_id, + cdev_id, qp, i); + } + + ret = rte_hash_add_key_data(map, &key, (void *)i); + if (ret < 0) { + printf("Failed to insert cdev mapping for (lcore %u, " + "cdev %u, qp %u), errno %d\n", + key.lcore_id, ipsec_ctx->tbl[i].id, + ipsec_ctx->tbl[i].qp, ret); + return 0; + } + + return 1; +} + +static int32_t +add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id, + uint16_t qp, struct lcore_params *params) +{ + int32_t ret = 0; + const struct rte_cryptodev_capabilities *i, *j; + struct rte_hash *map; + struct lcore_conf *qconf; + struct ipsec_ctx *ipsec_ctx; + const char *str; + + qconf = &lcore_conf[params->lcore_id]; + + if ((unprotected_port_mask & (1 << params->port_id)) == 0) { + map = cdev_map_out; 
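/*
 * cdev_map_in/cdev_map_out hash a (lcore, cipher algo, auth algo)
 * triple to a slot of that lcore's ipsec_ctx->tbl[]. A lookup sketch;
 * the algorithm choices are illustrative:
 *
 *	struct cdev_key key = {
 *		.lcore_id = (uint16_t)rte_lcore_id(),
 *		.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *	};
 *	void *data;
 *	unsigned long qp_idx;
 *
 *	if (rte_hash_lookup_data(map, &key, &data) >= 0)
 *		qp_idx = (unsigned long)data;
 */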
+ ipsec_ctx = &qconf->outbound; + str = "Outbound"; + } else { + map = cdev_map_in; + ipsec_ctx = &qconf->inbound; + str = "Inbound"; + } + + /* Required cryptodevs with operation chaining */ + if (!(dev_info->feature_flags & + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) + return ret; + + for (i = dev_info->capabilities; + i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) { + if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) + continue; + + for (j = dev_info->capabilities; + j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) { + if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) + continue; + + ret |= add_mapping(map, str, cdev_id, qp, params, + ipsec_ctx, i, j); + } + } + + return ret; +} + +static int32_t +cryptodevs_init(void) +{ + struct rte_cryptodev_config dev_conf; + struct rte_cryptodev_qp_conf qp_conf; + uint16_t idx, max_nb_qps, qp, i; + int16_t cdev_id; + struct rte_hash_parameters params = { 0 }; + + params.entries = CDEV_MAP_ENTRIES; + params.key_len = sizeof(struct cdev_key); + params.hash_func = rte_jhash; + params.hash_func_init_val = 0; + params.socket_id = rte_socket_id(); + + params.name = "cdev_map_in"; + cdev_map_in = rte_hash_create(&params); + if (cdev_map_in == NULL) + rte_panic("Failed to create cdev_map hash table, errno = %d\n", + rte_errno); + + params.name = "cdev_map_out"; + cdev_map_out = rte_hash_create(&params); + if (cdev_map_out == NULL) + rte_panic("Failed to create cdev_map hash table, errno = %d\n", + rte_errno); + + printf("lcore/cryptodev/qp mappings:\n"); + + idx = 0; + /* Start from last cdev id to give HW priority */ + for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) { + struct rte_cryptodev_info cdev_info; + + rte_cryptodev_info_get(cdev_id, &cdev_info); + + if (nb_lcore_params > cdev_info.max_nb_queue_pairs) + max_nb_qps = cdev_info.max_nb_queue_pairs; + else + max_nb_qps = nb_lcore_params; + + qp = 0; + i = 0; + while (qp < max_nb_qps && i < nb_lcore_params) { + if (add_cdev_mapping(&cdev_info, cdev_id, qp, + &lcore_params[idx])) + qp++; + idx++; + idx = idx % nb_lcore_params; + i++; + } + + if (qp == 0) + continue; + + dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id); + dev_conf.nb_queue_pairs = qp; + dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS; + dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ; + + if (rte_cryptodev_configure(cdev_id, &dev_conf)) + rte_panic("Failed to initialize cryptodev %u\n", + cdev_id); + + qp_conf.nb_descriptors = CDEV_QUEUE_DESC; + for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++) + if (rte_cryptodev_queue_pair_setup(cdev_id, qp, + &qp_conf, dev_conf.socket_id)) + rte_panic("Failed to setup queue %u for " + "cdev_id %u\n", qp, cdev_id); + + if (rte_cryptodev_start(cdev_id)) + rte_panic("Failed to start cryptodev %u\n", + cdev_id); + } + + printf("\n"); + + return 0; +} + +static void +port_init(uint8_t portid) +{ + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf *txconf; + uint16_t nb_tx_queue, nb_rx_queue; + uint16_t tx_queueid, rx_queueid, queue, lcore_id; + int32_t ret, socket_id; + struct lcore_conf *qconf; + struct ether_addr ethaddr; + + rte_eth_dev_info_get(portid, &dev_info); + + printf("Configuring device port %u:\n", portid); + + rte_eth_macaddr_get(portid, &ethaddr); + ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr); + print_ethaddr("Address: ", &ethaddr); + printf("\n"); + + nb_rx_queue = get_port_nb_rx_queues(portid); + nb_tx_queue = nb_lcores; + + if (nb_rx_queue > 
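/*
 * nb_rx_queue comes from the --config mapping (highest queue id used on
 * this port, plus one) while one TX queue is created per enabled lcore.
 * For example, --config="(0,0,20),(0,1,21)" with two enabled lcores
 * gives port 0 nb_rx_queue == 2 and nb_tx_queue == 2.
 */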
dev_info.max_rx_queues) + rte_exit(EXIT_FAILURE, "Error: queue %u not available " + "(max rx queue is %u)\n", + nb_rx_queue, dev_info.max_rx_queues); + + if (nb_tx_queue > dev_info.max_tx_queues) + rte_exit(EXIT_FAILURE, "Error: queue %u not available " + "(max tx queue is %u)\n", + nb_tx_queue, dev_info.max_tx_queues); + + printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n", + nb_rx_queue, nb_tx_queue); + + ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue, + &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: " + "err=%d, port=%d\n", ret, portid); + + /* init one TX queue per lcore */ + tx_queueid = 0; + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + if (numa_on) + socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id); + else + socket_id = 0; + + /* init TX queue */ + printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id); + + txconf = &dev_info.default_txconf; + txconf->txq_flags = 0; + + ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd, + socket_id, txconf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: " + "err=%d, port=%d\n", ret, portid); + + qconf = &lcore_conf[lcore_id]; + qconf->tx_queue_id[portid] = tx_queueid; + tx_queueid++; + + /* init RX queues */ + for (queue = 0; queue < qconf->nb_rx_queue; ++queue) { + if (portid != qconf->rx_queue_list[queue].port_id) + continue; + + rx_queueid = qconf->rx_queue_list[queue].queue_id; + + printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid, + socket_id); + + ret = rte_eth_rx_queue_setup(portid, rx_queueid, + nb_rxd, socket_id, NULL, + socket_ctx[socket_id].mbuf_pool); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "rte_eth_rx_queue_setup: err=%d, " + "port=%d\n", ret, portid); + } + } + printf("\n"); +} + +static void +pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf) +{ + char s[64]; + + snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id); + ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf, + MEMPOOL_CACHE_SIZE, ipsec_metadata_size(), + RTE_MBUF_DEFAULT_BUF_SIZE, + socket_id); + if (ctx->mbuf_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", + socket_id); + else + printf("Allocated mbuf pool on socket %d\n", socket_id); +} + +int32_t +main(int32_t argc, char **argv) +{ + int32_t ret; + uint32_t lcore_id, nb_ports; + uint8_t portid, socket_id; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid parameters\n"); + + if ((unprotected_port_mask & enabled_port_mask) != + unprotected_port_mask) + rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n", + unprotected_port_mask); + + nb_ports = rte_eth_dev_count(); + + if (check_params() < 0) + rte_exit(EXIT_FAILURE, "check_params failed\n"); + + ret = init_lcore_rx_queues(); + if (ret < 0) + rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n"); + + nb_lcores = rte_lcore_count(); + + /* Replicate each context per socket */ + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + if (numa_on) + socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id); + else + socket_id = 0; + + if (socket_ctx[socket_id].mbuf_pool) + continue; + + sa_init(&socket_ctx[socket_id], socket_id); + + 
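/*
 * The pool_init() call below sizes the mbuf private area with
 * ipsec_metadata_size(), which is what makes get_priv() in ipsec.h
 * valid: the per-packet crypto state sits directly behind each struct
 * rte_mbuf. Sketch of the resulting invariant:
 *
 *	struct ipsec_mbuf_metadata *priv = get_priv(m);
 *
 * is safe for any mbuf m allocated from this pool, because priv_size
 * equals sizeof(struct ipsec_mbuf_metadata).
 */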
sp4_init(&socket_ctx[socket_id], socket_id); + + sp6_init(&socket_ctx[socket_id], socket_id); + + rt_init(&socket_ctx[socket_id], socket_id); + + pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF); + } + + for (portid = 0; portid < nb_ports; portid++) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + port_init(portid); + } + + cryptodevs_init(); + + /* start ports */ + for (portid = 0; portid < nb_ports; portid++) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + /* Start device */ + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " + "err=%d, port=%d\n", ret, portid); + /* + * If enabled, put device in promiscuous mode. + * This allows IO forwarding mode to forward packets + * to itself through 2 cross-connected ports of the + * target machine. + */ + if (promiscuous_on) + rte_eth_promiscuous_enable(portid); + } + + check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask); + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/src/seastar/dpdk/examples/ipsec-secgw/ipsec.c b/src/seastar/dpdk/examples/ipsec-secgw/ipsec.c new file mode 100644 index 00000000..edca5f02 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/ipsec.c @@ -0,0 +1,235 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> + +#include <rte_branch_prediction.h> +#include <rte_log.h> +#include <rte_crypto.h> +#include <rte_cryptodev.h> +#include <rte_mbuf.h> +#include <rte_hash.h> + +#include "ipsec.h" +#include "esp.h" + +static inline int +create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa) +{ + struct rte_cryptodev_info cdev_info; + unsigned long cdev_id_qp = 0; + int32_t ret; + struct cdev_key key = { 0 }; + + key.lcore_id = (uint8_t)rte_lcore_id(); + + key.cipher_algo = (uint8_t)sa->cipher_algo; + key.auth_algo = (uint8_t)sa->auth_algo; + + ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key, + (void **)&cdev_id_qp); + if (ret < 0) { + RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, " + "auth_algo %u\n", key.lcore_id, key.cipher_algo, + key.auth_algo); + return -1; + } + + RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev " + "%u qp %u\n", sa->spi, + ipsec_ctx->tbl[cdev_id_qp].id, + ipsec_ctx->tbl[cdev_id_qp].qp); + + sa->crypto_session = rte_cryptodev_sym_session_create( + ipsec_ctx->tbl[cdev_id_qp].id, sa->xforms); + + rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info); + if (cdev_info.sym.max_nb_sessions_per_qp > 0) { + ret = rte_cryptodev_queue_pair_attach_sym_session( + ipsec_ctx->tbl[cdev_id_qp].qp, + sa->crypto_session); + if (ret < 0) { + RTE_LOG(ERR, IPSEC, + "Session cannot be attached to qp %u ", + ipsec_ctx->tbl[cdev_id_qp].qp); + return -1; + } + } + sa->cdev_id_qp = cdev_id_qp; + + return 0; +} + +static inline void +enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop) +{ + int32_t ret, i; + + cqp->buf[cqp->len++] = cop; + + if (cqp->len == MAX_PKT_BURST) { + ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, + cqp->buf, cqp->len); + if (ret < cqp->len) { + RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:" + " enqueued %u crypto ops out of %u\n", + cqp->id, cqp->qp, + ret, cqp->len); + for (i = ret; i < cqp->len; i++) + rte_pktmbuf_free(cqp->buf[i]->sym->m_src); + } + cqp->in_flight += ret; + cqp->len = 0; + } +} + +static inline void +ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, + struct rte_mbuf *pkts[], struct ipsec_sa *sas[], + uint16_t nb_pkts) +{ + int32_t ret = 0, i; + struct ipsec_mbuf_metadata *priv; + struct ipsec_sa *sa; + + for (i = 0; i < nb_pkts; i++) { + if (unlikely(sas[i] == NULL)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + rte_prefetch0(sas[i]); + rte_prefetch0(pkts[i]); + + priv = get_priv(pkts[i]); + sa = sas[i]; + priv->sa = sa; + + priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + rte_prefetch0(&priv->sym_cop); + priv->cop.sym = &priv->sym_cop; + + if ((unlikely(sa->crypto_session == NULL)) && + create_session(ipsec_ctx, sa)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + rte_crypto_op_attach_sym_session(&priv->cop, + sa->crypto_session); + + ret = xform_func(pkts[i], sa, &priv->cop); + if (unlikely(ret)) { + rte_pktmbuf_free(pkts[i]); + continue; + } + + RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps); + enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop); + } +} + +static inline int +ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, + struct rte_mbuf *pkts[], uint16_t max_pkts) +{ + int32_t nb_pkts = 0, ret = 0, i, j, nb_cops; + struct ipsec_mbuf_metadata *priv; + struct rte_crypto_op *cops[max_pkts]; + struct ipsec_sa *sa; + struct rte_mbuf *pkt; + + for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts 
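/*
 * ipsec_enqueue() above only pushes a burst to the device once a
 * queue-pair buffer fills, so completions are harvested here
 * asynchronously across polls; cqp->in_flight counts ops still owned by
 * the cryptodev. The underlying device API pattern is, as a sketch:
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
 *			cqp->buf, cqp->len);
 *	uint16_t got = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
 *			cops, space_left);
 */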
< max_pkts; i++) { + struct cdev_qp *cqp; + + cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++]; + if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps) + ipsec_ctx->last_qp %= ipsec_ctx->nb_qps; + + if (cqp->in_flight == 0) + continue; + + nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, + cops, max_pkts - nb_pkts); + + cqp->in_flight -= nb_cops; + + for (j = 0; j < nb_cops; j++) { + pkt = cops[j]->sym->m_src; + rte_prefetch0(pkt); + + priv = get_priv(pkt); + sa = priv->sa; + + RTE_ASSERT(sa != NULL); + + ret = xform_func(pkt, sa, cops[j]); + if (unlikely(ret)) + rte_pktmbuf_free(pkt); + else + pkts[nb_pkts++] = pkt; + } + } + + /* return packets */ + return nb_pkts; +} + +uint16_t +ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t nb_pkts, uint16_t len) +{ + struct ipsec_sa *sas[nb_pkts]; + + inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts); + + ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts); + + return ipsec_dequeue(esp_inbound_post, ctx, pkts, len); +} + +uint16_t +ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len) +{ + struct ipsec_sa *sas[nb_pkts]; + + outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts); + + ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts); + + return ipsec_dequeue(esp_outbound_post, ctx, pkts, len); +} diff --git a/src/seastar/dpdk/examples/ipsec-secgw/ipsec.h b/src/seastar/dpdk/examples/ipsec-secgw/ipsec.h new file mode 100644 index 00000000..fe426614 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/ipsec.h @@ -0,0 +1,234 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __IPSEC_H__ +#define __IPSEC_H__ + +#include <stdint.h> + +#include <rte_byteorder.h> +#include <rte_crypto.h> + +#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 +#define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2 +#define RTE_LOGTYPE_IPSEC_IPIP RTE_LOGTYPE_USER3 + +#define MAX_PKT_BURST 32 +#define MAX_QP_PER_LCORE 256 + +#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */ + +#define uint32_t_to_char(ip, a, b, c, d) do {\ + *a = (uint8_t)(ip >> 24 & 0xff);\ + *b = (uint8_t)(ip >> 16 & 0xff);\ + *c = (uint8_t)(ip >> 8 & 0xff);\ + *d = (uint8_t)(ip & 0xff);\ + } while (0) + +#define DEFAULT_MAX_CATEGORIES 1 + +#define IPSEC_SA_MAX_ENTRIES (128) /* must be power of 2, max 2 power 30 */ +#define SPI2IDX(spi) (spi & (IPSEC_SA_MAX_ENTRIES - 1)) +#define INVALID_SPI (0) + +#define DISCARD (0x80000000) +#define BYPASS (0x40000000) +#define PROTECT_MASK (0x3fffffff) +#define PROTECT(sa_idx) (SPI2IDX(sa_idx) & PROTECT_MASK) /* SA idx 30 bits */ + +#define IPSEC_XFORM_MAX 2 + +#define IP6_VERSION (6) + +struct rte_crypto_xform; +struct ipsec_xform; +struct rte_cryptodev_session; +struct rte_mbuf; + +struct ipsec_sa; + +typedef int32_t (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa, + struct rte_crypto_op *cop); + +struct ip_addr { + union { + uint32_t ip4; + union { + uint64_t ip6[2]; + uint8_t ip6_b[16]; + } ip6; + } ip; +}; + +#define MAX_KEY_SIZE 32 + +struct ipsec_sa { + uint32_t spi; + uint32_t cdev_id_qp; + uint64_t seq; + uint32_t salt; + struct rte_cryptodev_sym_session *crypto_session; + enum rte_crypto_cipher_algorithm cipher_algo; + enum rte_crypto_auth_algorithm auth_algo; + uint16_t digest_len; + uint16_t iv_len; + uint16_t block_size; + uint16_t flags; +#define IP4_TUNNEL (1 << 0) +#define IP6_TUNNEL (1 << 1) +#define TRANSPORT (1 << 2) + struct ip_addr src; + struct ip_addr dst; + uint8_t cipher_key[MAX_KEY_SIZE]; + uint16_t cipher_key_len; + uint8_t auth_key[MAX_KEY_SIZE]; + uint16_t auth_key_len; + uint16_t aad_len; + struct rte_crypto_sym_xform *xforms; +} __rte_cache_aligned; + +struct ipsec_mbuf_metadata { + uint8_t buf[32]; + struct ipsec_sa *sa; + struct rte_crypto_op cop; + struct rte_crypto_sym_op sym_cop; +} __rte_cache_aligned; + +struct cdev_qp { + uint16_t id; + uint16_t qp; + uint16_t in_flight; + uint16_t len; + struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); +}; + +struct ipsec_ctx { + struct rte_hash *cdev_map; + struct sp_ctx *sp4_ctx; + struct sp_ctx *sp6_ctx; + struct sa_ctx *sa_ctx; + uint16_t nb_qps; + uint16_t last_qp; + struct cdev_qp tbl[MAX_QP_PER_LCORE]; +}; + +struct cdev_key { + uint16_t lcore_id; + uint8_t cipher_algo; + uint8_t auth_algo; +}; + +struct socket_ctx { + struct sa_ctx *sa_in; + struct sa_ctx *sa_out; + struct sp_ctx *sp_ip4_in; + struct sp_ctx *sp_ip4_out; + struct sp_ctx *sp_ip6_in; + struct sp_ctx *sp_ip6_out; + struct rt_ctx *rt_ip4; + struct rt_ctx *rt_ip6; + struct rte_mempool *mbuf_pool; +}; + +struct cnt_blk { + uint32_t salt; + uint64_t iv; + uint32_t cnt; +} __attribute__((packed)); + +uint16_t +ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint16_t nb_pkts, uint16_t len); + +uint16_t +ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], + uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len); + +static inline uint16_t +ipsec_metadata_size(void) +{ + return sizeof(struct ipsec_mbuf_metadata); +} + +static inline struct ipsec_mbuf_metadata * +get_priv(struct rte_mbuf *m) +{ + return RTE_PTR_ADD(m, sizeof(struct rte_mbuf)); +} + +static inline void * 
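/*
 * The helpers below carve the 32-byte priv->buf[] into a counter block
 * (bytes 0..15) and AAD space (bytes 16..31). For the counter-mode
 * ciphers, the ESP code in this patch fills the block roughly as
 * follows; this is a sketch, not a verbatim quote of esp.c:
 *
 *	struct cnt_blk *icb = get_cnt_blk(m);
 *	icb->salt = sa->salt;
 *	icb->iv = sa->seq;
 *	icb->cnt = rte_cpu_to_be_32(1);
 */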
+get_cnt_blk(struct rte_mbuf *m) +{ + struct ipsec_mbuf_metadata *priv = get_priv(m); + + return &priv->buf[0]; +} + +static inline void * +get_aad(struct rte_mbuf *m) +{ + struct ipsec_mbuf_metadata *priv = get_priv(m); + + return &priv->buf[16]; +} + +static inline void * +get_sym_cop(struct rte_crypto_op *cop) +{ + return (cop + 1); +} + +int +inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx); + +void +inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[], + struct ipsec_sa *sa[], uint16_t nb_pkts); + +void +outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], + struct ipsec_sa *sa[], uint16_t nb_pkts); + +void +sp4_init(struct socket_ctx *ctx, int32_t socket_id); + +void +sp6_init(struct socket_ctx *ctx, int32_t socket_id); + +void +sa_init(struct socket_ctx *ctx, int32_t socket_id); + +void +rt_init(struct socket_ctx *ctx, int32_t socket_id); + +#endif /* __IPSEC_H__ */ diff --git a/src/seastar/dpdk/examples/ipsec-secgw/parser.c b/src/seastar/dpdk/examples/ipsec-secgw/parser.c new file mode 100644 index 00000000..9d0ea462 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/parser.c @@ -0,0 +1,591 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include <rte_common.h> +#include <rte_crypto.h> + +#include <cmdline_parse_string.h> +#include <cmdline_parse_num.h> +#include <cmdline_parse_ipaddr.h> +#include <cmdline_socket.h> +#include <cmdline.h> + +#include "ipsec.h" +#include "parser.h" + +#define PARSE_DELIMITER " \f\n\r\t\v" +static int +parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens) +{ + uint32_t i; + + if ((string == NULL) || + (tokens == NULL) || + (*n_tokens < 1)) + return -EINVAL; + + for (i = 0; i < *n_tokens; i++) { + tokens[i] = strtok_r(string, PARSE_DELIMITER, &string); + if (tokens[i] == NULL) + break; + } + + if ((i == *n_tokens) && + (NULL != strtok_r(string, PARSE_DELIMITER, &string))) + return -E2BIG; + + *n_tokens = i; + return 0; +} + +#define INADDRSZ 4 +#define IN6ADDRSZ 16 + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr(digits, ch); + if (pch != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return 0; + if (!saw_digit) { + if (++octets > 4) + return 0; + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } else + return 0; + } + if (octets < 4) + return 0; + + memcpy(dst, tmp, INADDRSZ); + return 1; +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; + const char *xdigits = 0, *curtok = 0; + int ch = 0, saw_xdigit = 0, count_xdigit = 0; + unsigned int val = 0; + unsigned dbloct_count = 0; + + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return 0; + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr((xdigits = xdigits_l), ch); + if (pch == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return 0; + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return 0; + colonp = tp; + continue; + } else if (*src == '\0') { + return 0; + } + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + dbloct_count++; + continue; + } + if (ch == '.' 
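/*
 * parse_tokenize_string() above splits a configuration line in place on
 * whitespace. Usage sketch; the line content is illustrative:
 *
 *	char line[] = "rt ipv4 dst 192.168.1.0/24 port 1";
 *	char *tok[32];
 *	uint32_t n = RTE_DIM(tok);
 *
 *	parse_tokenize_string(line, tok, &n);
 *
 * after which n is 6 and tok[0]..tok[5] point at "rt" through "1"
 * (a return value of 0 indicates success).
 */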
&& ((tp + INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + dbloct_count += 2; + break; /* '\0' was seen by inet_pton4(). */ + } + return 0; + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + dbloct_count++; + } + if (colonp != NULL) { + /* if we already have 8 double octets, having a colon + * means error */ + if (dbloct_count == 8) + return 0; + + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. + */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + memcpy(dst, tmp, IN6ADDRSZ); + return 1; +} + +int +parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask) +{ + char ip_str[256] = {0}; + char *pch; + + pch = strchr(token, '/'); + if (pch != NULL) { + strncpy(ip_str, token, pch - token); + pch += 1; + if (is_str_num(pch) != 0) + return -EINVAL; + if (mask) + *mask = atoi(pch); + } else { + strncpy(ip_str, token, sizeof(ip_str) - 1); + if (mask) + *mask = 0; + } + + if (strlen(ip_str) >= INET_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton4(ip_str, (unsigned char *)ipv4) != 1) + return -EINVAL; + + return 0; +} + +int +parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask) +{ + char ip_str[256] = {0}; + char *pch; + + pch = strchr(token, '/'); + if (pch != NULL) { + strncpy(ip_str, token, pch - token); + pch += 1; + if (is_str_num(pch) != 0) + return -EINVAL; + if (mask) + *mask = atoi(pch); + } else { + strncpy(ip_str, token, sizeof(ip_str) - 1); + if (mask) + *mask = 0; + } + + if (strlen(ip_str) >= INET6_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton6(ip_str, (unsigned char *)ipv6) != 1) + return -EINVAL; + + return 0; +} + +int +parse_range(const char *token, uint16_t *low, uint16_t *high) +{ + char ch; + char num_str[20]; + uint32_t pos; + int range_low = -1; + int range_high = -1; + + if (!low || !high) + return -1; + + memset(num_str, 0, 20); + pos = 0; + + while ((ch = *token++) != '\0') { + if (isdigit(ch)) { + if (pos >= 19) + return -1; + num_str[pos++] = ch; + } else if (ch == ':') { + if (range_low != -1) + return -1; + range_low = atoi(num_str); + memset(num_str, 0, 20); + pos = 0; + } + } + + if (strlen(num_str) == 0) + return -1; + + range_high = atoi(num_str); + + *low = (uint16_t)range_low; + *high = (uint16_t)range_high; + + return 0; +} + +/** sp add parse */ +struct cfg_sp_add_cfg_item { + cmdline_fixed_string_t sp_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_sp_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_sp_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK((parse_tokenize_string(params->multi_string, tokens, + &n_tokens) == 0), status, "too many arguments"); + + if (status->status < 0) + return; + + if (strcmp(tokens[0], "ipv4") == 0) { + parse_sp4_tokens(tokens, n_tokens, status); + if (status->status < 0) + return; + } else if (strcmp(tokens[0], "ipv6") == 0) { + parse_sp6_tokens(tokens, n_tokens, status); + if (status->status < 0) + return; + } else { + APP_CHECK(0, status, "unrecognizable input %s\n", + tokens[0]); + return; + } +} + +static cmdline_parse_token_string_t cfg_sp_add_sp_str = + 
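/*
 * parse_range() above accepts "low:high" port ranges, e.g.:
 *
 *	uint16_t lo, hi;
 *	parse_range("1024:2048", &lo, &hi);
 *
 * returns 0 and leaves lo == 1024 and hi == 2048.
 */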
TOKEN_STRING_INITIALIZER(struct cfg_sp_add_cfg_item, + sp_keyword, "sp"); + +static cmdline_parse_token_string_t cfg_sp_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_sp_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_sp_add_rule = { + .f = cfg_sp_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_sp_add_sp_str, + (void *) &cfg_sp_add_multi_str, + NULL, + }, +}; + +/* sa add parse */ +struct cfg_sa_add_cfg_item { + cmdline_fixed_string_t sa_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_sa_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_sa_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK(parse_tokenize_string(params->multi_string, tokens, + &n_tokens) == 0, status, "too many arguments\n"); + + parse_sa_tokens(tokens, n_tokens, status); +} + +static cmdline_parse_token_string_t cfg_sa_add_sa_str = + TOKEN_STRING_INITIALIZER(struct cfg_sa_add_cfg_item, + sa_keyword, "sa"); + +static cmdline_parse_token_string_t cfg_sa_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_sa_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_sa_add_rule = { + .f = cfg_sa_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_sa_add_sa_str, + (void *) &cfg_sa_add_multi_str, + NULL, + }, +}; + +/* rt add parse */ +struct cfg_rt_add_cfg_item { + cmdline_fixed_string_t rt_keyword; + cmdline_multi_string_t multi_string; +}; + +static void +cfg_rt_add_cfg_item_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, void *data) +{ + struct cfg_rt_add_cfg_item *params = parsed_result; + char *tokens[32]; + uint32_t n_tokens = RTE_DIM(tokens); + struct parse_status *status = (struct parse_status *)data; + + APP_CHECK(parse_tokenize_string( + params->multi_string, tokens, &n_tokens) == 0, + status, "too many arguments\n"); + if (status->status < 0) + return; + + parse_rt_tokens(tokens, n_tokens, status); +} + +static cmdline_parse_token_string_t cfg_rt_add_rt_str = + TOKEN_STRING_INITIALIZER(struct cfg_rt_add_cfg_item, + rt_keyword, "rt"); + +static cmdline_parse_token_string_t cfg_rt_add_multi_str = + TOKEN_STRING_INITIALIZER(struct cfg_rt_add_cfg_item, multi_string, + TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cfg_rt_add_rule = { + .f = cfg_rt_add_cfg_item_parsed, + .data = NULL, + .help_str = "", + .tokens = { + (void *) &cfg_rt_add_rt_str, + (void *) &cfg_rt_add_multi_str, + NULL, + }, +}; + +/** set of cfg items */ +cmdline_parse_ctx_t ipsec_ctx[] = { + (cmdline_parse_inst_t *)&cfg_sp_add_rule, + (cmdline_parse_inst_t *)&cfg_sa_add_rule, + (cmdline_parse_inst_t *)&cfg_rt_add_rule, + NULL, +}; + +int +parse_cfg_file(const char *cfg_filename) +{ + struct cmdline *cl = cmdline_stdin_new(ipsec_ctx, ""); + FILE *f = fopen(cfg_filename, "r"); + char str[1024] = {0}, *get_s = NULL; + uint32_t line_num = 0; + struct parse_status status = {0}; + + if (f == NULL) { + rte_panic("Error: invalid file descriptor %s\n", cfg_filename); + goto error_exit; + } + + if (cl == NULL) { + rte_panic("Error: cannot create cmdline instance\n"); + goto error_exit; + } + + cfg_sp_add_rule.data = &status; + cfg_sa_add_rule.data = &status; + cfg_rt_add_rule.data = &status; + + do { + char oneline[1024]; + char *pos; + get_s = fgets(oneline, 1024, f); + + if (!get_s) + break; + + line_num++; + + if 
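/*
 * The read loop below implements the grammar used by the ep0.cfg and
 * ep1.cfg samples: '#' starts a comment and a trailing backslash
 * continues a statement on the next line. An illustrative fragment
 * (the rule values are made up):
 *
 *	#SP ipv4 rules
 *	sp ipv4 out esp protect 5 pri 1 dst 192.168.1.0/24 \
 *	sport 0:65535 dport 0:65535
 */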
(strlen(oneline) > 1022) { + rte_panic("%s:%u: error: " + "the line contains more characters than the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + + /* process comment char '#' */ + if (oneline[0] == '#') + continue; + + pos = strchr(oneline, '#'); + if (pos != NULL) + *pos = '\0'; + + /* process line concatenator '\' */ + pos = strchr(oneline, 92); + if (pos != NULL) { + if (pos != oneline+strlen(oneline) - 2) { + rte_panic("%s:%u: error: " + "no character should exist after '\\'\n", + cfg_filename, line_num); + goto error_exit; + } + + *pos = '\0'; + + if (strlen(oneline) + strlen(str) > 1022) { + rte_panic("%s:%u: error: " + "the concatenated line contains more characters than the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + + strncpy(str + strlen(str), oneline, + strlen(oneline)); + + continue; + } + + /* copy the line to str and process */ + if (strlen(oneline) + strlen(str) > 1022) { + rte_panic("%s:%u: error: " + "the line contains more characters than the parser can handle\n", + cfg_filename, line_num); + goto error_exit; + } + strncpy(str + strlen(str), oneline, + strlen(oneline)); + + str[strlen(str)] = '\n'; + if (cmdline_parse(cl, str) < 0) { + rte_panic("%s:%u: error: parsing \"%s\" failed\n", + cfg_filename, line_num, str); + goto error_exit; + } + + if (status.status < 0) { + rte_panic("%s:%u: error: %s", cfg_filename, + line_num, status.parse_msg); + goto error_exit; + } + + memset(str, 0, 1024); + } while (1); + + cmdline_stdin_exit(cl); + fclose(f); + + return 0; + +error_exit: + if (cl) + cmdline_stdin_exit(cl); + if (f) + fclose(f); + + return -1; +} diff --git a/src/seastar/dpdk/examples/ipsec-secgw/parser.h b/src/seastar/dpdk/examples/ipsec-secgw/parser.h new file mode 100644 index 00000000..d31ae016 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/parser.h @@ -0,0 +1,116 @@ +/* BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> + +#ifndef __PARSER_H +#define __PARSER_H + +struct parse_status { + int status; + char parse_msg[256]; +}; + +#define APP_CHECK(exp, status, fmt, ...) \ +do { \ + if (!(exp)) { \ + sprintf(status->parse_msg, fmt "\n", \ + ## __VA_ARGS__); \ + status->status = -1; \ + } else \ + status->status = 0; \ +} while (0) + +#define APP_CHECK_PRESENCE(val, str, status) \ + APP_CHECK(val == 0, status, \ + "item \"%s\" already present", str) + +#define APP_CHECK_TOKEN_EQUAL(tokens, index, ref, status) \ + APP_CHECK(strcmp(tokens[index], ref) == 0, status, \ + "unrecognized input \"%s\": expect \"%s\"\n", \ + tokens[index], ref) + +static inline int +is_str_num(const char *str) +{ + uint32_t i; + + for (i = 0; i < strlen(str); i++) + if (!isdigit(str[i])) + return -1; + + return 0; +} + +#define APP_CHECK_TOKEN_IS_NUM(tokens, index, status) \ + APP_CHECK(is_str_num(tokens[index]) == 0, status, \ + "input \"%s\" is not valid number string", tokens[index]) + + +#define INCREMENT_TOKEN_INDEX(index, max_num, status) \ +do { \ + APP_CHECK(index + 1 < max_num, status, "reaching the end of " \ + "the token array"); \ + index++; \ +} while (0) + +int +parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask); + +int +parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask); + +int +parse_range(const char *token, uint16_t *low, uint16_t *high); + +void +parse_sp4_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +parse_sp6_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +parse_sa_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +void +parse_rt_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status); + +int +parse_cfg_file(const char *cfg_filename); + +#endif diff --git a/src/seastar/dpdk/examples/ipsec-secgw/rt.c b/src/seastar/dpdk/examples/ipsec-secgw/rt.c new file mode 100644 index 00000000..e03c5f0b --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/rt.c @@ -0,0 +1,235 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Routing Table (RT) + */ +#include <sys/types.h> +#include <rte_lpm.h> +#include <rte_lpm6.h> +#include <rte_errno.h> +#include <rte_ip.h> + +#include "ipsec.h" +#include "parser.h" + +#define RT_IPV4_MAX_RULES 1024 +#define RT_IPV6_MAX_RULES 1024 + +struct ip4_route { + uint32_t ip; + uint8_t depth; + uint8_t if_out; +}; + +struct ip6_route { + uint8_t ip[16]; + uint8_t depth; + uint8_t if_out; +}; + +struct ip4_route rt_ip4[RT_IPV4_MAX_RULES]; +uint32_t nb_rt_ip4; + +struct ip6_route rt_ip6[RT_IPV6_MAX_RULES]; +uint32_t nb_rt_ip6; + +void +parse_rt_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + uint32_t ti; + uint32_t *n_rts = NULL; + struct ip4_route *route_ipv4 = NULL; + struct ip6_route *route_ipv6 = NULL; + + if (strcmp(tokens[0], "ipv4") == 0) { + n_rts = &nb_rt_ip4; + route_ipv4 = &rt_ip4[*n_rts]; + + APP_CHECK(*n_rts <= RT_IPV4_MAX_RULES - 1, status, + "too many rt rules, abort insertion\n"); + if (status->status < 0) + return; + + } else if (strcmp(tokens[0], "ipv6") == 0) { + n_rts = &nb_rt_ip6; + route_ipv6 = &rt_ip6[*n_rts]; + + APP_CHECK(*n_rts <= RT_IPV6_MAX_RULES - 1, status, + "too many rt rules, abort insertion\n"); + if (status->status < 0) + return; + } else { + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[0]); + return; + } + + for (ti = 1; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "dst") == 0) { + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (route_ipv4 != NULL) { + struct in_addr ip; + uint32_t depth = 0; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, &depth) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + route_ipv4->ip = rte_bswap32( + (uint32_t)ip.s_addr); + route_ipv4->depth = (uint8_t)depth; + } else { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK(parse_ipv6_addr(tokens[ti], + &ip, &depth) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 address", + tokens[ti]); + if (status->status < 0) + return; + memcpy(route_ipv6->ip, ip.s6_addr, 16); + route_ipv6->depth = (uint8_t)depth; + } + } + + if (strcmp(tokens[ti], "port") == 0) { + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + if (route_ipv4 != NULL) + route_ipv4->if_out = atoi(tokens[ti]); + else + route_ipv6->if_out = atoi(tokens[ti]); + } + } + + *n_rts = *n_rts + 1; +} + +void +rt_init(struct socket_ctx *ctx, int32_t socket_id) +{ + char name[PATH_MAX]; + uint32_t i; + int32_t ret; + struct rte_lpm *lpm; + struct rte_lpm6 *lpm6; + char a, b, c, d; + struct rte_lpm_config conf = { 0 }; + struct rte_lpm6_config conf6 = { 0 }; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->rt_ip4 != NULL) + rte_exit(EXIT_FAILURE, "IPv4 Routing Table for socket %u " + "already initialized\n", socket_id); + + if (ctx->rt_ip6 != NULL) + 
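/*
 * parse_rt_tokens() above consumes configuration lines of the form
 * (addresses are illustrative):
 *
 *	rt ipv4 dst 172.16.1.0/24 port 1
 *	rt ipv6 dst 2001:db8::/64 port 0
 *
 * where the port number becomes the next-hop value handed to
 * rte_lpm_add()/rte_lpm6_add() in rt_init() below.
 */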
rte_exit(EXIT_FAILURE, "IPv6 Routing Table for socket %u " + "already initialized\n", socket_id); + + if (nb_rt_ip4 == 0 && nb_rt_ip6 == 0) + RTE_LOG(WARNING, IPSEC, "No Routing rule specified\n"); + + printf("Creating IPv4 Routing Table (RT) context with %u max routes\n", + RT_IPV4_MAX_RULES); + + /* create the LPM table */ + snprintf(name, sizeof(name), "%s_%u", "rt_ip4", socket_id); + conf.max_rules = RT_IPV4_MAX_RULES; + conf.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES; + lpm = rte_lpm_create(name, socket_id, &conf); + if (lpm == NULL) + rte_exit(EXIT_FAILURE, "Unable to create %s LPM table " + "on socket %d\n", name, socket_id); + + /* populate the LPM table */ + for (i = 0; i < nb_rt_ip4; i++) { + ret = rte_lpm_add(lpm, rt_ip4[i].ip, rt_ip4[i].depth, + rt_ip4[i].if_out); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s " + "LPM table on socket %d\n", i, name, socket_id); + + uint32_t_to_char(rt_ip4[i].ip, &a, &b, &c, &d); + printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n", + a, b, c, d, rt_ip4[i].depth, + rt_ip4[i].if_out); + } + + snprintf(name, sizeof(name), "%s_%u", "rt_ip6", socket_id); + conf6.max_rules = RT_IPV6_MAX_RULES; + conf6.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES; + lpm6 = rte_lpm6_create(name, socket_id, &conf6); + if (lpm6 == NULL) + rte_exit(EXIT_FAILURE, "Unable to create %s LPM table " + "on socket %d\n", name, socket_id); + + /* populate the LPM table */ + for (i = 0; i < nb_rt_ip6; i++) { + ret = rte_lpm6_add(lpm6, rt_ip6[i].ip, rt_ip6[i].depth, + rt_ip6[i].if_out); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s " + "LPM table on socket %d\n", i, name, socket_id); + + printf("LPM6: Adding route " + " %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n", + (uint16_t)((rt_ip6[i].ip[0] << 8) | rt_ip6[i].ip[1]), + (uint16_t)((rt_ip6[i].ip[2] << 8) | rt_ip6[i].ip[3]), + (uint16_t)((rt_ip6[i].ip[4] << 8) | rt_ip6[i].ip[5]), + (uint16_t)((rt_ip6[i].ip[6] << 8) | rt_ip6[i].ip[7]), + (uint16_t)((rt_ip6[i].ip[8] << 8) | rt_ip6[i].ip[9]), + (uint16_t)((rt_ip6[i].ip[10] << 8) | rt_ip6[i].ip[11]), + (uint16_t)((rt_ip6[i].ip[12] << 8) | rt_ip6[i].ip[13]), + (uint16_t)((rt_ip6[i].ip[14] << 8) | rt_ip6[i].ip[15]), + rt_ip6[i].depth, rt_ip6[i].if_out); + } + + ctx->rt_ip4 = (struct rt_ctx *)lpm; + ctx->rt_ip6 = (struct rt_ctx *)lpm6; +} diff --git a/src/seastar/dpdk/examples/ipsec-secgw/sa.c b/src/seastar/dpdk/examples/ipsec-secgw/sa.c new file mode 100644 index 00000000..39624c49 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/sa.c @@ -0,0 +1,794 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Security Associations + */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> + +#include <rte_memzone.h> +#include <rte_crypto.h> +#include <rte_cryptodev.h> +#include <rte_byteorder.h> +#include <rte_errno.h> +#include <rte_ip.h> +#include <rte_random.h> + +#include "ipsec.h" +#include "esp.h" +#include "parser.h" + +struct supported_cipher_algo { + const char *keyword; + enum rte_crypto_cipher_algorithm algo; + uint16_t iv_len; + uint16_t block_size; + uint16_t key_len; +}; + +struct supported_auth_algo { + const char *keyword; + enum rte_crypto_auth_algorithm algo; + uint16_t digest_len; + uint16_t key_len; + uint8_t aad_len; + uint8_t key_not_req; +}; + +const struct supported_cipher_algo cipher_algos[] = { + { + .keyword = "null", + .algo = RTE_CRYPTO_CIPHER_NULL, + .iv_len = 0, + .block_size = 4, + .key_len = 0 + }, + { + .keyword = "aes-128-cbc", + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .iv_len = 16, + .block_size = 16, + .key_len = 16 + }, + { + .keyword = "aes-128-gcm", + .algo = RTE_CRYPTO_CIPHER_AES_GCM, + .iv_len = 8, + .block_size = 4, + .key_len = 20 + }, + { + .keyword = "aes-128-ctr", + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .iv_len = 8, + .block_size = 16, /* XXX AESNI MB limition, should be 4 */ + .key_len = 20 + } +}; + +const struct supported_auth_algo auth_algos[] = { + { + .keyword = "null", + .algo = RTE_CRYPTO_AUTH_NULL, + .digest_len = 0, + .key_len = 0, + .key_not_req = 1 + }, + { + .keyword = "sha1-hmac", + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .digest_len = 12, + .key_len = 20 + }, + { + .keyword = "sha256-hmac", + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, + .digest_len = 12, + .key_len = 32 + }, + { + .keyword = "aes-128-gcm", + .algo = RTE_CRYPTO_AUTH_AES_GCM, + .digest_len = 16, + .aad_len = 8, + .key_not_req = 1 + } +}; + +struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES]; +uint32_t nb_sa_out; + +struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES]; +uint32_t nb_sa_in; + +static const struct supported_cipher_algo * +find_match_cipher_algo(const char *cipher_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(cipher_algos); i++) { + const struct supported_cipher_algo *algo = + &cipher_algos[i]; + + if (strcmp(cipher_keyword, algo->keyword) == 0) + return algo; + } + + return NULL; +} + +static const struct supported_auth_algo * +find_match_auth_algo(const char *auth_keyword) +{ + size_t i; + + for (i = 0; i < RTE_DIM(auth_algos); i++) { + const struct supported_auth_algo *algo = + &auth_algos[i]; + + if (strcmp(auth_keyword, algo->keyword) == 0) + return algo; + } + + return NULL; +} + +/** parse_key_string + * parse x:x:x:x.... 
hex number key string into uint8_t *key + * return: + * > 0: number of bytes parsed + * 0: failed + */ +static uint32_t +parse_key_string(const char *key_str, uint8_t *key) +{ + const char *pt_start = key_str, *pt_end = key_str; + uint32_t nb_bytes = 0; + + while (pt_end != NULL) { + char sub_str[3] = {0}; + + pt_end = strchr(pt_start, ':'); + + if (pt_end == NULL) { + if (strlen(pt_start) > 2) + return 0; + strncpy(sub_str, pt_start, 2); + } else { + if (pt_end - pt_start > 2) + return 0; + + strncpy(sub_str, pt_start, pt_end - pt_start); + pt_start = pt_end + 1; + } + + key[nb_bytes++] = strtol(sub_str, NULL, 16); + } + + return nb_bytes; +} + +void +parse_sa_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct ipsec_sa *rule = NULL; + uint32_t ti; /*token index*/ + uint32_t *ri /*rule index*/; + uint32_t cipher_algo_p = 0; + uint32_t auth_algo_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t mode_p = 0; + + if (strcmp(tokens[0], "in") == 0) { + ri = &nb_sa_in; + + APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, + "too many sa rules, abort insertion\n"); + if (status->status < 0) + return; + + rule = &sa_in[*ri]; + } else { + ri = &nb_sa_out; + + APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, + "too many sa rules, abort insertion\n"); + if (status->status < 0) + return; + + rule = &sa_out[*ri]; + } + + /* spi number */ + APP_CHECK_TOKEN_IS_NUM(tokens, 1, status); + if (status->status < 0) + return; + rule->spi = atoi(tokens[1]); + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "mode") == 0) { + APP_CHECK_PRESENCE(mode_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (strcmp(tokens[ti], "ipv4-tunnel") == 0) + rule->flags = IP4_TUNNEL; + else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) + rule->flags = IP6_TUNNEL; + else if (strcmp(tokens[ti], "transport") == 0) + rule->flags = TRANSPORT; + else { + APP_CHECK(0, status, "unrecognized " + "input \"%s\"", tokens[ti]); + return; + } + + mode_p = 1; + continue; + } + + if (strcmp(tokens[ti], "cipher_algo") == 0) { + const struct supported_cipher_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_cipher_algo(tokens[ti]); + + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + rule->cipher_algo = algo->algo; + rule->block_size = algo->block_size; + rule->iv_len = algo->iv_len; + rule->cipher_key_len = algo->key_len; + + /* for NULL algorithm, no cipher key required */ + if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { + cipher_algo_p = 1; + continue; + } + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"cipher_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->cipher_key); + APP_CHECK(key_len == rule->cipher_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC) + rule->salt = (uint32_t)rte_rand(); + + if ((algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) || + (algo->algo == RTE_CRYPTO_CIPHER_AES_GCM)) { + 
key_len -= 4; + rule->cipher_key_len = key_len; + memcpy(&rule->salt, + &rule->cipher_key[key_len], 4); + } + + cipher_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "auth_algo") == 0) { + const struct supported_auth_algo *algo; + uint32_t key_len; + + APP_CHECK_PRESENCE(auth_algo_p, tokens[ti], + status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + algo = find_match_auth_algo(tokens[ti]); + APP_CHECK(algo != NULL, status, "unrecognized " + "input \"%s\"", tokens[ti]); + + rule->auth_algo = algo->algo; + rule->auth_key_len = algo->key_len; + rule->digest_len = algo->digest_len; + rule->aad_len = algo->key_len; + + /* NULL algorithm and combined algos do not + * require auth key + */ + if (algo->key_not_req) { + auth_algo_p = 1; + continue; + } + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(strcmp(tokens[ti], "auth_key") == 0, + status, "unrecognized input \"%s\", " + "expect \"auth_key\"", tokens[ti]); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + key_len = parse_key_string(tokens[ti], + rule->auth_key); + APP_CHECK(key_len == rule->auth_key_len, status, + "unrecognized input \"%s\"", tokens[ti]); + if (status->status < 0) + return; + + auth_algo_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (rule->flags == IP4_TUNNEL) { + struct in_addr ip; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + rule->src.ip.ip4 = rte_bswap32( + (uint32_t)ip.s_addr); + } else if (rule->flags == IP6_TUNNEL) { + struct in6_addr ip; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 addr", + tokens[ti]); + if (status->status < 0) + return; + memcpy(rule->src.ip.ip6.ip6_b, + ip.s6_addr, 16); + } else if (rule->flags == TRANSPORT) { + APP_CHECK(0, status, "unrecognized input " + "\"%s\"", tokens[ti]); + return; + } + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + if (rule->flags == IP4_TUNNEL) { + struct in_addr ip; + + APP_CHECK(parse_ipv4_addr(tokens[ti], + &ip, NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + rule->dst.ip.ip4 = rte_bswap32( + (uint32_t)ip.s_addr); + } else if (rule->flags == IP6_TUNNEL) { + struct in6_addr ip; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + NULL) == 0, status, + "unrecognized input \"%s\", " + "expect valid ipv6 addr", + tokens[ti]); + if (status->status < 0) + return; + memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16); + } else if (rule->flags == TRANSPORT) { + APP_CHECK(0, status, "unrecognized " + "input \"%s\"", tokens[ti]); + return; + } + + dst_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; + } + + APP_CHECK(cipher_algo_p == 1, status, "missing cipher options"); + if (status->status < 0) + return; + + 
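For reference while reading this validation epilogue: a complete outbound SA line accepted by parse_sa_tokens(), in the same cfg syntax (with backslash line continuation) used throughout ep0.cfg and ep1.cfg, looks like the following. The SPI, keys and tunnel endpoints are illustrative placeholders; the key lengths (16 bytes for aes-128-cbc, 20 for sha1-hmac) must match the tables at the top of this file.

sa out 1005 cipher_algo aes-128-cbc cipher_key a1:b2:c3:d4:a1:b2:c3:d4:a1:b2:c3:d4:a1:b2:c3:d4 \
auth_algo sha1-hmac auth_key a1:b2:c3:d4:e5:a1:b2:c3:d4:e5:a1:b2:c3:d4:e5:a1:b2:c3:d4:e5 \
mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5

The SPI also doubles as a table index seed: sa_add_rules() below rejects two rules whose SPIs collide on the same SPI2IDX() slot.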
APP_CHECK(auth_algo_p == 1, status, "missing auth options"); + if (status->status < 0) + return; + + APP_CHECK(mode_p == 1, status, "missing mode option"); + if (status->status < 0) + return; + + *ri = *ri + 1; +} + +static inline void +print_one_sa_rule(const struct ipsec_sa *sa, int inbound) +{ + uint32_t i; + uint8_t a, b, c, d; + + printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi); + + for (i = 0; i < RTE_DIM(cipher_algos); i++) { + if (cipher_algos[i].algo == sa->cipher_algo) { + printf("%s ", cipher_algos[i].keyword); + break; + } + } + + for (i = 0; i < RTE_DIM(auth_algos); i++) { + if (auth_algos[i].algo == sa->auth_algo) { + printf("%s ", auth_algos[i].keyword); + break; + } + } + + printf("mode:"); + + switch (sa->flags) { + case IP4_TUNNEL: + printf("IP4Tunnel "); + uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a); + uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a); + break; + case IP6_TUNNEL: + printf("IP6Tunnel "); + for (i = 0; i < 16; i++) { + if (i % 2 && i != 15) + printf("%.2x:", sa->src.ip.ip6.ip6_b[i]); + else + printf("%.2x", sa->src.ip.ip6.ip6_b[i]); + } + printf(" "); + for (i = 0; i < 16; i++) { + if (i % 2 && i != 15) + printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]); + else + printf("%.2x", sa->dst.ip.ip6.ip6_b[i]); + } + break; + case TRANSPORT: + printf("Transport"); + break; + } + printf("\n"); +} + +struct sa_ctx { + struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; + struct { + struct rte_crypto_sym_xform a; + struct rte_crypto_sym_xform b; + } xf[IPSEC_SA_MAX_ENTRIES]; +}; + +static struct sa_ctx * +sa_create(const char *name, int32_t socket_id) +{ + char s[PATH_MAX]; + struct sa_ctx *sa_ctx; + uint32_t mz_size; + const struct rte_memzone *mz; + + snprintf(s, sizeof(s), "%s_%u", name, socket_id); + + /* Create SA array table */ + printf("Creating SA context with %u maximum entries\n", + IPSEC_SA_MAX_ENTRIES); + + mz_size = sizeof(struct sa_ctx); + mz = rte_memzone_reserve(s, mz_size, socket_id, + RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) { + printf("Failed to allocate SA DB memory\n"); + rte_errno = -ENOMEM; + return NULL; + } + + sa_ctx = (struct sa_ctx *)mz->addr; + + return sa_ctx; +} + +static int +sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries, uint32_t inbound) +{ + struct ipsec_sa *sa; + uint32_t i, idx; + + for (i = 0; i < nb_entries; i++) { + idx = SPI2IDX(entries[i].spi); + sa = &sa_ctx->sa[idx]; + if (sa->spi != 0) { + printf("Index %u already in use by SPI %u\n", + idx, sa->spi); + return -EINVAL; + } + *sa = entries[i]; + sa->seq = 0; + + switch (sa->flags) { + case IP4_TUNNEL: + sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4); + sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4); + } + + if (inbound) { + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo; + sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key; + sa_ctx->xf[idx].b.cipher.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].b.cipher.op = + RTE_CRYPTO_CIPHER_OP_DECRYPT; + sa_ctx->xf[idx].b.next = NULL; + + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH; + sa_ctx->xf[idx].a.auth.algo = sa->auth_algo; + sa_ctx->xf[idx].a.auth.add_auth_data_length = + sa->aad_len; + sa_ctx->xf[idx].a.auth.key.data = sa->auth_key; + sa_ctx->xf[idx].a.auth.key.length = + sa->auth_key_len; + sa_ctx->xf[idx].a.auth.digest_length = + sa->digest_len; + sa_ctx->xf[idx].a.auth.op = + 
RTE_CRYPTO_AUTH_OP_VERIFY; + + } else { /* outbound */ + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo; + sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key; + sa_ctx->xf[idx].a.cipher.key.length = + sa->cipher_key_len; + sa_ctx->xf[idx].a.cipher.op = + RTE_CRYPTO_CIPHER_OP_ENCRYPT; + sa_ctx->xf[idx].a.next = NULL; + + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH; + sa_ctx->xf[idx].b.auth.algo = sa->auth_algo; + sa_ctx->xf[idx].b.auth.add_auth_data_length = + sa->aad_len; + sa_ctx->xf[idx].b.auth.key.data = sa->auth_key; + sa_ctx->xf[idx].b.auth.key.length = + sa->auth_key_len; + sa_ctx->xf[idx].b.auth.digest_length = + sa->digest_len; + sa_ctx->xf[idx].b.auth.op = + RTE_CRYPTO_AUTH_OP_GENERATE; + } + + sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b; + sa_ctx->xf[idx].b.next = NULL; + sa->xforms = &sa_ctx->xf[idx].a; + + print_one_sa_rule(sa, inbound); + } + + return 0; +} + +static inline int +sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries) +{ + return sa_add_rules(sa_ctx, entries, nb_entries, 0); +} + +static inline int +sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + uint32_t nb_entries) +{ + return sa_add_rules(sa_ctx, entries, nb_entries, 1); +} + +void +sa_init(struct socket_ctx *ctx, int32_t socket_id) +{ + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sa_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already " + "initialized\n", socket_id); + + if (ctx->sa_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already " + "initialized\n", socket_id); + + if (nb_sa_in > 0) { + name = "sa_in"; + ctx->sa_in = sa_create(name, socket_id); + if (ctx->sa_in == NULL) + rte_exit(EXIT_FAILURE, "Error [%d] creating SA " + "context %s in socket %d\n", rte_errno, + name, socket_id); + + sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in); + } else + RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n"); + + if (nb_sa_out > 0) { + name = "sa_out"; + ctx->sa_out = sa_create(name, socket_id); + if (ctx->sa_out == NULL) + rte_exit(EXIT_FAILURE, "Error [%d] creating SA " + "context %s in socket %d\n", rte_errno, + name, socket_id); + + sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out); + } else + RTE_LOG(WARNING, IPSEC, "No SA Outbound rule " + "specified\n"); +} + +int +inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx) +{ + struct ipsec_mbuf_metadata *priv; + + priv = RTE_PTR_ADD(m, sizeof(struct rte_mbuf)); + + return (sa_ctx->sa[sa_idx].spi == priv->sa->spi); +} + +static inline void +single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt, + struct ipsec_sa **sa_ret) +{ + struct esp_hdr *esp; + struct ip *ip; + uint32_t *src4_addr; + uint8_t *src6_addr; + struct ipsec_sa *sa; + + *sa_ret = NULL; + + ip = rte_pktmbuf_mtod(pkt, struct ip *); + if (ip->ip_v == IPVERSION) + esp = (struct esp_hdr *)(ip + 1); + else + esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1); + + if (esp->spi == INVALID_SPI) + return; + + sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))]; + if (rte_be_to_cpu_32(esp->spi) != sa->spi) + return; + + switch (sa->flags) { + case IP4_TUNNEL: + src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src)); + if ((ip->ip_v == IPVERSION) && + (sa->src.ip.ip4 == *src4_addr) && + (sa->dst.ip.ip4 == *(src4_addr + 1))) + *sa_ret = sa; + break; + case IP6_TUNNEL: + src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src)); + if ((ip->ip_v == 
IP6_VERSION) && + !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) && + !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16)) + *sa_ret = sa; + break; + case TRANSPORT: + *sa_ret = sa; + } +} + +void +inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[], + struct ipsec_sa *sa[], uint16_t nb_pkts) +{ + uint32_t i; + + for (i = 0; i < nb_pkts; i++) + single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]); +} + +void +outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], + struct ipsec_sa *sa[], uint16_t nb_pkts) +{ + uint32_t i; + + for (i = 0; i < nb_pkts; i++) + sa[i] = &sa_ctx->sa[sa_idx[i]]; +} diff --git a/src/seastar/dpdk/examples/ipsec-secgw/sp4.c b/src/seastar/dpdk/examples/ipsec-secgw/sp4.c new file mode 100644 index 00000000..38c72a92 --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/sp4.c @@ -0,0 +1,535 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Security Policies + */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip.h> + +#include <rte_acl.h> +#include <rte_ip.h> + +#include "ipsec.h" +#include "parser.h" + +#define MAX_ACL_RULE_NUM 1024 + +/* + * Rule and trace formats definitions. 
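+ * Offsets below are taken relative to the first byte handed to the
+ * classifier (the IPv4 protocol field), and the two 16-bit port
+ * ranges share the single 32-bit RTE_ACL_IPV4_PORTS input group,
+ * since rte_acl consumes its input in 32-bit chunks.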
+ */ +enum { + PROTO_FIELD_IPV4, + SRC_FIELD_IPV4, + DST_FIELD_IPV4, + SRCP_FIELD_IPV4, + DSTP_FIELD_IPV4, + NUM_FIELDS_IPV4 +}; + +/* + * That effectively defines order of IPV4 classifications: + * - PROTO + * - SRC IP ADDRESS + * - DST IP ADDRESS + * - PORTS (SRC and DST) + */ +enum { + RTE_ACL_IPV4_PROTO, + RTE_ACL_IPV4_SRC, + RTE_ACL_IPV4_DST, + RTE_ACL_IPV4_PORTS, + RTE_ACL_IPV4_NUM +}; + +struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = { + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint8_t), + .field_index = PROTO_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PROTO, + .offset = 0, + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = SRC_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_SRC, + .offset = offsetof(struct ip, ip_src) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = DST_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_DST, + .offset = offsetof(struct ip, ip_dst) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = SRCP_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PORTS, + .offset = sizeof(struct ip) - offsetof(struct ip, ip_p) + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = DSTP_FIELD_IPV4, + .input_index = RTE_ACL_IPV4_PORTS, + .offset = sizeof(struct ip) - offsetof(struct ip, ip_p) + + sizeof(uint16_t) + }, +}; + +RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ip4_defs)); + +struct acl4_rules acl4_rules_out[MAX_ACL_RULE_NUM]; +uint32_t nb_acl4_rules_out; + +struct acl4_rules acl4_rules_in[MAX_ACL_RULE_NUM]; +uint32_t nb_acl4_rules_in; + +void +parse_sp4_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct acl4_rules *rule_ipv4 = NULL; + + uint32_t *ri = NULL; /* rule index */ + uint32_t ti = 0; /* token index */ + + uint32_t esp_p = 0; + uint32_t protect_p = 0; + uint32_t bypass_p = 0; + uint32_t discard_p = 0; + uint32_t pri_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t proto_p = 0; + uint32_t sport_p = 0; + uint32_t dport_p = 0; + + if (strcmp(tokens[1], "in") == 0) { + ri = &nb_acl4_rules_in; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, + "too many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv4 = &acl4_rules_in[*ri]; + + } else if (strcmp(tokens[1], "out") == 0) { + ri = &nb_acl4_rules_out; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, + "too many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv4 = &acl4_rules_out[*ri]; + } else { + APP_CHECK(0, status, "unrecognized input \"%s\", expect" + " \"in\" or \"out\"\n", tokens[ti]); + return; + } + + rule_ipv4->data.category_mask = 1; + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "esp") == 0) { + /* currently do nothing */ + APP_CHECK_PRESENCE(esp_p, tokens[ti], status); + if (status->status < 0) + return; + esp_p = 1; + continue; + } + + if (strcmp(tokens[ti], "protect") == 0) { + APP_CHECK_PRESENCE(protect_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "bypass"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if 
(status->status < 0) + return; + + rule_ipv4->data.userdata = + PROTECT(atoi(tokens[ti])); + + protect_p = 1; + continue; + } + + if (strcmp(tokens[ti], "bypass") == 0) { + APP_CHECK_PRESENCE(bypass_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = BYPASS; + + bypass_p = 1; + continue; + } + + if (strcmp(tokens[ti], "discard") == 0) { + APP_CHECK_PRESENCE(discard_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv4->data.userdata = DISCARD; + + discard_p = 1; + continue; + } + + if (strcmp(tokens[ti], "pri") == 0) { + APP_CHECK_PRESENCE(pri_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv4->data.priority = atoi(tokens[ti]); + + pri_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + struct in_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv4_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[1].value.u32 = + rte_bswap32(ip.s_addr); + rule_ipv4->field[1].mask_range.u32 = + depth; + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + struct in_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK(parse_ipv4_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv4 addr", + tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[2].value.u32 = + rte_bswap32(ip.s_addr); + rule_ipv4->field[2].mask_range.u32 = + depth; + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "proto") == 0) { + uint16_t low, high; + + APP_CHECK_PRESENCE(proto_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &low, &high) + == 0, status, "unrecognized input \"%s\"" + ", expect \"from:to\"", tokens[ti]); + if (status->status < 0) + return; + APP_CHECK(low <= 0xff, status, "proto low " + "over-limit"); + if (status->status < 0) + return; + APP_CHECK(high <= 0xff, status, "proto high " + "over-limit"); + if (status->status < 0) + return; + + rule_ipv4->field[0].value.u8 = (uint8_t)low; + rule_ipv4->field[0].mask_range.u8 = (uint8_t)high; + + proto_p = 1; + continue; + } + + if (strcmp(tokens[ti], "sport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(sport_p, tokens[ti], status); + if (status->status < 0) + return; + 
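For orientation inside this long token loop: a complete IPv4 policy line that exercises the branches above, in the same cfg syntax as ep0.cfg, looks like this (SPI, priority and subnet are illustrative placeholders):

sp ipv4 out esp protect 1005 pri 1 dst 192.168.105.0/24 sport 0:65535 dport 0:65535

"protect" ties the policy to the SA whose SPI matches, while "bypass" and "discard" short-circuit IPsec processing entirely, which is why the three actions are checked as mutually exclusive.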
INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[3].value.u16 = port_low; + rule_ipv4->field[3].mask_range.u16 = port_high; + + sport_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(dport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv4->field[4].value.u16 = port_low; + rule_ipv4->field[4].mask_range.u16 = port_high; + + dport_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); + return; + } + + /* check if argument(s) are missing */ + APP_CHECK(esp_p == 1, status, "missing argument \"esp\""); + if (status->status < 0) + return; + + APP_CHECK(protect_p | bypass_p | discard_p, status, "missing " + "argument \"protect\", \"bypass\", or \"discard\""); + if (status->status < 0) + return; + + *ri = *ri + 1; +} + +static void +print_one_ip4_rule(const struct acl4_rules *rule, int32_t extra) +{ + uint8_t a, b, c, d; + + uint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32, + &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu/%u ", a, b, c, d, + rule->field[SRC_FIELD_IPV4].mask_range.u32); + uint32_t_to_char(rule->field[DST_FIELD_IPV4].value.u32, + &a, &b, &c, &d); + printf("%hhu.%hhu.%hhu.%hhu/%u ", a, b, c, d, + rule->field[DST_FIELD_IPV4].mask_range.u32); + printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ", + rule->field[SRCP_FIELD_IPV4].value.u16, + rule->field[SRCP_FIELD_IPV4].mask_range.u16, + rule->field[DSTP_FIELD_IPV4].value.u16, + rule->field[DSTP_FIELD_IPV4].mask_range.u16, + rule->field[PROTO_FIELD_IPV4].value.u8, + rule->field[PROTO_FIELD_IPV4].mask_range.u8); + if (extra) + printf("0x%x-0x%x-0x%x ", + rule->data.category_mask, + rule->data.priority, + rule->data.userdata); +} + +static inline void +dump_ip4_rules(const struct acl4_rules *rule, int32_t num, int32_t extra) +{ + int32_t i; + + for (i = 0; i < num; i++, rule++) { + printf("\t%d:", i + 1); + print_one_ip4_rule(rule, extra); + printf("\n"); + } +} + +static struct rte_acl_ctx * +acl4_init(const char *name, int32_t socketid, const struct acl4_rules *rules, + uint32_t rules_nb) +{ + char s[PATH_MAX]; + struct rte_acl_param acl_param; + struct rte_acl_config acl_build_param; + struct rte_acl_ctx *ctx; + + printf("Creating SP context with %u max rules\n", MAX_ACL_RULE_NUM); + + memset(&acl_param, 0, sizeof(acl_param)); + + /* Create ACL contexts */ + snprintf(s, sizeof(s), "%s_%d", name, socketid); + + printf("IPv4 %s entries [%u]:\n", s, rules_nb); + dump_ip4_rules(rules, rules_nb, 1); + + acl_param.name = s; + acl_param.socket_id = socketid; + acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip4_defs)); + acl_param.max_rule_num = MAX_ACL_RULE_NUM; + + ctx = rte_acl_create(&acl_param); + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "Failed to create ACL context\n"); + + if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules, + rules_nb) < 0) + rte_exit(EXIT_FAILURE, "add rules failed\n"); + + /* Perform builds */ + memset(&acl_build_param, 0, 
sizeof(acl_build_param)); + + acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES; + acl_build_param.num_fields = RTE_DIM(ip4_defs); + memcpy(&acl_build_param.defs, ip4_defs, sizeof(ip4_defs)); + + if (rte_acl_build(ctx, &acl_build_param) != 0) + rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n"); + + rte_acl_dump(ctx); + + return ctx; +} + +void +sp4_init(struct socket_ctx *ctx, int32_t socket_id) +{ + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sp_ip4_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound SP DB for socket %u already " + "initialized\n", socket_id); + + if (ctx->sp_ip4_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already " + "initialized\n", socket_id); + + if (nb_acl4_rules_in > 0) { + name = "sp_ip4_in"; + ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name, + socket_id, acl4_rules_in, nb_acl4_rules_in); + } else + RTE_LOG(WARNING, IPSEC, "No IPv4 SP Inbound rule " + "specified\n"); + + if (nb_acl4_rules_out > 0) { + name = "sp_ip4_out"; + ctx->sp_ip4_out = (struct sp_ctx *)acl4_init(name, + socket_id, acl4_rules_out, nb_acl4_rules_out); + } else + RTE_LOG(WARNING, IPSEC, "No IPv4 SP Outbound rule " + "specified\n"); +} diff --git a/src/seastar/dpdk/examples/ipsec-secgw/sp6.c b/src/seastar/dpdk/examples/ipsec-secgw/sp6.c new file mode 100644 index 00000000..62fb492c --- /dev/null +++ b/src/seastar/dpdk/examples/ipsec-secgw/sp6.c @@ -0,0 +1,649 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* + * Security Policies + */ +#include <sys/types.h> +#include <netinet/in.h> +#include <netinet/ip6.h> + +#include <rte_acl.h> +#include <rte_ip.h> + +#include "ipsec.h" +#include "parser.h" + +#define MAX_ACL_RULE_NUM 1024 + +enum { + IP6_PROTO, + IP6_SRC0, + IP6_SRC1, + IP6_SRC2, + IP6_SRC3, + IP6_DST0, + IP6_DST1, + IP6_DST2, + IP6_DST3, + IP6_SRCP, + IP6_DSTP, + IP6_NUM +}; + +#define IP6_ADDR_SIZE 16 + +struct rte_acl_field_def ip6_defs[IP6_NUM] = { + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint8_t), + .field_index = IP6_PROTO, + .input_index = IP6_PROTO, + .offset = 0, + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC0, + .input_index = IP6_SRC0, + .offset = 2 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC1, + .input_index = IP6_SRC1, + .offset = 6 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC2, + .input_index = IP6_SRC2, + .offset = 10 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_SRC3, + .input_index = IP6_SRC3, + .offset = 14 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST0, + .input_index = IP6_DST0, + .offset = 18 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST1, + .input_index = IP6_DST1, + .offset = 22 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST2, + .input_index = IP6_DST2, + .offset = 26 + }, + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = 4, + .field_index = IP6_DST3, + .input_index = IP6_DST3, + .offset = 30 + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = IP6_SRCP, + .input_index = IP6_SRCP, + .offset = 34 + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = IP6_DSTP, + .input_index = IP6_SRCP, + .offset = 36 + } +}; + +RTE_ACL_RULE_DEF(acl6_rules, RTE_DIM(ip6_defs)); + +struct acl6_rules acl6_rules_out[MAX_ACL_RULE_NUM]; +uint32_t nb_acl6_rules_out; + +struct acl6_rules acl6_rules_in[MAX_ACL_RULE_NUM]; +uint32_t nb_acl6_rules_in; + +void +parse_sp6_tokens(char **tokens, uint32_t n_tokens, + struct parse_status *status) +{ + struct acl6_rules *rule_ipv6 = NULL; + + uint32_t *ri = NULL; /* rule index */ + uint32_t ti = 0; /* token index */ + + uint32_t esp_p = 0; + uint32_t protect_p = 0; + uint32_t bypass_p = 0; + uint32_t discard_p = 0; + uint32_t pri_p = 0; + uint32_t src_p = 0; + uint32_t dst_p = 0; + uint32_t proto_p = 0; + uint32_t sport_p = 0; + uint32_t dport_p = 0; + + if (strcmp(tokens[1], "in") == 0) { + ri = &nb_acl6_rules_in; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, "too " + "many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv6 = &acl6_rules_in[*ri]; + + } else if (strcmp(tokens[1], "out") == 0) { + ri = &nb_acl6_rules_out; + + APP_CHECK(*ri <= MAX_ACL_RULE_NUM - 1, status, "too " + "many sp rules, abort insertion\n"); + if (status->status < 0) + return; + + rule_ipv6 = &acl6_rules_out[*ri]; + + } else { + APP_CHECK(0, status, "unrecognized input \"%s\", expect" + " \"in\" or \"out\"\n", tokens[ti]); + return; + } + + rule_ipv6->data.category_mask = 1; + + + for (ti = 2; ti < n_tokens; ti++) { + if (strcmp(tokens[ti], "esp") == 0) { + /* currently do nothing */ + APP_CHECK_PRESENCE(esp_p, tokens[ti], status); + if (status->status < 0) + return; + esp_p = 1; + continue; + } + + if (strcmp(tokens[ti], "protect") == 0) { + APP_CHECK_PRESENCE(protect_p, tokens[ti], status); + if 
(status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "bypass"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = + PROTECT(atoi(tokens[ti])); + + protect_p = 1; + continue; + } + + if (strcmp(tokens[ti], "bypass") == 0) { + APP_CHECK_PRESENCE(bypass_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(discard_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = BYPASS; + + bypass_p = 1; + continue; + } + + if (strcmp(tokens[ti], "discard") == 0) { + APP_CHECK_PRESENCE(discard_p, tokens[ti], status); + if (status->status < 0) + return; + APP_CHECK(protect_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "protect"); + if (status->status < 0) + return; + APP_CHECK(bypass_p == 0, status, "conflict item " + "between \"%s\" and \"%s\"", tokens[ti], + "discard"); + if (status->status < 0) + return; + + rule_ipv6->data.userdata = DISCARD; + + discard_p = 1; + continue; + } + + if (strcmp(tokens[ti], "pri") == 0) { + APP_CHECK_PRESENCE(pri_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + APP_CHECK_TOKEN_IS_NUM(tokens, ti, status); + if (status->status < 0) + return; + + rule_ipv6->data.priority = atoi(tokens[ti]); + + pri_p = 1; + continue; + } + + if (strcmp(tokens[ti], "src") == 0) { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(src_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv6 " + "addr", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[1].value.u32 = + (uint32_t)ip.s6_addr[0] << 24 | + (uint32_t)ip.s6_addr[1] << 16 | + (uint32_t)ip.s6_addr[2] << 8 | + (uint32_t)ip.s6_addr[3]; + rule_ipv6->field[1].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[2].value.u32 = + (uint32_t)ip.s6_addr[4] << 24 | + (uint32_t)ip.s6_addr[5] << 16 | + (uint32_t)ip.s6_addr[6] << 8 | + (uint32_t)ip.s6_addr[7]; + rule_ipv6->field[2].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[3].value.u32 = + (uint32_t)ip.s6_addr[8] << 24 | + (uint32_t)ip.s6_addr[9] << 16 | + (uint32_t)ip.s6_addr[10] << 8 | + (uint32_t)ip.s6_addr[11]; + rule_ipv6->field[3].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[4].value.u32 = + (uint32_t)ip.s6_addr[12] << 24 | + (uint32_t)ip.s6_addr[13] << 16 | + (uint32_t)ip.s6_addr[14] << 8 | + (uint32_t)ip.s6_addr[15]; + rule_ipv6->field[4].mask_range.u32 = + (depth > 32) ? 
32 : depth; + + src_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dst") == 0) { + struct in6_addr ip; + uint32_t depth; + + APP_CHECK_PRESENCE(dst_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, + &depth) == 0, status, "unrecognized " + "input \"%s\", expect valid ipv6 " + "addr", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[5].value.u32 = + (uint32_t)ip.s6_addr[0] << 24 | + (uint32_t)ip.s6_addr[1] << 16 | + (uint32_t)ip.s6_addr[2] << 8 | + (uint32_t)ip.s6_addr[3]; + rule_ipv6->field[5].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[6].value.u32 = + (uint32_t)ip.s6_addr[4] << 24 | + (uint32_t)ip.s6_addr[5] << 16 | + (uint32_t)ip.s6_addr[6] << 8 | + (uint32_t)ip.s6_addr[7]; + rule_ipv6->field[6].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[7].value.u32 = + (uint32_t)ip.s6_addr[8] << 24 | + (uint32_t)ip.s6_addr[9] << 16 | + (uint32_t)ip.s6_addr[10] << 8 | + (uint32_t)ip.s6_addr[11]; + rule_ipv6->field[7].mask_range.u32 = + (depth > 32) ? 32 : depth; + depth = (depth > 32) ? (depth - 32) : 0; + rule_ipv6->field[8].value.u32 = + (uint32_t)ip.s6_addr[12] << 24 | + (uint32_t)ip.s6_addr[13] << 16 | + (uint32_t)ip.s6_addr[14] << 8 | + (uint32_t)ip.s6_addr[15]; + rule_ipv6->field[8].mask_range.u32 = + (depth > 32) ? 32 : depth; + + dst_p = 1; + continue; + } + + if (strcmp(tokens[ti], "proto") == 0) { + uint16_t low, high; + + APP_CHECK_PRESENCE(proto_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &low, &high) + == 0, status, "unrecognized input \"%s\"" + ", expect \"from:to\"", tokens[ti]); + if (status->status < 0) + return; + APP_CHECK(low <= 0xff, status, "proto low " + "over-limit"); + if (status->status < 0) + return; + APP_CHECK(high <= 0xff, status, "proto high " + "over-limit"); + if (status->status < 0) + return; + + rule_ipv6->field[0].value.u8 = (uint8_t)low; + rule_ipv6->field[0].mask_range.u8 = (uint8_t)high; + + proto_p = 1; + continue; + } + + if (strcmp(tokens[ti], "sport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(sport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[9].value.u16 = port_low; + rule_ipv6->field[9].mask_range.u16 = port_high; + + sport_p = 1; + continue; + } + + if (strcmp(tokens[ti], "dport") == 0) { + uint16_t port_low, port_high; + + APP_CHECK_PRESENCE(dport_p, tokens[ti], status); + if (status->status < 0) + return; + INCREMENT_TOKEN_INDEX(ti, n_tokens, status); + if (status->status < 0) + return; + + APP_CHECK(parse_range(tokens[ti], &port_low, + &port_high) == 0, status, "unrecognized " + "input \"%s\", expect \"port_from:" + "port_to\"", tokens[ti]); + if (status->status < 0) + return; + + rule_ipv6->field[10].value.u16 = port_low; + rule_ipv6->field[10].mask_range.u16 = port_high; + + dport_p = 1; + continue; + } + + /* unrecognizeable input */ + APP_CHECK(0, status, "unrecognized input \"%s\"", + tokens[ti]); 
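The address handling above is the subtle part of this parser: rte_acl fields are at most 32 bits wide, so one IPv6 prefix length is spread across four mask fields, 32 bits at a time, with the running depth clamped and decremented after each field. A standalone check of that arithmetic; the /116 prefix is an arbitrary example:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t depth = 116;	/* e.g. from "dst 2001:db8::/116" */
	int field;

	for (field = 0; field < 4; field++) {
		printf("32-bit field %d: %u mask bits\n",
			field, depth > 32 ? 32 : depth);
		depth = depth > 32 ? depth - 32 : 0;
	}
	return 0;	/* prints 32, 32, 32, 20 */
}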
+ return; + } + + /* check if argument(s) are missing */ + APP_CHECK(esp_p == 1, status, "missing argument \"esp\""); + if (status->status < 0) + return; + + APP_CHECK(protect_p | bypass_p | discard_p, status, "missing " + "argument \"protect\", \"bypass\", or \"discard\""); + if (status->status < 0) + return; + + *ri = *ri + 1; +} + +static inline void +print_one_ip6_rule(const struct acl6_rules *rule, int32_t extra) +{ + uint8_t a, b, c, d; + + uint32_t_to_char(rule->field[IP6_SRC0].value.u32, + &a, &b, &c, &d); + printf("%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC1].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC2].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_SRC3].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d, + rule->field[IP6_SRC0].mask_range.u32 + + rule->field[IP6_SRC1].mask_range.u32 + + rule->field[IP6_SRC2].mask_range.u32 + + rule->field[IP6_SRC3].mask_range.u32); + + uint32_t_to_char(rule->field[IP6_DST0].value.u32, + &a, &b, &c, &d); + printf("%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST1].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST2].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x", a, b, c, d); + uint32_t_to_char(rule->field[IP6_DST3].value.u32, + &a, &b, &c, &d); + printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d, + rule->field[IP6_DST0].mask_range.u32 + + rule->field[IP6_DST1].mask_range.u32 + + rule->field[IP6_DST2].mask_range.u32 + + rule->field[IP6_DST3].mask_range.u32); + + printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ", + rule->field[IP6_SRCP].value.u16, + rule->field[IP6_SRCP].mask_range.u16, + rule->field[IP6_DSTP].value.u16, + rule->field[IP6_DSTP].mask_range.u16, + rule->field[IP6_PROTO].value.u8, + rule->field[IP6_PROTO].mask_range.u8); + if (extra) + printf("0x%x-0x%x-0x%x ", + rule->data.category_mask, + rule->data.priority, + rule->data.userdata); +} + +static inline void +dump_ip6_rules(const struct acl6_rules *rule, int32_t num, int32_t extra) +{ + int32_t i; + + for (i = 0; i < num; i++, rule++) { + printf("\t%d:", i + 1); + print_one_ip6_rule(rule, extra); + printf("\n"); + } +} + +static struct rte_acl_ctx * +acl6_init(const char *name, int32_t socketid, const struct acl6_rules *rules, + uint32_t rules_nb) +{ + char s[PATH_MAX]; + struct rte_acl_param acl_param; + struct rte_acl_config acl_build_param; + struct rte_acl_ctx *ctx; + + printf("Creating SP context with %u max rules\n", MAX_ACL_RULE_NUM); + + memset(&acl_param, 0, sizeof(acl_param)); + + /* Create ACL contexts */ + snprintf(s, sizeof(s), "%s_%d", name, socketid); + + printf("IPv4 %s entries [%u]:\n", s, rules_nb); + dump_ip6_rules(rules, rules_nb, 1); + + acl_param.name = s; + acl_param.socket_id = socketid; + acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip6_defs)); + acl_param.max_rule_num = MAX_ACL_RULE_NUM; + + ctx = rte_acl_create(&acl_param); + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "Failed to create ACL context\n"); + + if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules, + rules_nb) < 0) + rte_exit(EXIT_FAILURE, "add rules failed\n"); + + /* Perform builds */ + memset(&acl_build_param, 0, sizeof(acl_build_param)); + + acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES; + acl_build_param.num_fields = RTE_DIM(ip6_defs); + memcpy(&acl_build_param.defs, ip6_defs, sizeof(ip6_defs)); + + if 
(rte_acl_build(ctx, &acl_build_param) != 0) + rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n"); + + rte_acl_dump(ctx); + + return ctx; +} + +void +sp6_init(struct socket_ctx *ctx, int32_t socket_id) +{ + const char *name; + + if (ctx == NULL) + rte_exit(EXIT_FAILURE, "NULL context.\n"); + + if (ctx->sp_ip6_in != NULL) + rte_exit(EXIT_FAILURE, "Inbound IPv6 SP DB for socket %u " + "already initialized\n", socket_id); + + if (ctx->sp_ip6_out != NULL) + rte_exit(EXIT_FAILURE, "Outbound IPv6 SP DB for socket %u " + "already initialized\n", socket_id); + + if (nb_acl6_rules_in > 0) { + name = "sp_ip6_in"; + ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name, + socket_id, acl6_rules_in, nb_acl6_rules_in); + } else + RTE_LOG(WARNING, IPSEC, "No IPv6 SP Inbound rule " + "specified\n"); + + if (nb_acl6_rules_out > 0) { + name = "sp_ip6_out"; + ctx->sp_ip6_out = (struct sp_ctx *)acl6_init(name, + socket_id, acl6_rules_out, nb_acl6_rules_out); + } else + RTE_LOG(WARNING, IPSEC, "No IPv6 SP Outbound rule " + "specified\n"); +}
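Taken together, the files in this part of the diff form the control plane of ipsec-secgw: parser.c/parser.h tokenize the config file, sa.c builds the crypto material, sp4.c/sp6.c build the policy ACLs, and rt.c builds the forwarding tables. A reduced sketch of how the application's initialization path (ipsec-secgw.c, earlier in this diff) brings them up for one socket; the wrapper function name here is invented, but sa_init/sp4_init/sp6_init/rt_init are the real entry points defined above, called after EAL setup and parse_cfg_file():

#include "ipsec.h"

static void
socket_dbs_init(struct socket_ctx *ctx, int32_t socket_id)
{
	sa_init(ctx, socket_id);	/* SAs: per-SPI crypto material  */
	sp4_init(ctx, socket_id);	/* IPv4 security policies (ACL) */
	sp6_init(ctx, socket_id);	/* IPv6 security policies (ACL) */
	rt_init(ctx, socket_id);	/* forwarding tables (LPM/LPM6) */
}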