summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 17:40:16 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 17:40:16 +0000
commitf71a078c8abe5e11d23ef451a4a6bae6e3dad9fe (patch)
treed2bd79992fcea321b48cff207d2d44f98f4bdf57
parentReleasing progress-linux version 1:7.0.3-1~progress7.99u1. (diff)
downloadsuricata-f71a078c8abe5e11d23ef451a4a6bae6e3dad9fe.tar.xz
suricata-f71a078c8abe5e11d23ef451a4a6bae6e3dad9fe.zip
Merging upstream version 1:7.0.4.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--ChangeLog34
-rwxr-xr-xconfigure46
-rw-r--r--configure.ac6
-rw-r--r--doc/userguide/configuration/suricata-yaml.rst6
-rw-r--r--doc/userguide/rules/payload-keywords.rst5
-rw-r--r--doc/userguide/suricata.12
-rw-r--r--doc/userguide/suricatactl-filestore.12
-rw-r--r--doc/userguide/suricatactl.12
-rw-r--r--doc/userguide/suricatasc.12
-rw-r--r--doc/userguide/userguide.pdfbin3799337 -> 3799748 bytes
-rw-r--r--rust/derive/Cargo.toml2
-rw-r--r--rust/dist/rust-bindings.h2
-rw-r--r--rust/src/applayertemplate/template.rs20
-rw-r--r--rust/src/dhcp/logger.rs4
-rw-r--r--rust/src/dns/log.rs2
-rw-r--r--rust/src/ike/ikev1.rs2
-rw-r--r--rust/src/jsonbuilder.rs16
-rw-r--r--rust/src/mqtt/mqtt.rs8
-rw-r--r--rust/src/nfs/nfs.rs2
-rw-r--r--rust/src/smb/log.rs4
-rw-r--r--rust/src/ssh/logger.rs10
-rw-r--r--rust/src/ssh/ssh.rs6
-rwxr-xr-xsrc/Makefile.am3
-rw-r--r--src/Makefile.in3
-rw-r--r--src/app-layer-parser.c4
-rw-r--r--src/autoconf.h8
-rw-r--r--src/conf-yaml-loader.c2
-rw-r--r--src/decode-pppoe.c27
-rw-r--r--src/detect-engine-address.c24
-rw-r--r--src/detect-engine-iponly.c262
-rw-r--r--src/detect-engine-loader.c11
-rw-r--r--src/detect-engine-loader.h11
-rw-r--r--src/detect-engine-mpm.c20
-rw-r--r--src/detect-engine-siggroup.c3
-rw-r--r--src/detect-filestore.c31
-rw-r--r--src/detect-http-header.c12
-rw-r--r--src/detect-parse.c1
-rw-r--r--src/detect-tls-certs.c7
-rw-r--r--src/detect.h3
-rw-r--r--src/flow-timeout.c2
-rw-r--r--src/flow-worker.c14
-rw-r--r--src/flow.h5
-rw-r--r--src/output-filestore.c8
-rw-r--r--src/output-json-stats.c22
-rw-r--r--src/output-json-stats.h2
-rw-r--r--src/output-tx.c4
-rw-r--r--src/runmode-dpdk.c16
-rw-r--r--src/runmode-napatech.c7
-rw-r--r--src/runmode-netmap.c4
-rw-r--r--src/runmode-pfring.c5
-rw-r--r--src/runmode-unittests.c3
-rw-r--r--src/runmode-unix-socket.c6
-rw-r--r--src/source-dpdk.c2
-rw-r--r--src/source-netmap.c1
-rw-r--r--src/source-pfring.c2
-rw-r--r--src/suricata.c30
-rw-r--r--src/tests/output-json-stats.c70
-rw-r--r--src/tm-threads.c10
-rw-r--r--src/tmqh-simple.c5
-rw-r--r--src/util-decode-mime.c1
-rw-r--r--src/util-error.c1
-rw-r--r--src/util-error.h1
-rw-r--r--src/util-file.c13
-rw-r--r--src/util-hugepages.c91
-rw-r--r--src/util-streaming-buffer.c21
-rw-r--r--suricata-update/.github/PULL_REQUEST_TEMPLATE.md5
-rw-r--r--suricata-update/.github/workflows/tests.yml20
-rw-r--r--suricata-update/.readthedocs.yaml17
-rw-r--r--suricata-update/CHANGELOG.md20
-rw-r--r--suricata-update/Makefile6
-rw-r--r--suricata-update/doc/quickstart.rst2
-rw-r--r--suricata-update/suricata/update/data/index.py200
-rw-r--r--suricata-update/suricata/update/engine.py2
-rw-r--r--suricata-update/suricata/update/main.py54
-rw-r--r--suricata-update/suricata/update/matchers.py6
-rw-r--r--suricata-update/suricata/update/osinfo.py2
-rw-r--r--suricata-update/suricata/update/rule.py2
-rw-r--r--suricata-update/suricata/update/version.py2
-rwxr-xr-xsuricata-update/tests/integration_tests.py9
-rw-r--r--suricata-update/tests/suricata-test-rules.zipbin0 -> 2391 bytes
-rw-r--r--suricata-update/tests/test_main.py4
-rw-r--r--suricata-update/tox.ini2
82 files changed, 981 insertions, 333 deletions
diff --git a/ChangeLog b/ChangeLog
index cf074b1..69c9ce3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,37 @@
+7.0.4 -- 2024-03-19
+
+Security #6868: eve: excessive ssh long banner logging (7.0.x backport)(CVE 2024-28870)
+Security #6801: ssh: quadratic complexity in overlong banner (7.0.x backport)(CVE 2024-28870)
Security #6759: libhtp: quadratic complexity checking after request line missing protocol (7.0.x backport)(CVE 2024-28871)
+Security #6798: output/filestore: timeout because of running OutputTxLog on useless packets (7.0.x backport)
+Bug #6842: Error message from netmap when using Netmap pipes (with lb) (7.0.x backport)
+Bug #6828: dpdk: NUMA warning on non-NUMA system (7.0.x backport)
+Bug #6816: capture plugins: capture plugins unusable due to initialization order (7.0.x backport)
+Bug #6812: pfring: memory leak (7.0.x backport)
+Bug #6810: decode/pppoe: Suspicious pointer scaling (7.0.x backport)
+Bug #6791: cppcheck 2.11 errors (7.0.x backport)
+Bug #6785: detect/tls.certs: direction flag checked against wrong field (7.0.x backport)
+Bug #6784: util/mime: Memory leak at util-decode-mime.c:MimeDecInitParser (7.0.x backport)
+Bug #6768: multi-tenancy: dead lock during tenant loading (7.0.x backport)
+Bug #6765: Hugepages Error for FreeBSD when kernel NUMA build option is not enabled (7.0.x backport)
+Bug #6764: Huge increase on Suricata load time with a lot of ip-only rules and bigger HOME_NET (7.0.x backport)
+Bug #6761: Hugepages Error for ARM64 and af-packet IPS mode (7.0.x backport)
+Bug #6756: Netmap: deadlock if netmap_open fails (7.0.x backport)
+Bug #6746: Suricata 7.0.2 parent interface object in stats contains VLAN-ID as keys (7.0.x backport)
+Bug #6742: dpdk: automatic cache calculation is broken (7.0.x backport)
+Bug #6738: dpdk: property configuration can lead to integer overflow (7.0.x backport)
+Bug #6734: tcp: tcp flow flags changing incorrectly when ruleset contains content matching (7.0.x backport)
+Bug #6622: detect/filestore: flow, to_server was broken by moving files into transactions (7.0.x backport)
+Bug #6593: mqtt: frames on TCP are not set properly when parsing multiple PDUs in one go (7.0.x backport)
+Bug #6580: ssh: no alert on packet with Message Code: New Keys (21) (7.0.x backport)
+Bug #6538: drop: assertion failed !(PKT_IS_PSEUDOPKT(p)) && !PacketCheckAction(p, ACTION_DROP) (7.0.x backport)
+Bug #6537: detect/filestore: be more explicit about the U16_MAX limit per signature group head (7.0.x backport)
+Optimization #6774: app-layer/template: no limit on txs number (7.0.x backport)
+Feature #6740: dpdk: warn the user if user-settings are adjusted to the device capabilities (7.0.x backport)
+Task #6870: libhtp 0.5.47 (7.0.x backport)
+Task #6749: doc: mention X710 RX descriptor limitation (7.0.x backport)
+Documentation #6709: userguide/payload: fix explanation about bsize ranges (7.0.x backport)
+
7.0.3 -- 2024-02-08
Security #6717: http2: evasion by splitting header fields over frames (7.0.x backport)
diff --git a/configure b/configure
index f5956e9..fb5335d 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.71 for suricata 7.0.3.
+# Generated by GNU Autoconf 2.71 for suricata 7.0.4.
#
#
# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
@@ -682,8 +682,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='suricata'
PACKAGE_TARNAME='suricata'
-PACKAGE_VERSION='7.0.3'
-PACKAGE_STRING='suricata 7.0.3'
+PACKAGE_VERSION='7.0.4'
+PACKAGE_STRING='suricata 7.0.4'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -1661,7 +1661,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures suricata 7.0.3 to adapt to many kinds of systems.
+\`configure' configures suricata 7.0.4 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1732,7 +1732,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of suricata 7.0.3:";;
+ short | recursive ) echo "Configuration of suricata 7.0.4:";;
esac
cat <<\_ACEOF
@@ -1975,7 +1975,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-suricata configure 7.0.3
+suricata configure 7.0.4
generated by GNU Autoconf 2.71
Copyright (C) 2021 Free Software Foundation, Inc.
@@ -2567,7 +2567,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by suricata $as_me 7.0.3, which was
+It was created by suricata $as_me 7.0.4, which was
generated by GNU Autoconf 2.71. Invocation command line was
$ $0$ac_configure_args_raw
@@ -4062,7 +4062,7 @@ fi
# Define the identity of the package.
PACKAGE='suricata'
- VERSION='7.0.3'
+ VERSION='7.0.4'
printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h
@@ -22946,7 +22946,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by suricata $as_me 7.0.3, which was
+This file was extended by suricata $as_me 7.0.4, which was
generated by GNU Autoconf 2.71. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -23014,7 +23014,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config='$ac_cs_config_escaped'
ac_cs_version="\\
-suricata config.status 7.0.3
+suricata config.status 7.0.4
configured by $0, generated by GNU Autoconf 2.71,
with options \\"\$ac_cs_config\\"
@@ -24917,19 +24917,19 @@ fi
fi
pkg_failed=no
-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for htp >= 0.5.46" >&5
-printf %s "checking for htp >= 0.5.46... " >&6; }
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for htp >= 0.5.47" >&5
+printf %s "checking for htp >= 0.5.47... " >&6; }
if test -n "$LIBHTPMINVERSION_CFLAGS"; then
pkg_cv_LIBHTPMINVERSION_CFLAGS="$LIBHTPMINVERSION_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
- { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"htp >= 0.5.46\""; } >&5
- ($PKG_CONFIG --exists --print-errors "htp >= 0.5.46") 2>&5
+ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"htp >= 0.5.47\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "htp >= 0.5.47") 2>&5
ac_status=$?
printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
- pkg_cv_LIBHTPMINVERSION_CFLAGS=`$PKG_CONFIG --cflags "htp >= 0.5.46" 2>/dev/null`
+ pkg_cv_LIBHTPMINVERSION_CFLAGS=`$PKG_CONFIG --cflags "htp >= 0.5.47" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
@@ -24941,12 +24941,12 @@ if test -n "$LIBHTPMINVERSION_LIBS"; then
pkg_cv_LIBHTPMINVERSION_LIBS="$LIBHTPMINVERSION_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
- { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"htp >= 0.5.46\""; } >&5
- ($PKG_CONFIG --exists --print-errors "htp >= 0.5.46") 2>&5
+ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"htp >= 0.5.47\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "htp >= 0.5.47") 2>&5
ac_status=$?
printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
- pkg_cv_LIBHTPMINVERSION_LIBS=`$PKG_CONFIG --libs "htp >= 0.5.46" 2>/dev/null`
+ pkg_cv_LIBHTPMINVERSION_LIBS=`$PKG_CONFIG --libs "htp >= 0.5.47" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
@@ -24967,9 +24967,9 @@ else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
- LIBHTPMINVERSION_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "htp >= 0.5.46" 2>&1`
+ LIBHTPMINVERSION_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "htp >= 0.5.47" 2>&1`
else
- LIBHTPMINVERSION_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "htp >= 0.5.46" 2>&1`
+ LIBHTPMINVERSION_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "htp >= 0.5.47" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$LIBHTPMINVERSION_PKG_ERRORS" >&5
@@ -25060,7 +25060,7 @@ printf "%s\n" "yes" >&6; }
fi
if test "$libhtp_devver_found" = "no"; then
echo
- echo " ERROR! libhtp was found but it is neither >= 0.5.46, nor the dev 0.5.X"
+ echo " ERROR! libhtp was found but it is neither >= 0.5.47, nor the dev 0.5.X"
echo
exit 1
fi
@@ -35272,7 +35272,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by suricata $as_me 7.0.3, which was
+This file was extended by suricata $as_me 7.0.4, which was
generated by GNU Autoconf 2.71. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -35340,7 +35340,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config='$ac_cs_config_escaped'
ac_cs_version="\\
-suricata config.status 7.0.3
+suricata config.status 7.0.4
configured by $0, generated by GNU Autoconf 2.71,
with options \\"\$ac_cs_config\\"
diff --git a/configure.ac b/configure.ac
index 4b2bb94..72f333e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,4 @@
- AC_INIT([suricata],[7.0.3])
+ AC_INIT([suricata],[7.0.4])
m4_ifndef([AM_SILENT_RULES], [m4_define([AM_SILENT_RULES],[])])AM_SILENT_RULES([yes])
AC_CONFIG_HEADERS([src/autoconf.h])
AC_CONFIG_SRCDIR([src/suricata.c])
@@ -1575,12 +1575,12 @@
echo
exit 1
fi
- PKG_CHECK_MODULES(LIBHTPMINVERSION, [htp >= 0.5.46],[libhtp_minver_found="yes"],[libhtp_minver_found="no"])
+ PKG_CHECK_MODULES(LIBHTPMINVERSION, [htp >= 0.5.47],[libhtp_minver_found="yes"],[libhtp_minver_found="no"])
if test "$libhtp_minver_found" = "no"; then
PKG_CHECK_MODULES(LIBHTPDEVVERSION, [htp = 0.5.X],[libhtp_devver_found="yes"],[libhtp_devver_found="no"])
if test "$libhtp_devver_found" = "no"; then
echo
- echo " ERROR! libhtp was found but it is neither >= 0.5.46, nor the dev 0.5.X"
+ echo " ERROR! libhtp was found but it is neither >= 0.5.47, nor the dev 0.5.X"
echo
exit 1
fi
diff --git a/doc/userguide/configuration/suricata-yaml.rst b/doc/userguide/configuration/suricata-yaml.rst
index 6e2e0cd..6eea5e8 100644
--- a/doc/userguide/configuration/suricata-yaml.rst
+++ b/doc/userguide/configuration/suricata-yaml.rst
@@ -2130,7 +2130,11 @@ size of the cache is covered in the YAML file.
To be able to run DPDK on Intel cards, it is required to change the default
Intel driver to either `vfio-pci` or `igb_uio` driver. The process is
described in `DPDK manual page regarding Linux drivers
-<https://doc.dpdk.org/guides/linux_gsg/linux_drivers.html>`_.
+<https://doc.dpdk.org/guides/linux_gsg/linux_drivers.html>`_.
+The Intel NICs have the amount of RX/TX descriptors capped at 4096.
+This should be possible to change by manually compiling the DPDK while
+changing the value of respective macros for the desired drivers
+(e.g. IXGBE_MAX_RING_DESC/I40E_MAX_RING_DESC).
DPDK is natively supported by Mellanox and thus their NICs should work
"out of the box".
diff --git a/doc/userguide/rules/payload-keywords.rst b/doc/userguide/rules/payload-keywords.rst
index 9a609a2..086e11c 100644
--- a/doc/userguide/rules/payload-keywords.rst
+++ b/doc/userguide/rules/payload-keywords.rst
@@ -282,7 +282,7 @@ precision to the content match, previously this could have been done with ``isda
An optional operator can be specified; if no operator is present, the operator will
default to '='. When a relational operator is used, e.g., '<', '>' or '<>' (range),
-the bsize value will be compared using the relational operator. Ranges are inclusive.
+the bsize value will be compared using the relational operator. Ranges are exclusive.
If one or more ``content`` keywords precedes ``bsize``, each occurrence of ``content``
will be inspected and an error will be raised if the content length and the bsize
@@ -325,6 +325,9 @@ Examples of ``bsize`` in a rule:
alert dns any any -> any any (msg:"test bsize rule"; dns.query; content:"middle"; bsize:6<>15; sid:126; rev:1;)
+To emphasize how range works: in the example above, a match will occur if
+``bsize`` is greater than 6 and less than 15.
+
dsize
-----
diff --git a/doc/userguide/suricata.1 b/doc/userguide/suricata.1
index f8f3efa..9564f6a 100644
--- a/doc/userguide/suricata.1
+++ b/doc/userguide/suricata.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "SURICATA" "1" "Feb 08, 2024" "7.0.3" "Suricata"
+.TH "SURICATA" "1" "Mar 19, 2024" "7.0.4" "Suricata"
.SH NAME
suricata \- Suricata
.SH SYNOPSIS
diff --git a/doc/userguide/suricatactl-filestore.1 b/doc/userguide/suricatactl-filestore.1
index b470a26..40688f8 100644
--- a/doc/userguide/suricatactl-filestore.1
+++ b/doc/userguide/suricatactl-filestore.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "SURICATACTL-FILESTORE" "1" "Feb 08, 2024" "7.0.3" "Suricata"
+.TH "SURICATACTL-FILESTORE" "1" "Mar 19, 2024" "7.0.4" "Suricata"
.SH NAME
suricatactl-filestore \- Perform actions on filestore
.SH SYNOPSIS
diff --git a/doc/userguide/suricatactl.1 b/doc/userguide/suricatactl.1
index 9b24460..90b0801 100644
--- a/doc/userguide/suricatactl.1
+++ b/doc/userguide/suricatactl.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "SURICATACTL" "1" "Feb 08, 2024" "7.0.3" "Suricata"
+.TH "SURICATACTL" "1" "Mar 19, 2024" "7.0.4" "Suricata"
.SH NAME
suricatactl \- Suricata Control
.SH SYNOPSIS
diff --git a/doc/userguide/suricatasc.1 b/doc/userguide/suricatasc.1
index 2b41ae0..40a4bc5 100644
--- a/doc/userguide/suricatasc.1
+++ b/doc/userguide/suricatasc.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "SURICATASC" "1" "Feb 08, 2024" "7.0.3" "Suricata"
+.TH "SURICATASC" "1" "Mar 19, 2024" "7.0.4" "Suricata"
.SH NAME
suricatasc \- Tool to interact via unix socket
.SH SYNOPSIS
diff --git a/doc/userguide/userguide.pdf b/doc/userguide/userguide.pdf
index 42af865..2ff1757 100644
--- a/doc/userguide/userguide.pdf
+++ b/doc/userguide/userguide.pdf
Binary files differ
diff --git a/rust/derive/Cargo.toml b/rust/derive/Cargo.toml
index 4aa81b4..c697f68 100644
--- a/rust/derive/Cargo.toml
+++ b/rust/derive/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "suricata-derive"
-version = "7.0.3"
+version = "7.0.4"
edition = "2021"
[lib]
diff --git a/rust/dist/rust-bindings.h b/rust/dist/rust-bindings.h
index 77afc3b..56f350c 100644
--- a/rust/dist/rust-bindings.h
+++ b/rust/dist/rust-bindings.h
@@ -4424,6 +4424,8 @@
#define RS_MIME_MAX_TOKEN_LEN 255
+#define SSH_MAX_BANNER_LEN 256
+
#define HTTP2_DECOMPRESSION_CHUNK_SIZE 4096
#define HTTP2_STATIC_HEADERS_NUMBER 61
diff --git a/rust/src/applayertemplate/template.rs b/rust/src/applayertemplate/template.rs
index acc6c26..dbbc784 100644
--- a/rust/src/applayertemplate/template.rs
+++ b/rust/src/applayertemplate/template.rs
@@ -17,6 +17,7 @@
use super::parser;
use crate::applayer::{self, *};
+use crate::conf::conf_get;
use crate::core::{AppProto, Flow, ALPROTO_UNKNOWN, IPPROTO_TCP};
use nom7 as nom;
use std;
@@ -24,10 +25,14 @@ use std::collections::VecDeque;
use std::ffi::CString;
use std::os::raw::{c_char, c_int, c_void};
+static mut TEMPLATE_MAX_TX: usize = 256;
+
static mut ALPROTO_TEMPLATE: AppProto = ALPROTO_UNKNOWN;
#[derive(AppLayerEvent)]
-enum TemplateEvent {}
+enum TemplateEvent {
+ TooManyTransactions,
+}
pub struct TemplateTransaction {
tx_id: u64,
@@ -145,7 +150,13 @@ impl TemplateState {
SCLogNotice!("Request: {}", request);
let mut tx = self.new_tx();
tx.request = Some(request);
+ if self.transactions.len() >= unsafe {TEMPLATE_MAX_TX} {
+ tx.tx_data.set_event(TemplateEvent::TooManyTransactions as u8);
+ }
self.transactions.push_back(tx);
+ if self.transactions.len() >= unsafe {TEMPLATE_MAX_TX} {
+ return AppLayerResult::err();
+ }
}
Err(nom::Err::Incomplete(_)) => {
// Not enough data. This parser doesn't give us a good indication
@@ -429,6 +440,13 @@ pub unsafe extern "C" fn rs_template_register_parser() {
if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let _ = AppLayerRegisterParser(&parser, alproto);
}
+ if let Some(val) = conf_get("app-layer.protocols.template.max-tx") {
+ if let Ok(v) = val.parse::<usize>() {
+ TEMPLATE_MAX_TX = v;
+ } else {
+ SCLogError!("Invalid value for template.max-tx");
+ }
+ }
SCLogNotice!("Rust template parser registered.");
} else {
SCLogNotice!("Protocol detector and parser disabled for TEMPLATE.");
diff --git a/rust/src/dhcp/logger.rs b/rust/src/dhcp/logger.rs
index b29e215..3c86b1b 100644
--- a/rust/src/dhcp/logger.rs
+++ b/rust/src/dhcp/logger.rs
@@ -229,7 +229,7 @@ impl DHCPLogger {
fn log_opt_dns_server(&self, js: &mut JsonBuilder, option: &DHCPOptGeneric) -> Result<(), JsonError> {
js.open_array("dns_servers")?;
for i in 0..(option.data.len() / 4) {
- let val = dns_print_addr(&option.data[(i * 4)..(i * 4) + 4].to_vec());
+ let val = dns_print_addr(&option.data[(i * 4)..(i * 4) + 4]);
js.append_string(&val)?;
}
js.close()?;
@@ -239,7 +239,7 @@ impl DHCPLogger {
fn log_opt_routers(&self, js: &mut JsonBuilder, option: &DHCPOptGeneric) -> Result<(), JsonError> {
js.open_array("routers")?;
for i in 0..(option.data.len() / 4) {
- let val = dns_print_addr(&option.data[(i * 4)..(i * 4) + 4].to_vec());
+ let val = dns_print_addr(&option.data[(i * 4)..(i * 4) + 4]);
js.append_string(&val)?;
}
js.close()?;
diff --git a/rust/src/dns/log.rs b/rust/src/dns/log.rs
index 5212b1a..86325d5 100644
--- a/rust/src/dns/log.rs
+++ b/rust/src/dns/log.rs
@@ -368,7 +368,7 @@ pub fn dns_rcode_string(flags: u16) -> String {
}
/// Format bytes as an IP address string.
-pub fn dns_print_addr(addr: &Vec<u8>) -> std::string::String {
+pub fn dns_print_addr(addr: &[u8]) -> std::string::String {
if addr.len() == 4 {
return format!("{}.{}.{}.{}", addr[0], addr[1], addr[2], addr[3]);
} else if addr.len() == 16 {
diff --git a/rust/src/ike/ikev1.rs b/rust/src/ike/ikev1.rs
index 1e79c29..6f598f9 100644
--- a/rust/src/ike/ikev1.rs
+++ b/rust/src/ike/ikev1.rs
@@ -53,7 +53,7 @@ impl Ikev1ParticipantData {
}
pub fn update(
- &mut self, key_exchange: &str, nonce: &str, transforms: &Vec<Vec<SaAttribute>>,
+ &mut self, key_exchange: &str, nonce: &str, transforms: &[Vec<SaAttribute>],
) {
self.key_exchange = key_exchange.to_string();
self.nonce = nonce.to_string();
diff --git a/rust/src/jsonbuilder.rs b/rust/src/jsonbuilder.rs
index 9ff6234..7264be5 100644
--- a/rust/src/jsonbuilder.rs
+++ b/rust/src/jsonbuilder.rs
@@ -527,6 +527,22 @@ impl JsonBuilder {
}
}
+ /// Set a key and a string value (from bytes) on an object, with a limited size
+ pub fn set_string_from_bytes_limited(&mut self, key: &str, val: &[u8], limit: usize) -> Result<&mut Self, JsonError> {
+ let mut valtrunc = Vec::new();
+ let val = if val.len() > limit {
+ valtrunc.extend_from_slice(&val[..limit]);
+ valtrunc.extend_from_slice(b"[truncated]");
+ &valtrunc
+ } else {
+ val
+ };
+ match std::str::from_utf8(val) {
+ Ok(s) => self.set_string(key, s),
+ Err(_) => self.set_string(key, &try_string_from_bytes(val)?),
+ }
+ }
+
/// Set a key and a string field as the base64 encoded string of the value.
pub fn set_base64(&mut self, key: &str, val: &[u8]) -> Result<&mut Self, JsonError> {
match self.current_state() {
diff --git a/rust/src/mqtt/mqtt.rs b/rust/src/mqtt/mqtt.rs
index 3f110df..8260251 100644
--- a/rust/src/mqtt/mqtt.rs
+++ b/rust/src/mqtt/mqtt.rs
@@ -433,8 +433,8 @@ impl MQTTState {
let _pdu = Frame::new(
flow,
&stream_slice,
- input,
- current.len() as i64,
+ current,
+ (current.len() - rem.len()) as i64,
MQTTFrameType::Pdu as u8,
);
SCLogDebug!("request msg {:?}", msg);
@@ -518,8 +518,8 @@ impl MQTTState {
let _pdu = Frame::new(
flow,
&stream_slice,
- input,
- input.len() as i64,
+ current,
+ (current.len() - rem.len()) as i64,
MQTTFrameType::Pdu as u8,
);
diff --git a/rust/src/nfs/nfs.rs b/rust/src/nfs/nfs.rs
index dfb5e0e..4a1c362 100644
--- a/rust/src/nfs/nfs.rs
+++ b/rust/src/nfs/nfs.rs
@@ -497,7 +497,7 @@ impl NFSState {
}
// TODO maybe not enough users to justify a func
- pub fn mark_response_tx_done(&mut self, xid: u32, rpc_status: u32, nfs_status: u32, resp_handle: &Vec<u8>)
+ pub fn mark_response_tx_done(&mut self, xid: u32, rpc_status: u32, nfs_status: u32, resp_handle: &[u8])
{
if let Some(mytx) = self.get_tx_by_xid(xid) {
mytx.response_done = true;
diff --git a/rust/src/smb/log.rs b/rust/src/smb/log.rs
index 8496574..e242d02 100644
--- a/rust/src/smb/log.rs
+++ b/rust/src/smb/log.rs
@@ -38,7 +38,7 @@ fn debug_add_progress(jsb: &mut JsonBuilder, tx: &SMBTransaction) -> Result<(),
/// take in a file GUID (16 bytes) or FID (2 bytes). Also deal
/// with our frankenFID (2 bytes + 4 user_id)
-fn fuid_to_string(fuid: &Vec<u8>) -> String {
+fn fuid_to_string(fuid: &[u8]) -> String {
let fuid_len = fuid.len();
if fuid_len == 16 {
guid_to_string(fuid)
@@ -52,7 +52,7 @@ fn fuid_to_string(fuid: &Vec<u8>) -> String {
}
}
-fn guid_to_string(guid: &Vec<u8>) -> String {
+fn guid_to_string(guid: &[u8]) -> String {
if guid.len() == 16 {
let output = format!("{:02x}{:02x}{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
guid[3], guid[2], guid[1], guid[0],
diff --git a/rust/src/ssh/logger.rs b/rust/src/ssh/logger.rs
index 9bc7d7c..e83d288 100644
--- a/rust/src/ssh/logger.rs
+++ b/rust/src/ssh/logger.rs
@@ -15,7 +15,7 @@
* 02110-1301, USA.
*/
-use super::ssh::SSHTransaction;
+use super::ssh::{SSHTransaction, SSH_MAX_BANNER_LEN};
use crate::jsonbuilder::{JsonBuilder, JsonError};
fn log_ssh(tx: &SSHTransaction, js: &mut JsonBuilder) -> Result<bool, JsonError> {
@@ -24,9 +24,9 @@ fn log_ssh(tx: &SSHTransaction, js: &mut JsonBuilder) -> Result<bool, JsonError>
}
if !tx.cli_hdr.protover.is_empty() {
js.open_object("client")?;
- js.set_string_from_bytes("proto_version", &tx.cli_hdr.protover)?;
+ js.set_string_from_bytes_limited("proto_version", &tx.cli_hdr.protover, SSH_MAX_BANNER_LEN)?;
if !tx.cli_hdr.swver.is_empty() {
- js.set_string_from_bytes("software_version", &tx.cli_hdr.swver)?;
+ js.set_string_from_bytes_limited("software_version", &tx.cli_hdr.swver, SSH_MAX_BANNER_LEN)?;
}
if !tx.cli_hdr.hassh.is_empty() || !tx.cli_hdr.hassh_string.is_empty() {
js.open_object("hassh")?;
@@ -42,9 +42,9 @@ fn log_ssh(tx: &SSHTransaction, js: &mut JsonBuilder) -> Result<bool, JsonError>
}
if !tx.srv_hdr.protover.is_empty() {
js.open_object("server")?;
- js.set_string_from_bytes("proto_version", &tx.srv_hdr.protover)?;
+ js.set_string_from_bytes_limited("proto_version", &tx.srv_hdr.protover, SSH_MAX_BANNER_LEN)?;
if !tx.srv_hdr.swver.is_empty() {
- js.set_string_from_bytes("software_version", &tx.srv_hdr.swver)?;
+ js.set_string_from_bytes_limited("software_version", &tx.srv_hdr.swver, SSH_MAX_BANNER_LEN)?;
}
if !tx.srv_hdr.hassh.is_empty() || !tx.srv_hdr.hassh_string.is_empty() {
js.open_object("hassh")?;
diff --git a/rust/src/ssh/ssh.rs b/rust/src/ssh/ssh.rs
index 6280e0b..a058689 100644
--- a/rust/src/ssh/ssh.rs
+++ b/rust/src/ssh/ssh.rs
@@ -46,7 +46,7 @@ pub enum SSHConnectionState {
SshStateFinished = 3,
}
-const SSH_MAX_BANNER_LEN: usize = 256;
+pub const SSH_MAX_BANNER_LEN: usize = 256;
const SSH_RECORD_HEADER_LEN: usize = 6;
const SSH_MAX_REASSEMBLED_RECORD_LEN: usize = 65535;
@@ -256,7 +256,9 @@ impl SSHState {
return r;
}
Err(Err::Incomplete(_)) => {
- return AppLayerResult::incomplete(0_u32, (input.len() + 1) as u32);
+ // we do not need to retain these bytes
+ // we parsed them, we skip them
+ return AppLayerResult::ok();
}
Err(_e) => {
SCLogDebug!("SSH invalid banner {}", _e);
diff --git a/src/Makefile.am b/src/Makefile.am
index d831700..99e40c4 100755
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1281,7 +1281,8 @@ EXTRA_DIST = \
tests/detect-tls-version.c \
tests/detect-ipaddr.c \
tests/detect.c \
- tests/stream-tcp.c
+ tests/stream-tcp.c \
+ tests/output-json-stats.c
install-headers:
mkdir -p $(DESTDIR)${includedir}/suricata
diff --git a/src/Makefile.in b/src/Makefile.in
index 7317ef0..4f15923 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -2600,7 +2600,8 @@ EXTRA_DIST = \
tests/detect-tls-version.c \
tests/detect-ipaddr.c \
tests/detect.c \
- tests/stream-tcp.c
+ tests/stream-tcp.c \
+ tests/output-json-stats.c
# set the include path found by configure
diff --git a/src/app-layer-parser.c b/src/app-layer-parser.c
index 7783c07..e9b84ed 100644
--- a/src/app-layer-parser.c
+++ b/src/app-layer-parser.c
@@ -1444,7 +1444,6 @@ int AppLayerParserParse(ThreadVars *tv, AppLayerParserThreadCtx *alp_tctx, Flow
/* set the packets to no inspection and reassembly if required */
if (pstate->flags & APP_LAYER_PARSER_NO_INSPECTION) {
AppLayerParserSetEOF(pstate);
- FlowSetNoPayloadInspectionFlag(f);
if (f->proto == IPPROTO_TCP) {
StreamTcpDisableAppLayer(f);
@@ -1466,6 +1465,9 @@ int AppLayerParserParse(ThreadVars *tv, AppLayerParserThreadCtx *alp_tctx, Flow
StreamTcpSetSessionBypassFlag(ssn);
}
}
+ } else {
+ // for TCP, this is set after flushing
+ FlowSetNoPayloadInspectionFlag(f);
}
}
diff --git a/src/autoconf.h b/src/autoconf.h
index 8c42d6c..336c550 100644
--- a/src/autoconf.h
+++ b/src/autoconf.h
@@ -696,7 +696,7 @@
#define PACKAGE_NAME "suricata"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "suricata 7.0.3"
+#define PACKAGE_STRING "suricata 7.0.4"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "suricata"
@@ -705,7 +705,7 @@
#define PACKAGE_URL ""
/* Define to the version of this package. */
-#define PACKAGE_VERSION "7.0.3"
+#define PACKAGE_VERSION "7.0.4"
/* Pcre code unit width is 8 bits */
#define PCRE2_CODE_UNIT_WIDTH 8
@@ -723,7 +723,7 @@
/* #undef PROFILING */
/* Git revision */
-#define REVISION be68bbc4a 2024-02-08
+#define REVISION d8bad3b1a 2024-03-19
/* Define to 1 if all of the C90 standard headers exist (not just the ones
required in a freestanding environment). This macro is provided for
@@ -743,7 +743,7 @@
/* #undef UNITTESTS */
/* Version number of package */
-#define VERSION "7.0.3"
+#define VERSION "7.0.4"
/* Enable Windows WinDivert support for inline IDP */
/* #undef WINDIVERT */
diff --git a/src/conf-yaml-loader.c b/src/conf-yaml-loader.c
index 1bd107e..ea64563 100644
--- a/src/conf-yaml-loader.c
+++ b/src/conf-yaml-loader.c
@@ -185,7 +185,7 @@ static int ConfYamlParse(yaml_parser_t *parser, ConfNode *parent, int inseq, int
while (!done) {
if (!yaml_parser_parse(parser, &event)) {
- SCLogError("Failed to parse configuration file at line %" PRIuMAX ": %s\n",
+ SCLogError("Failed to parse configuration file at line %" PRIuMAX ": %s",
(uintmax_t)parser->problem_mark.line, parser->problem);
retval = -1;
break;
diff --git a/src/decode-pppoe.c b/src/decode-pppoe.c
index f884085..eb5e6ac 100644
--- a/src/decode-pppoe.c
+++ b/src/decode-pppoe.c
@@ -80,11 +80,6 @@ int DecodePPPOEDiscovery(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p,
return TM_ECODE_OK;
}
- /* parse any tags we have in the packet */
-
- uint32_t tag_length = 0;
- PPPOEDiscoveryTag* pppoedt = (PPPOEDiscoveryTag*) (p->pppoedh + PPPOE_DISCOVERY_HEADER_MIN_LEN);
-
uint32_t pppoe_length = SCNtohs(p->pppoedh->pppoe_length);
uint32_t packet_length = len - PPPOE_DISCOVERY_HEADER_MIN_LEN ;
@@ -97,29 +92,29 @@ int DecodePPPOEDiscovery(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p,
return TM_ECODE_OK;
}
- while (pppoedt < (PPPOEDiscoveryTag*) (pkt + (len - sizeof(PPPOEDiscoveryTag))) && pppoe_length >=4 && packet_length >=4)
- {
#ifdef DEBUG
+ /* parse any tags we have in the packet */
+
+ uint32_t tag_length = 0;
+ const uint8_t *pkt_pppoedt = pkt + PPPOE_DISCOVERY_HEADER_MIN_LEN;
+
+ // packet_length >= pppoe_length so we have enough data
+ while (pppoe_length >= sizeof(PPPOEDiscoveryTag)) {
+ PPPOEDiscoveryTag *pppoedt = (PPPOEDiscoveryTag *)pkt_pppoedt;
uint16_t tag_type = SCNtohs(pppoedt->pppoe_tag_type);
-#endif
+ // upgrade to u32 to avoid u16 overflow
tag_length = SCNtohs(pppoedt->pppoe_tag_length);
SCLogDebug ("PPPoE Tag type %x, length %"PRIu32, tag_type, tag_length);
if (pppoe_length >= (4 + tag_length)) {
pppoe_length -= (4 + tag_length);
+ pkt_pppoedt = pkt_pppoedt + (4 + tag_length);
} else {
pppoe_length = 0; // don't want an underflow
}
-
- if (packet_length >= 4 + tag_length) {
- packet_length -= (4 + tag_length);
- } else {
- packet_length = 0; // don't want an underflow
- }
-
- pppoedt = pppoedt + (4 + tag_length);
}
+#endif
return TM_ECODE_OK;
}
diff --git a/src/detect-engine-address.c b/src/detect-engine-address.c
index ac10e14..191e8f5 100644
--- a/src/detect-engine-address.c
+++ b/src/detect-engine-address.c
@@ -1362,23 +1362,28 @@ void DetectAddressMapFree(DetectEngineCtx *de_ctx)
return;
}
-static int DetectAddressMapAdd(DetectEngineCtx *de_ctx, const char *string,
- DetectAddressHead *address, bool contains_negation)
+static bool DetectAddressMapAdd(DetectEngineCtx *de_ctx, const char *string,
+ DetectAddressHead *address, bool contains_negation)
{
DetectAddressMap *map = SCCalloc(1, sizeof(*map));
if (map == NULL)
- return -1;
+ return false;
map->string = SCStrdup(string);
if (map->string == NULL) {
SCFree(map);
- return -1;
+ return false;
}
map->address = address;
map->contains_negation = contains_negation;
- BUG_ON(HashListTableAdd(de_ctx->address_table, (void *)map, 0) != 0);
- return 0;
+ if (HashListTableAdd(de_ctx->address_table, map, 0) != 0) {
+ SCFree(map->string);
+ SCFree(map);
+ return false;
+ }
+
+ return true;
}
static const DetectAddressMap *DetectAddressMapLookup(DetectEngineCtx *de_ctx,
@@ -1471,8 +1476,11 @@ const DetectAddressHead *DetectParseAddress(DetectEngineCtx *de_ctx,
*contains_negation = false;
}
- DetectAddressMapAdd((DetectEngineCtx *)de_ctx, string, head,
- *contains_negation);
+ if (!DetectAddressMapAdd((DetectEngineCtx *)de_ctx, string, head, *contains_negation)) {
+ DetectAddressHeadFree(head);
+ return NULL;
+ }
+
return head;
}
diff --git a/src/detect-engine-iponly.c b/src/detect-engine-iponly.c
index 03b4649..b163277 100644
--- a/src/detect-engine-iponly.c
+++ b/src/detect-engine-iponly.c
@@ -82,16 +82,78 @@ static IPOnlyCIDRItem *IPOnlyCIDRItemNew(void)
SCReturnPtr(item, "IPOnlyCIDRItem");
}
-static uint8_t IPOnlyCIDRItemCompare(IPOnlyCIDRItem *head,
- IPOnlyCIDRItem *item)
+/**
+ * \brief Compares two list items
+ *
+ * \retval An integer less than, equal to, or greater than zero if lhs is
+ * considered to be respectively less than, equal to, or greater than
+ * rhs.
+ */
+static int IPOnlyCIDRItemCompareReal(const IPOnlyCIDRItem *lhs, const IPOnlyCIDRItem *rhs)
{
- uint8_t i = 0;
- for (; i < head->netmask / 32 || i < 1; i++) {
- if (item->ip[i] < head->ip[i])
- //if (*(uint8_t *)(item->ip + i) < *(uint8_t *)(head->ip + i))
- return 1;
+ if (lhs->netmask == rhs->netmask) {
+ uint8_t i = 0;
+ for (; i < lhs->netmask / 32 || i < 1; i++) {
+ if (lhs->ip[i] < rhs->ip[i])
+ return -1;
+ if (lhs->ip[i] > rhs->ip[i])
+ return 1;
+ }
+ return 0;
}
- return 0;
+
+ return lhs->netmask < rhs->netmask ? -1 : 1;
+}
+
+static int IPOnlyCIDRItemCompare(const void *lhsv, const void *rhsv)
+{
+ const IPOnlyCIDRItem *lhs = *(const IPOnlyCIDRItem **)lhsv;
+ const IPOnlyCIDRItem *rhs = *(const IPOnlyCIDRItem **)rhsv;
+
+ return IPOnlyCIDRItemCompareReal(lhs, rhs);
+}
+
+static void IPOnlyCIDRListQSort(IPOnlyCIDRItem **head)
+{
+ if (unlikely(head == NULL || *head == NULL))
+ return;
+
+ // First count the number of elements in the list
+ size_t len = 0;
+ IPOnlyCIDRItem *curr = *head;
+
+ while (curr) {
+ curr = curr->next;
+ len++;
+ }
+
+ // Place a pointer to the list item in an array for sorting
+ IPOnlyCIDRItem **tmp = SCMalloc(len * sizeof(IPOnlyCIDRItem *));
+
+ if (unlikely(tmp == NULL)) {
+ SCLogError("Failed to allocate enough memory to sort IP-only CIDR items.");
+ return;
+ }
+
+ curr = *head;
+ for (size_t i = 0; i < len; i++) {
+ tmp[i] = curr;
+ curr = curr->next;
+ }
+
+ // Perform the sort using the qsort algorithm
+ qsort(tmp, len, sizeof(IPOnlyCIDRItem *), IPOnlyCIDRItemCompare);
+
+ // Update the links to the next element
+ *head = tmp[0];
+
+ for (size_t i = 0; i + 1 < len; i++) {
+ tmp[i]->next = tmp[i + 1];
+ }
+
+ tmp[len - 1]->next = NULL;
+
+ SCFree(tmp);
}
//declaration for using it already
@@ -349,11 +411,9 @@ error:
return -1;
}
-
/**
* \brief This function insert a IPOnlyCIDRItem
- * to a list of IPOnlyCIDRItems sorted by netmask
- * ascending
+ * to a list of IPOnlyCIDRItems
* \param head Pointer to the head of IPOnlyCIDRItems list
* \param item Pointer to the item to insert in the list
*
@@ -362,37 +422,12 @@ error:
static IPOnlyCIDRItem *IPOnlyCIDRItemInsertReal(IPOnlyCIDRItem *head,
IPOnlyCIDRItem *item)
{
- IPOnlyCIDRItem *it, *prev = NULL;
-
if (item == NULL)
return head;
- /* Compare with the head */
- if (item->netmask < head->netmask || (item->netmask == head->netmask && IPOnlyCIDRItemCompare(head, item))) {
- item->next = head;
- return item;
- }
-
- if (item->netmask == head->netmask && !IPOnlyCIDRItemCompare(head, item)) {
- item->next = head->next;
- head->next = item;
- return head;
- }
-
- for (prev = it = head;
- it != NULL && it->netmask < item->netmask;
- it = it->next)
- prev = it;
-
- if (it == NULL) {
- prev->next = item;
- item->next = NULL;
- } else {
- item->next = it;
- prev->next = item;
- }
-
- return head;
+ /* Always insert item as head */
+ item->next = head;
+ return item;
}
/**
@@ -1112,6 +1147,9 @@ void IPOnlyPrepare(DetectEngineCtx *de_ctx)
IPOnlyCIDRListPrint((de_ctx->io_ctx).ip_dst);
*/
+ IPOnlyCIDRListQSort(&(de_ctx->io_ctx).ip_src);
+ IPOnlyCIDRListQSort(&(de_ctx->io_ctx).ip_dst);
+
IPOnlyCIDRItem *src, *dst;
SCRadixNode *node = NULL;
@@ -1729,64 +1767,124 @@ end:
static int IPOnlyTestSig04 (void)
{
int result = 1;
-
IPOnlyCIDRItem *head = NULL;
- IPOnlyCIDRItem *new;
- new = IPOnlyCIDRItemNew();
- new->netmask= 10;
+ // Test a linked list of size 0, 1, 2, ..., 5
+ for (int size = 0; size < 6; size++) {
+ IPOnlyCIDRItem *new = NULL;
- head = IPOnlyCIDRItemInsert(head, new);
+ if (size > 0) {
+ new = IPOnlyCIDRItemNew();
+ new->netmask = 10;
+ new->ip[0] = 3;
- new = IPOnlyCIDRItemNew();
- new->netmask= 11;
+ head = IPOnlyCIDRItemInsert(head, new);
+ }
- head = IPOnlyCIDRItemInsert(head, new);
+ if (size > 1) {
+ new = IPOnlyCIDRItemNew();
+ new->netmask = 11;
- new = IPOnlyCIDRItemNew();
- new->netmask= 9;
+ head = IPOnlyCIDRItemInsert(head, new);
+ }
- head = IPOnlyCIDRItemInsert(head, new);
+ if (size > 2) {
+ new = IPOnlyCIDRItemNew();
+ new->netmask = 9;
- new = IPOnlyCIDRItemNew();
- new->netmask= 10;
+ head = IPOnlyCIDRItemInsert(head, new);
+ }
- head = IPOnlyCIDRItemInsert(head, new);
+ if (size > 3) {
+ new = IPOnlyCIDRItemNew();
+ new->netmask = 10;
+ new->ip[0] = 1;
- new = IPOnlyCIDRItemNew();
- new->netmask= 10;
+ head = IPOnlyCIDRItemInsert(head, new);
+ }
- head = IPOnlyCIDRItemInsert(head, new);
+ if (size > 4) {
+ new = IPOnlyCIDRItemNew();
+ new->netmask = 10;
+ new->ip[0] = 2;
- IPOnlyCIDRListPrint(head);
- new = head;
- if (new->netmask != 9) {
- result = 0;
- goto end;
- }
- new = new->next;
- if (new->netmask != 10) {
- result = 0;
- goto end;
- }
- new = new->next;
- if (new->netmask != 10) {
- result = 0;
- goto end;
- }
- new = new->next;
- if (new->netmask != 10) {
- result = 0;
- goto end;
- }
- new = new->next;
- if (new->netmask != 11) {
- result = 0;
- goto end;
+ head = IPOnlyCIDRItemInsert(head, new);
+ }
+
+ IPOnlyCIDRListPrint(head);
+
+ IPOnlyCIDRListQSort(&head);
+
+ if (size == 0) {
+ if (head != NULL) {
+ result = 0;
+ goto end;
+ }
+ }
+
+ /**
+ * Validate the following list entries for each size
+ * 1 - 10
+ * 2 - 10<3> 11
+ * 3 - 9 10<3> 11
+ * 4 - 9 10<1> 10<3> 11
+ * 5 - 9 10<1> 10<2> 10<3> 11
+ */
+ new = head;
+ if (size >= 3) {
+ if (new->netmask != 9) {
+ result = 0;
+ goto end;
+ }
+ new = new->next;
+ }
+
+ if (size >= 4) {
+ if (new->netmask != 10 || new->ip[0] != 1) {
+ result = 0;
+ goto end;
+ }
+ new = new->next;
+ }
+
+ if (size >= 5) {
+ if (new->netmask != 10 || new->ip[0] != 2) {
+ result = 0;
+ goto end;
+ }
+ new = new->next;
+ }
+
+ if (size >= 1) {
+ if (new->netmask != 10 || new->ip[0] != 3) {
+ result = 0;
+ goto end;
+ }
+ new = new->next;
+ }
+
+ if (size >= 2) {
+ if (new->netmask != 11) {
+ result = 0;
+ goto end;
+ }
+ new = new->next;
+ }
+
+ if (new != NULL) {
+ result = 0;
+ goto end;
+ }
+
+ IPOnlyCIDRListFree(head);
+ head = NULL;
}
end:
- IPOnlyCIDRListFree(head);
+ if (head) {
+ IPOnlyCIDRListFree(head);
+ head = NULL;
+ }
return result;
}
diff --git a/src/detect-engine-loader.c b/src/detect-engine-loader.c
index e41f277..0cdb453 100644
--- a/src/detect-engine-loader.c
+++ b/src/detect-engine-loader.c
@@ -456,6 +456,12 @@ int DetectLoadersSync(void)
done = true;
}
SCMutexUnlock(&loader->m);
+ if (!done) {
+ /* nudge thread in case it's sleeping */
+ SCCtrlMutexLock(loader->tv->ctrl_mutex);
+ pthread_cond_broadcast(loader->tv->ctrl_cond);
+ SCCtrlMutexUnlock(loader->tv->ctrl_mutex);
+ }
}
SCMutexLock(&loader->m);
if (loader->result != 0) {
@@ -511,7 +517,9 @@ static void TmThreadWakeupDetectLoaderThreads(void)
while (tv != NULL) {
if (strncmp(tv->name,"DL#",3) == 0) {
BUG_ON(tv->ctrl_cond == NULL);
+ SCCtrlMutexLock(tv->ctrl_mutex);
pthread_cond_broadcast(tv->ctrl_cond);
+ SCCtrlMutexUnlock(tv->ctrl_mutex);
}
tv = tv->next;
}
@@ -555,6 +563,9 @@ static TmEcode DetectLoaderThreadInit(ThreadVars *t, const void *initdata, void
/* pass thread data back to caller */
*data = ftd;
+ DetectLoaderControl *loader = &loaders[ftd->instance];
+ loader->tv = t;
+
return TM_ECODE_OK;
}
diff --git a/src/detect-engine-loader.h b/src/detect-engine-loader.h
index 7ffb8c8..f43ff9a 100644
--- a/src/detect-engine-loader.h
+++ b/src/detect-engine-loader.h
@@ -43,9 +43,14 @@ typedef struct DetectLoaderTask_ {
typedef struct DetectLoaderControl_ {
int id;
- int result; /* 0 for ok, error otherwise */
- SCMutex m;
- TAILQ_HEAD(, DetectLoaderTask_) task_list;
+ ThreadVars *tv; /**< loader threads threadvars - for waking them up */
+
+ /** struct to group members and mutex */
+ struct {
+ SCMutex m; /**< mutex protects result and task_list */
+ int result; /**< 0 for ok, error otherwise */
+ TAILQ_HEAD(, DetectLoaderTask_) task_list;
+ };
} DetectLoaderControl;
int DetectLoaderQueueTask(int loader_id, LoaderFunc Func, void *func_ctx, LoaderFreeFunc FreeFunc);
diff --git a/src/detect-engine-mpm.c b/src/detect-engine-mpm.c
index f091a3d..ede3e59 100644
--- a/src/detect-engine-mpm.c
+++ b/src/detect-engine-mpm.c
@@ -772,18 +772,12 @@ int SignatureHasPacketContent(const Signature *s)
{
SCEnter();
- if (s == NULL) {
- SCReturnInt(0);
- }
-
if (!(s->proto.proto[IPPROTO_TCP / 8] & 1 << (IPPROTO_TCP % 8))) {
SCReturnInt(1);
}
- if ((s->init_data != NULL && s->init_data->smlists[DETECT_SM_LIST_PMATCH] == NULL) ||
- (s->init_data == NULL && s->sm_arrays[DETECT_SM_LIST_PMATCH] == NULL))
- {
- SCLogDebug("no mpm");
+ if (s->init_data->smlists[DETECT_SM_LIST_PMATCH] == NULL) {
+ SCLogDebug("no PMATCH");
SCReturnInt(0);
}
@@ -808,18 +802,12 @@ int SignatureHasStreamContent(const Signature *s)
{
SCEnter();
- if (s == NULL) {
- SCReturnInt(0);
- }
-
if (!(s->proto.proto[IPPROTO_TCP / 8] & 1 << (IPPROTO_TCP % 8))) {
SCReturnInt(0);
}
- if ((s->init_data != NULL && s->init_data->smlists[DETECT_SM_LIST_PMATCH] == NULL) ||
- (s->init_data == NULL && s->sm_arrays[DETECT_SM_LIST_PMATCH] == NULL))
- {
- SCLogDebug("no mpm");
+ if (s->init_data->smlists[DETECT_SM_LIST_PMATCH] == NULL) {
+ SCLogDebug("no PMATCH");
SCReturnInt(0);
}
diff --git a/src/detect-engine-siggroup.c b/src/detect-engine-siggroup.c
index 67af1c1..c75a5d0 100644
--- a/src/detect-engine-siggroup.c
+++ b/src/detect-engine-siggroup.c
@@ -48,6 +48,7 @@
#include "util-error.h"
#include "util-debug.h"
+#include "util-validate.h"
#include "util-cidr.h"
#include "util-unittest.h"
#include "util-unittest-helper.h"
@@ -637,6 +638,8 @@ void SigGroupHeadSetFilestoreCount(DetectEngineCtx *de_ctx, SigGroupHead *sgh)
continue;
if (SignatureIsFilestoring(s)) {
+            // should be ensured by caller that we do not overflow
+ DEBUG_VALIDATE_BUG_ON(sgh->filestore_cnt == UINT16_MAX);
sgh->filestore_cnt++;
}
}
diff --git a/src/detect-filestore.c b/src/detect-filestore.c
index c53a93d..c905f9b 100644
--- a/src/detect-filestore.c
+++ b/src/detect-filestore.c
@@ -118,7 +118,8 @@ static int FilestorePostMatchWithOptions(Packet *p, Flow *f, const DetectFilesto
switch (filestore->direction) {
case FILESTORE_DIR_DEFAULT:
rule_dir = 1;
- break;
+ // will use both sides if scope is not default
+ // fallthrough
case FILESTORE_DIR_BOTH:
toserver_dir = 1;
toclient_dir = 1;
@@ -160,16 +161,28 @@ static int FilestorePostMatchWithOptions(Packet *p, Flow *f, const DetectFilesto
AppLayerTxData *txd = AppLayerParserGetTxData(f->proto, f->alproto, txv);
DEBUG_VALIDATE_BUG_ON(txd == NULL);
if (txd != NULL) {
- txd->file_flags |= FLOWFILE_STORE;
+ if (toclient_dir) {
+ txd->file_flags |= FLOWFILE_STORE_TC;
+ }
+ if (toserver_dir) {
+ txd->file_flags |= FLOWFILE_STORE_TS;
+ }
}
}
} else if (this_flow) {
/* set in flow and AppLayerStateData */
- f->file_flags |= FLOWFILE_STORE;
-
AppLayerStateData *sd = AppLayerParserGetStateData(f->proto, f->alproto, f->alstate);
- if (sd != NULL) {
- sd->file_flags |= FLOWFILE_STORE;
+ if (toclient_dir) {
+ f->file_flags |= FLOWFILE_STORE_TC;
+ if (sd != NULL) {
+ sd->file_flags |= FLOWFILE_STORE_TC;
+ }
+ }
+ if (toserver_dir) {
+ f->file_flags |= FLOWFILE_STORE_TS;
+ if (sd != NULL) {
+ sd->file_flags |= FLOWFILE_STORE_TS;
+ }
}
} else {
FileStoreFileById(fc, file_id);
@@ -333,6 +346,11 @@ static int DetectFilestoreSetup (DetectEngineCtx *de_ctx, Signature *s, const ch
static bool warn_not_configured = false;
static uint32_t de_version = 0;
+ if (de_ctx->filestore_cnt == UINT16_MAX) {
+ SCLogError("Cannot have more than 65535 filestore signatures");
+ return -1;
+ }
+
/* Check on first-time loads (includes following a reload) */
if (!warn_not_configured || (de_ctx->version != de_version)) {
if (de_version != de_ctx->version) {
@@ -476,6 +494,7 @@ static int DetectFilestoreSetup (DetectEngineCtx *de_ctx, Signature *s, const ch
SigMatchAppendSMToList(s, sm, DETECT_SM_LIST_POSTMATCH);
s->flags |= SIG_FLAG_FILESTORE;
+ de_ctx->filestore_cnt++;
if (match)
pcre2_match_data_free(match);
diff --git a/src/detect-http-header.c b/src/detect-http-header.c
index 2803d05..cd36ea5 100644
--- a/src/detect-http-header.c
+++ b/src/detect-http-header.c
@@ -600,6 +600,13 @@ typedef struct HttpMultiBufHeaderThreadData {
static void *HttpMultiBufHeaderThreadDataInit(void *data)
{
HttpMultiBufHeaderThreadData *td = SCCalloc(1, sizeof(*td));
+
+ /* This return value check to satisfy our Cocci malloc checks. */
+ if (td == NULL) {
+ SCLogError("failed to allocate %" PRIuMAX " bytes: %s", (uintmax_t)sizeof(*td),
+ strerror(errno));
+ return NULL;
+ }
return td;
}
@@ -668,10 +675,11 @@ static InspectionBuffer *GetHttp1HeaderData(DetectEngineThreadCtx *det_ctx, cons
size_t size = size1 + size2 + 2;
if (hdr_td->items[i].len < size) {
// Use realloc, as this pointer is not freed until HttpMultiBufHeaderThreadDataFree
- hdr_td->items[i].buffer = SCRealloc(hdr_td->items[i].buffer, size);
- if (unlikely(hdr_td->items[i].buffer == NULL)) {
+ void *tmp = SCRealloc(hdr_td->items[i].buffer, size);
+ if (unlikely(tmp == NULL)) {
return NULL;
}
+ hdr_td->items[i].buffer = tmp;
}
memcpy(hdr_td->items[i].buffer, bstr_ptr(h->name), size1);
hdr_td->items[i].buffer[size1] = ':';
diff --git a/src/detect-parse.c b/src/detect-parse.c
index b5e214d..c3232b9 100644
--- a/src/detect-parse.c
+++ b/src/detect-parse.c
@@ -1540,6 +1540,7 @@ Signature *SigAlloc (void)
sig->init_data->buffers = SCCalloc(8, sizeof(SignatureInitDataBuffer));
if (sig->init_data->buffers == NULL) {
+ SCFree(sig->init_data);
SCFree(sig);
return NULL;
}
diff --git a/src/detect-tls-certs.c b/src/detect-tls-certs.c
index a020437..f233779 100644
--- a/src/detect-tls-certs.c
+++ b/src/detect-tls-certs.c
@@ -70,6 +70,7 @@ static int g_tls_certs_buffer_id = 0;
struct TlsCertsGetDataArgs {
uint32_t local_id; /**< used as index into thread inspect array */
SSLCertsChain *cert;
+ const uint8_t flags;
};
typedef struct PrefilterMpmTlsCerts {
@@ -150,7 +151,7 @@ static InspectionBuffer *TlsCertsGetData(DetectEngineThreadCtx *det_ctx,
const SSLState *ssl_state = (SSLState *)f->alstate;
const SSLStateConnp *connp;
- if (f->flags & STREAM_TOSERVER) {
+ if (cbdata->flags & STREAM_TOSERVER) {
connp = &ssl_state->client_connp;
} else {
connp = &ssl_state->server_connp;
@@ -185,7 +186,7 @@ static uint8_t DetectEngineInspectTlsCerts(DetectEngineCtx *de_ctx, DetectEngine
transforms = engine->v2.transforms;
}
- struct TlsCertsGetDataArgs cbdata = { 0, NULL };
+ struct TlsCertsGetDataArgs cbdata = { .local_id = 0, .cert = NULL, .flags = flags };
while (1)
{
@@ -222,7 +223,7 @@ static void PrefilterTxTlsCerts(DetectEngineThreadCtx *det_ctx, const void *pect
const MpmCtx *mpm_ctx = ctx->mpm_ctx;
const int list_id = ctx->list_id;
- struct TlsCertsGetDataArgs cbdata = { 0, NULL };
+ struct TlsCertsGetDataArgs cbdata = { .local_id = 0, .cert = NULL, .flags = flags };
while (1)
{
diff --git a/src/detect.h b/src/detect.h
index 0186545..587a29c 100644
--- a/src/detect.h
+++ b/src/detect.h
@@ -1039,6 +1039,9 @@ typedef struct DetectEngineCtx_ {
/* Track rule requirements for reporting after loading rules. */
SCDetectRequiresStatus *requirements;
+
+ /* number of signatures using filestore, limited as u16 */
+ uint16_t filestore_cnt;
} DetectEngineCtx;
/* Engine groups profiles (low, medium, high, custom) */
diff --git a/src/flow-timeout.c b/src/flow-timeout.c
index 6a9b707..e5d2794 100644
--- a/src/flow-timeout.c
+++ b/src/flow-timeout.c
@@ -213,7 +213,7 @@ static inline Packet *FlowForceReassemblyPseudoPacketSetup(Packet *p,
}
p->tcph->th_offx2 = 0x50;
- p->tcph->th_flags |= TH_ACK;
+ p->tcph->th_flags = 0;
p->tcph->th_win = 10;
p->tcph->th_urp = 0;
diff --git a/src/flow-worker.c b/src/flow-worker.c
index a20e053..32fbe09 100644
--- a/src/flow-worker.c
+++ b/src/flow-worker.c
@@ -391,8 +391,16 @@ static inline void FlowWorkerStreamTCPUpdate(ThreadVars *tv, FlowWorkerThreadDat
StreamTcp(tv, p, fw->stream_thread, &fw->pq);
FLOWWORKER_PROFILING_END(p, PROFILE_FLOWWORKER_STREAM);
- if (FlowChangeProto(p->flow)) {
+ // this is the first packet that sets no payload inspection
+ bool setting_nopayload =
+ p->flow->alparser &&
+ AppLayerParserStateIssetFlag(p->flow->alparser, APP_LAYER_PARSER_NO_INSPECTION) &&
+ !(p->flags & PKT_NOPAYLOAD_INSPECTION);
+ if (FlowChangeProto(p->flow) || setting_nopayload) {
StreamTcpDetectLogFlush(tv, fw->stream_thread, p->flow, p, &fw->pq);
+ if (setting_nopayload) {
+ FlowSetNoPayloadInspectionFlag(p->flow);
+ }
AppLayerParserStateSetFlag(p->flow->alparser, APP_LAYER_PARSER_EOF_TS);
AppLayerParserStateSetFlag(p->flow->alparser, APP_LAYER_PARSER_EOF_TC);
}
@@ -430,6 +438,10 @@ static inline void FlowWorkerStreamTCPUpdate(ThreadVars *tv, FlowWorkerThreadDat
TmqhOutputPacketpool(tv, x);
}
}
+ if (FlowChangeProto(p->flow) && p->flow->flags & FLOW_ACTION_DROP) {
+ // in case f->flags & FLOW_ACTION_DROP was set by one of the dequeued packets
+ PacketDrop(p, ACTION_DROP, PKT_DROP_REASON_FLOW_DROP);
+ }
}
static void FlowWorkerFlowTimeout(ThreadVars *tv, Packet *p, FlowWorkerThreadData *fw,
diff --git a/src/flow.h b/src/flow.h
index 0a730e0..9866b56 100644
--- a/src/flow.h
+++ b/src/flow.h
@@ -142,8 +142,9 @@ typedef struct AppLayerParserState_ AppLayerParserState;
#define FLOWFILE_NO_SIZE_TS BIT_U16(10)
#define FLOWFILE_NO_SIZE_TC BIT_U16(11)
-/** store all files in the flow */
-#define FLOWFILE_STORE BIT_U16(12)
+/** store files in the flow */
+#define FLOWFILE_STORE_TS BIT_U16(12)
+#define FLOWFILE_STORE_TC BIT_U16(13)
#define FLOWFILE_NONE_TS (FLOWFILE_NO_MAGIC_TS | \
FLOWFILE_NO_STORE_TS | \
diff --git a/src/output-filestore.c b/src/output-filestore.c
index dcf4c1a..d23560c 100644
--- a/src/output-filestore.c
+++ b/src/output-filestore.c
@@ -194,12 +194,8 @@ static int OutputFilestoreLogger(ThreadVars *tv, void *thread_data, const Packet
SCLogDebug("ff %p, data %p, data_len %u", ff, data, data_len);
- char base_filename[PATH_MAX] = "";
- snprintf(base_filename, sizeof(base_filename), "%s/file.%u",
- ctx->tmpdir, ff->file_store_id);
- snprintf(filename, sizeof(filename), "%s", base_filename);
-
if (flags & OUTPUT_FILEDATA_FLAG_OPEN) {
+ snprintf(filename, sizeof(filename), "%s/file.%u", ctx->tmpdir, ff->file_store_id);
file_fd = open(filename, O_CREAT | O_TRUNC | O_NOFOLLOW | O_WRONLY,
0644);
if (file_fd == -1) {
@@ -220,6 +216,7 @@ static int OutputFilestoreLogger(ThreadVars *tv, void *thread_data, const Packet
/* we can get called with a NULL ffd when we need to close */
} else if (data != NULL) {
if (ff->fd == -1) {
+ snprintf(filename, sizeof(filename), "%s/file.%u", ctx->tmpdir, ff->file_store_id);
file_fd = open(filename, O_APPEND | O_NOFOLLOW | O_WRONLY);
if (file_fd == -1) {
StatsIncr(tv, aft->fs_error_counter);
@@ -235,6 +232,7 @@ static int OutputFilestoreLogger(ThreadVars *tv, void *thread_data, const Packet
if (file_fd != -1) {
ssize_t r = write(file_fd, (const void *)data, (size_t)data_len);
if (r == -1) {
+ snprintf(filename, sizeof(filename), "%s/file.%u", ctx->tmpdir, ff->file_store_id);
StatsIncr(tv, aft->fs_error_counter);
WARN_ONCE(WOT_WRITE, "Filestore (v2) failed to write to %s: %s", filename,
strerror(errno));
diff --git a/src/output-json-stats.c b/src/output-json-stats.c
index 718298e..33f98af 100644
--- a/src/output-json-stats.c
+++ b/src/output-json-stats.c
@@ -36,6 +36,7 @@
#include "util-print.h"
#include "util-time.h"
#include "util-unittest.h"
+#include "util-validate.h"
#include "util-debug.h"
#include "output.h"
@@ -265,20 +266,30 @@ json_t *StatsToJSON(const StatsTable *st, uint8_t flags)
for (x = 0; x < st->ntstats; x++) {
uint32_t offset = x * st->nstats;
+        // Stats for this thread.
+ json_t *thread = json_object();
+ if (unlikely(thread == NULL)) {
+ json_decref(js_stats);
+ json_decref(threads);
+ return NULL;
+ }
+
/* for each counter */
for (u = offset; u < (offset + st->nstats); u++) {
if (st->tstats[u].name == NULL)
continue;
+ // Seems this holds, but assert in debug builds.
+ DEBUG_VALIDATE_BUG_ON(
+ strcmp(st->tstats[offset].tm_name, st->tstats[u].tm_name) != 0);
+
json_t *js_type = NULL;
const char *stat_name = st->tstats[u].short_name;
if (st->tstats[u].short_name == NULL) {
stat_name = st->tstats[u].name;
js_type = threads;
} else {
- char str[256];
- snprintf(str, sizeof(str), "%s.%s", st->tstats[u].tm_name, st->tstats[u].name);
- js_type = OutputStats2Json(threads, str);
+ js_type = OutputStats2Json(thread, st->tstats[u].name);
}
if (js_type != NULL) {
@@ -292,6 +303,7 @@ json_t *StatsToJSON(const StatsTable *st, uint8_t flags)
}
}
}
+ json_object_set_new(threads, st->tstats[offset].tm_name, thread);
}
json_object_set_new(js_stats, "threads", threads);
}
@@ -471,3 +483,7 @@ void JsonStatsLogRegister(void) {
"eve-log.stats", OutputStatsLogInitSub, JsonStatsLogger,
JsonStatsLogThreadInit, JsonStatsLogThreadDeinit, NULL);
}
+
+#ifdef UNITTESTS
+#include "tests/output-json-stats.c"
+#endif
diff --git a/src/output-json-stats.h b/src/output-json-stats.h
index 9b96d50..b569e30 100644
--- a/src/output-json-stats.h
+++ b/src/output-json-stats.h
@@ -35,4 +35,6 @@ TmEcode OutputEngineStatsReloadTime(json_t **jdata);
TmEcode OutputEngineStatsRuleset(json_t **jdata);
void JsonStatsLogRegister(void);
+void OutputJsonStatsRegisterTests(void);
+
#endif /* __OUTPUT_JSON_COUNTERS_H__ */
diff --git a/src/output-tx.c b/src/output-tx.c
index 18a34e7..042b424 100644
--- a/src/output-tx.c
+++ b/src/output-tx.c
@@ -339,7 +339,9 @@ static TmEcode OutputTxLog(ThreadVars *tv, Packet *p, void *thread_data)
DEBUG_VALIDATE_BUG_ON(thread_data == NULL);
if (p->flow == NULL)
return TM_ECODE_OK;
- if (!((PKT_IS_PSEUDOPKT(p)) || p->flow->flags & (FLOW_TS_APP_UPDATED | FLOW_TC_APP_UPDATED))) {
+ if (!PKT_IS_PSEUDOPKT(p) && p->app_update_direction == 0 &&
+ ((PKT_IS_TOSERVER(p) && (p->flow->flags & FLOW_TS_APP_UPDATED) == 0) ||
+ (PKT_IS_TOCLIENT(p) && (p->flow->flags & FLOW_TC_APP_UPDATED) == 0))) {
SCLogDebug("not pseudo, no app update: skip");
return TM_ECODE_OK;
}
diff --git a/src/runmode-dpdk.c b/src/runmode-dpdk.c
index 2cdf5cb..1a240aa 100644
--- a/src/runmode-dpdk.c
+++ b/src/runmode-dpdk.c
@@ -464,6 +464,9 @@ static int ConfigSetMempoolSize(DPDKIfaceConfig *iconf, intmax_t entry_int)
if (entry_int <= 0) {
SCLogError("%s: positive memory pool size is required", iconf->iface);
SCReturnInt(-ERANGE);
+ } else if (entry_int > UINT32_MAX) {
+ SCLogError("%s: memory pool size cannot exceed %" PRIu32, iconf->iface, UINT32_MAX);
+ SCReturnInt(-ERANGE);
}
iconf->mempool_size = entry_int;
@@ -484,7 +487,7 @@ static int ConfigSetMempoolCacheSize(DPDKIfaceConfig *iconf, const char *entry_s
SCReturnInt(-EINVAL);
}
- uint32_t max_cache_size = MAX(RTE_MEMPOOL_CACHE_MAX_SIZE, iconf->mempool_size / 1.5);
+ uint32_t max_cache_size = MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, iconf->mempool_size / 1.5);
iconf->mempool_cache_size = GreatestDivisorUpTo(iconf->mempool_size, max_cache_size);
SCReturnInt(0);
}
@@ -510,6 +513,9 @@ static int ConfigSetRxDescriptors(DPDKIfaceConfig *iconf, intmax_t entry_int)
if (entry_int <= 0) {
SCLogError("%s: positive number of RX descriptors is required", iconf->iface);
SCReturnInt(-ERANGE);
+ } else if (entry_int > UINT16_MAX) {
+ SCLogError("%s: number of RX descriptors cannot exceed %" PRIu16, iconf->iface, UINT16_MAX);
+ SCReturnInt(-ERANGE);
}
iconf->nb_rx_desc = entry_int;
@@ -522,6 +528,9 @@ static int ConfigSetTxDescriptors(DPDKIfaceConfig *iconf, intmax_t entry_int)
if (entry_int <= 0) {
SCLogError("%s: positive number of TX descriptors is required", iconf->iface);
SCReturnInt(-ERANGE);
+ } else if (entry_int > UINT16_MAX) {
+ SCLogError("%s: number of TX descriptors cannot exceed %" PRIu16, iconf->iface, UINT16_MAX);
+ SCReturnInt(-ERANGE);
}
iconf->nb_tx_desc = entry_int;
@@ -1424,12 +1433,17 @@ static int DeviceConfigure(DPDKIfaceConfig *iconf)
if (retval < 0)
return retval;
+ uint16_t tmp_nb_rx_desc = iconf->nb_rx_desc;
+ uint16_t tmp_nb_tx_desc = iconf->nb_tx_desc;
retval = rte_eth_dev_adjust_nb_rx_tx_desc(
iconf->port_id, &iconf->nb_rx_desc, &iconf->nb_tx_desc);
if (retval != 0) {
SCLogError("%s: failed to adjust device queue descriptors (port %u, err %d)", iconf->iface,
iconf->port_id, retval);
SCReturnInt(retval);
+ } else if (tmp_nb_rx_desc != iconf->nb_rx_desc || tmp_nb_tx_desc != iconf->nb_tx_desc) {
+ SCLogWarning("%s: device queue descriptors adjusted (RX: from %u to %u, TX: from %u to %u)",
+ iconf->iface, tmp_nb_rx_desc, iconf->nb_rx_desc, tmp_nb_tx_desc, iconf->nb_tx_desc);
}
retval = iconf->flags & DPDK_MULTICAST ? rte_eth_allmulticast_enable(iconf->port_id)
diff --git a/src/runmode-napatech.c b/src/runmode-napatech.c
index cb8f560..fe02124 100644
--- a/src/runmode-napatech.c
+++ b/src/runmode-napatech.c
@@ -200,7 +200,12 @@ static void *NapatechConfigParser(const char *device)
if (ConfGetInt("napatech.hba", &conf->hba) == 0) {
conf->hba = -1;
} else {
- SCLogWarning("Napatech Host Buffer Allocation (hba) will be deprecated in Suricata v7.0.");
+ static bool warn_once = false;
+ if (!warn_once) {
+ SCLogWarning(
+ "Napatech Host Buffer Allowance (hba) will be deprecated in Suricata v8.0.");
+ warn_once = true;
+ }
}
return (void *) conf;
}
diff --git a/src/runmode-netmap.c b/src/runmode-netmap.c
index 927dc71..e207cf0 100644
--- a/src/runmode-netmap.c
+++ b/src/runmode-netmap.c
@@ -344,7 +344,9 @@ static void *ParseNetmapConfig(const char *iface_name)
}
}
- int ring_count = NetmapGetRSSCount(aconf->iface_name);
+ int ring_count = 0;
+ if (aconf->in.real)
+ ring_count = NetmapGetRSSCount(aconf->iface_name);
if (strlen(aconf->iface_name) > 0 &&
(aconf->iface_name[strlen(aconf->iface_name) - 1] == '^' ||
aconf->iface_name[strlen(aconf->iface_name) - 1] == '*')) {
diff --git a/src/runmode-pfring.c b/src/runmode-pfring.c
index b0af83b..7f1f74f 100644
--- a/src/runmode-pfring.c
+++ b/src/runmode-pfring.c
@@ -200,6 +200,7 @@ static void *ParsePfringConfig(const char *iface)
cluster_type default_ctype = CLUSTER_FLOW;
int getctype = 0;
int bool_val;
+ const char *active_runmode = RunmodeGetActive();
if (unlikely(pfconf == NULL)) {
return NULL;
@@ -244,7 +245,9 @@ static void *ParsePfringConfig(const char *iface)
if_default = NULL;
}
- if (ConfGetChildValueWithDefault(if_root, if_default, "threads", &threadsstr) != 1) {
+ if (active_runmode && !strcmp("single", active_runmode)) {
+ pfconf->threads = 1;
+ } else if (ConfGetChildValueWithDefault(if_root, if_default, "threads", &threadsstr) != 1) {
pfconf->threads = 1;
} else if (threadsstr != NULL) {
if (strcmp(threadsstr, "auto") == 0) {
diff --git a/src/runmode-unittests.c b/src/runmode-unittests.c
index 1150bad..8ce0244 100644
--- a/src/runmode-unittests.c
+++ b/src/runmode-unittests.c
@@ -114,6 +114,8 @@
#include "decode-vntag.h"
#include "decode-vxlan.h"
+#include "output-json-stats.h"
+
#ifdef OS_WIN32
#include "win32-syscall.h"
#endif
@@ -215,6 +217,7 @@ static void RegisterUnittests(void)
#endif
SCProtoNameRegisterTests();
UtilCIDRTests();
+ OutputJsonStatsRegisterTests();
}
#endif
diff --git a/src/runmode-unix-socket.c b/src/runmode-unix-socket.c
index e695cb8..8b26990 100644
--- a/src/runmode-unix-socket.c
+++ b/src/runmode-unix-socket.c
@@ -545,7 +545,7 @@ static TmEcode UnixSocketPcapFilesCheck(void *data)
if (cfile->tenant_id > 0) {
char tstr[16];
- snprintf(tstr, sizeof(tstr), "%d", cfile->tenant_id);
+ snprintf(tstr, sizeof(tstr), "%u", cfile->tenant_id);
if (ConfSetFinal("pcap-file.tenant-id", tstr) != 1) {
SCLogError("Can not set working tenant-id to '%s'", tstr);
PcapFilesFree(cfile);
@@ -1038,7 +1038,7 @@ TmEcode UnixSocketRegisterTenant(json_t *cmd, json_t* answer, void *data)
/* setup the yaml in this loop so that it's not done by the loader
* threads. ConfYamlLoadFileWithPrefix is not thread safe. */
char prefix[64];
- snprintf(prefix, sizeof(prefix), "multi-detect.%d", tenant_id);
+ snprintf(prefix, sizeof(prefix), "multi-detect.%u", tenant_id);
if (ConfYamlLoadFileWithPrefix(filename, prefix) != 0) {
SCLogError("failed to load yaml %s", filename);
json_object_set_new(answer, "message", json_string("failed to load yaml"));
@@ -1187,7 +1187,7 @@ TmEcode UnixSocketUnregisterTenant(json_t *cmd, json_t* answer, void *data)
/* 2 remove it from the system */
char prefix[64];
- snprintf(prefix, sizeof(prefix), "multi-detect.%d", tenant_id);
+ snprintf(prefix, sizeof(prefix), "multi-detect.%u", tenant_id);
DetectEngineCtx *de_ctx = DetectEngineGetByTenantId(tenant_id);
if (de_ctx == NULL) {
diff --git a/src/source-dpdk.c b/src/source-dpdk.c
index 54503e2..cf26af5 100644
--- a/src/source-dpdk.c
+++ b/src/source-dpdk.c
@@ -564,7 +564,7 @@ static TmEcode ReceiveDPDKThreadInit(ThreadVars *tv, const void *initdata, void
if (inconsistent_numa_cnt > 0 && ptv->port_socket_id != SOCKET_ID_ANY) {
SCLogWarning("%s: NIC is on NUMA %d, %u threads on different NUMA node(s)",
dpdk_config->iface, ptv->port_socket_id, inconsistent_numa_cnt);
- } else if (ptv->port_socket_id == SOCKET_ID_ANY) {
+ } else if (ptv->port_socket_id == SOCKET_ID_ANY && rte_socket_count() > 1) {
SCLogNotice(
"%s: unable to determine NIC's NUMA node, degraded performance can be expected",
dpdk_config->iface);
diff --git a/src/source-netmap.c b/src/source-netmap.c
index 0b04b41..8e409ea 100644
--- a/src/source-netmap.c
+++ b/src/source-netmap.c
@@ -453,6 +453,7 @@ retry:
}
}
+ SCMutexUnlock(&netmap_devlist_lock);
NetmapCloseAll();
FatalError("opening devname %s failed: %s", devname, strerror(errno));
}
diff --git a/src/source-pfring.c b/src/source-pfring.c
index 96da94e..10eac2f 100644
--- a/src/source-pfring.c
+++ b/src/source-pfring.c
@@ -430,6 +430,7 @@ TmEcode ReceivePfringLoop(ThreadVars *tv, void *data, void *slot)
}
} else if (unlikely(r == 0)) {
if (suricata_ctl_flags & SURICATA_STOP) {
+ TmqhOutputPacketpool(ptv->tv, p);
SCReturnInt(TM_ECODE_OK);
}
@@ -701,6 +702,7 @@ TmEcode ReceivePfringThreadDeinit(ThreadVars *tv, void *data)
}
pfring_close(ptv->pd);
+ SCFree(ptv);
return TM_ECODE_OK;
}
diff --git a/src/suricata.c b/src/suricata.c
index d0e1049..1c5ac7c 100644
--- a/src/suricata.c
+++ b/src/suricata.c
@@ -881,9 +881,6 @@ int g_ut_covered;
void RegisterAllModules(void)
{
- // zero all module storage
- memset(tmm_modules, 0, TMM_SIZE * sizeof(TmModule));
-
/* commanders */
TmModuleUnixManagerRegister();
/* managers */
@@ -2672,6 +2669,10 @@ int PostConfLoadedSetup(SCInstance *suri)
MacSetRegisterFlowStorage();
+#ifdef HAVE_PLUGINS
+ SCPluginsLoad(suri->capture_plugin_name, suri->capture_plugin_args);
+#endif
+
LiveDeviceFinalize(); // must be after EBPF extension registration
RunModeEngineIsIPS(
@@ -2743,9 +2744,6 @@ int PostConfLoadedSetup(SCInstance *suri)
FeatureTrackingRegister(); /* must occur prior to output mod registration */
RegisterAllModules();
-#ifdef HAVE_PLUGINS
- SCPluginsLoad(suri->capture_plugin_name, suri->capture_plugin_args);
-#endif
AppLayerHtpNeedFileInspection();
StorageFinalize();
@@ -2869,6 +2867,10 @@ int InitGlobal(void)
ConfInit();
VarNameStoreInit();
+
+ // zero all module storage
+ memset(tmm_modules, 0, TMM_SIZE * sizeof(TmModule));
+
return 0;
}
@@ -2971,7 +2973,10 @@ int SuricataMain(int argc, char **argv)
goto out;
}
- SystemHugepageSnapshot *prerun_snap = SystemHugepageSnapshotCreate();
+ SystemHugepageSnapshot *prerun_snap = NULL;
+ if (run_mode == RUNMODE_DPDK)
+ prerun_snap = SystemHugepageSnapshotCreate();
+
SCSetStartTime(&suricata);
RunModeDispatch(suricata.run_mode, suricata.runmode_custom_mode,
suricata.capture_plugin_name, suricata.capture_plugin_args);
@@ -3029,13 +3034,12 @@ int SuricataMain(int argc, char **argv)
OnNotifyRunning();
PostRunStartedDetectSetup(&suricata);
-
- SystemHugepageSnapshot *postrun_snap = SystemHugepageSnapshotCreate();
- if (run_mode == RUNMODE_DPDK) // only DPDK uses hpages at the moment
+ if (run_mode == RUNMODE_DPDK) { // only DPDK uses hpages at the moment
+ SystemHugepageSnapshot *postrun_snap = SystemHugepageSnapshotCreate();
SystemHugepageEvaluateHugepages(prerun_snap, postrun_snap);
- SystemHugepageSnapshotDestroy(prerun_snap);
- SystemHugepageSnapshotDestroy(postrun_snap);
-
+ SystemHugepageSnapshotDestroy(prerun_snap);
+ SystemHugepageSnapshotDestroy(postrun_snap);
+ }
SCPledge();
SuricataMainLoop(&suricata);
diff --git a/src/tests/output-json-stats.c b/src/tests/output-json-stats.c
new file mode 100644
index 0000000..ac1336e
--- /dev/null
+++ b/src/tests/output-json-stats.c
@@ -0,0 +1,70 @@
+/* Copyright (C) 2024 Open Information Security Foundation
+ *
+ * You can copy, redistribute or modify this Program under the terms of
+ * the GNU General Public License version 2 as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include "../suricata-common.h"
+
+#include "../output-json-stats.h"
+
+#include "../util-unittest.h"
+
+static int OutputJsonStatsTest01(void)
+{
+ StatsRecord global_records[] = { { 0 }, { 0 } };
+ StatsRecord thread_records[2];
+ thread_records[0].name = "capture.kernel_packets";
+ thread_records[0].short_name = "kernel_packets";
+ thread_records[0].tm_name = "W#01-bond0.30";
+ thread_records[0].value = 42;
+ thread_records[1].name = "capture.kernel_drops";
+ thread_records[1].short_name = "kernel_drops";
+ thread_records[1].tm_name = "W#01-bond0.30";
+ thread_records[1].value = 4711;
+
+ StatsTable table = {
+ .nstats = 2,
+ .stats = &global_records[0],
+ .ntstats = 1,
+ .tstats = &thread_records[0],
+ };
+
+ json_t *r = StatsToJSON(&table, JSON_STATS_TOTALS | JSON_STATS_THREADS);
+ if (!r)
+ return 0;
+
+ // Remove variable content
+ json_object_del(r, "uptime");
+
+ char *serialized = json_dumps(r, 0);
+
+ // Cheesy comparison
+ const char *expected = "{\"threads\": {\"W#01-bond0.30\": {\"capture\": {\"kernel_packets\": "
+ "42, \"kernel_drops\": 4711}}}}";
+
+ int cmp_result = strcmp(expected, serialized);
+ if (cmp_result != 0)
+ printf("unexpected result\nexpected=%s\ngot=%s\n", expected, serialized);
+
+ free(serialized);
+ json_decref(r);
+
+ return cmp_result == 0;
+}
+
+void OutputJsonStatsRegisterTests(void)
+{
+ UtRegisterTest("OutputJsonStatsTest01", OutputJsonStatsTest01);
+}
diff --git a/src/tm-threads.c b/src/tm-threads.c
index b173cb8..1853db6 100644
--- a/src/tm-threads.c
+++ b/src/tm-threads.c
@@ -1241,13 +1241,17 @@ static int TmThreadKillThread(ThreadVars *tv)
}
if (tv->inq != NULL) {
for (int i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) {
+ SCMutexLock(&tv->inq->pq->mutex_q);
SCCondSignal(&tv->inq->pq->cond_q);
+ SCMutexUnlock(&tv->inq->pq->mutex_q);
}
SCLogDebug("signalled tv->inq->id %" PRIu32 "", tv->inq->id);
}
if (tv->ctrl_cond != NULL ) {
+ SCCtrlMutexLock(tv->ctrl_mutex);
pthread_cond_broadcast(tv->ctrl_cond);
+ SCCtrlMutexUnlock(tv->ctrl_mutex);
}
return 0;
}
@@ -1427,7 +1431,9 @@ again:
if (tv->inq != NULL) {
for (int i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) {
+ SCMutexLock(&tv->inq->pq->mutex_q);
SCCondSignal(&tv->inq->pq->cond_q);
+ SCMutexUnlock(&tv->inq->pq->mutex_q);
}
SCLogDebug("signalled tv->inq->id %" PRIu32 "", tv->inq->id);
}
@@ -1507,7 +1513,9 @@ again:
* THV_KILL flag. */
if (tv->inq != NULL) {
for (int i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) {
+ SCMutexLock(&tv->inq->pq->mutex_q);
SCCondSignal(&tv->inq->pq->cond_q);
+ SCMutexUnlock(&tv->inq->pq->mutex_q);
}
SCLogDebug("signalled tv->inq->id %" PRIu32 "", tv->inq->id);
}
@@ -2298,7 +2306,9 @@ void TmThreadsInjectFlowById(Flow *f, const int id)
/* wake up listening thread(s) if necessary */
if (tv->inq != NULL) {
+ SCMutexLock(&tv->inq->pq->mutex_q);
SCCondSignal(&tv->inq->pq->cond_q);
+ SCMutexUnlock(&tv->inq->pq->mutex_q);
} else if (tv->break_loop) {
TmThreadsCaptureBreakLoop(tv);
}
diff --git a/src/tmqh-simple.c b/src/tmqh-simple.c
index 47faed5..0bfa173 100644
--- a/src/tmqh-simple.c
+++ b/src/tmqh-simple.c
@@ -76,8 +76,11 @@ void TmqhInputSimpleShutdownHandler(ThreadVars *tv)
return;
}
- for (i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++)
+ for (i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) {
+ SCMutexLock(&tv->inq->pq->mutex_q);
SCCondSignal(&tv->inq->pq->cond_q);
+ SCMutexUnlock(&tv->inq->pq->mutex_q);
+ }
}
void TmqhOutputSimple(ThreadVars *t, Packet *p)
diff --git a/src/util-decode-mime.c b/src/util-decode-mime.c
index 5e7a8d5..eb67c3d 100644
--- a/src/util-decode-mime.c
+++ b/src/util-decode-mime.c
@@ -2439,6 +2439,7 @@ MimeDecParseState * MimeDecInitParser(void *data,
PushStack(state->stack);
if (state->stack->top == NULL) {
SCFree(state->stack);
+ SCFree(state->msg);
SCFree(state);
return NULL;
}
diff --git a/src/util-error.c b/src/util-error.c
index 01c2f9a..e3195a1 100644
--- a/src/util-error.c
+++ b/src/util-error.c
@@ -47,6 +47,7 @@ const char * SCErrorToString(SCError err)
CASE_CODE(SC_EINVAL);
CASE_CODE(SC_ELIMIT);
CASE_CODE(SC_EEXIST);
+ CASE_CODE(SC_ENOENT);
CASE_CODE (SC_ERR_MAX);
}
diff --git a/src/util-error.h b/src/util-error.h
index eaaf8cb..f1bc80d 100644
--- a/src/util-error.h
+++ b/src/util-error.h
@@ -30,6 +30,7 @@ typedef enum {
SC_EINVAL,
SC_ELIMIT,
SC_EEXIST,
+ SC_ENOENT,
SC_ERR_MAX
} SCError;
diff --git a/src/util-file.c b/src/util-file.c
index 0449a2e..89ef50c 100644
--- a/src/util-file.c
+++ b/src/util-file.c
@@ -235,8 +235,11 @@ uint16_t FileFlowFlagsToFlags(const uint16_t flow_file_flags, uint8_t direction)
uint16_t flags = 0;
if (direction == STREAM_TOSERVER) {
- if ((flow_file_flags & (FLOWFILE_NO_STORE_TS | FLOWFILE_STORE)) == FLOWFILE_NO_STORE_TS) {
+ if ((flow_file_flags & (FLOWFILE_NO_STORE_TS | FLOWFILE_STORE_TS)) ==
+ FLOWFILE_NO_STORE_TS) {
flags |= FILE_NOSTORE;
+ } else if (flow_file_flags & FLOWFILE_STORE_TS) {
+ flags |= FILE_STORE;
}
if (flow_file_flags & FLOWFILE_NO_MAGIC_TS) {
@@ -255,8 +258,11 @@ uint16_t FileFlowFlagsToFlags(const uint16_t flow_file_flags, uint8_t direction)
flags |= FILE_NOSHA256;
}
} else {
- if ((flow_file_flags & (FLOWFILE_NO_STORE_TC | FLOWFILE_STORE)) == FLOWFILE_NO_STORE_TC) {
+ if ((flow_file_flags & (FLOWFILE_NO_STORE_TC | FLOWFILE_STORE_TC)) ==
+ FLOWFILE_NO_STORE_TC) {
flags |= FILE_NOSTORE;
+ } else if (flow_file_flags & FLOWFILE_STORE_TC) {
+ flags |= FILE_STORE;
}
if (flow_file_flags & FLOWFILE_NO_MAGIC_TC) {
@@ -275,9 +281,6 @@ uint16_t FileFlowFlagsToFlags(const uint16_t flow_file_flags, uint8_t direction)
flags |= FILE_NOSHA256;
}
}
- if (flow_file_flags & FLOWFILE_STORE) {
- flags |= FILE_STORE;
- }
DEBUG_VALIDATE_BUG_ON((flags & (FILE_STORE | FILE_NOSTORE)) == (FILE_STORE | FILE_NOSTORE));
SCLogDebug("direction %02x flags %02x", direction, flags);
diff --git a/src/util-hugepages.c b/src/util-hugepages.c
index 2af74c3..5ad3519 100644
--- a/src/util-hugepages.c
+++ b/src/util-hugepages.c
@@ -24,6 +24,7 @@
#include "suricata.h"
#include "util-debug.h"
#include "util-hugepages.h"
+#include "util-path.h"
static uint16_t SystemHugepageSizesCntPerNodeGet(uint16_t node_index);
static uint16_t SystemNodeCountGet(void);
@@ -36,18 +37,28 @@ static void SystemHugepageNodeInfoDestroy(NodeInfo *n);
static void SystemHugepageNodeInfoDump(NodeInfo *n);
static void SystemHugepageSnapshotDump(SystemHugepageSnapshot *s);
+typedef enum OSHugepageAction_ {
+ OS_UNKNOWN, // unknown/unsupported OS
+ OS_LINUX_SYS_DEVICES,
+} OSHugepageAction;
+
+static OSHugepageAction SystemHugepageDetermineOS(void)
+{
+ // try Linux
+ if (SCPathExists("/sys/devices/system/node/")) {
+ return OS_LINUX_SYS_DEVICES;
+ }
+
+ return OS_UNKNOWN;
+}
+
static bool SystemHugepageSupported(void)
{
-#if !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun
- return true;
-#else
+ if (SystemHugepageDetermineOS() != OS_UNKNOWN)
+ return true;
return false;
-#endif /* !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun */
}
-// block of all hugepage-specific internal functions
-#if !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun
-
/**
* \brief Linux-specific function to detect number of NUMA nodes on the system
* \returns number of NUMA nodes, 0 on error
@@ -56,16 +67,14 @@ static uint16_t SystemNodeCountGetLinux(void)
{
char dir_path[] = "/sys/devices/system/node/";
DIR *dir = opendir(dir_path);
- if (dir == NULL) {
- SCLogError("unable to open %s", dir_path);
- return 0;
- }
+ if (dir == NULL)
+ FatalError("unable to open %s", dir_path);
uint16_t count = 0;
struct dirent *entry;
while ((entry = readdir(dir)) != NULL) {
char d_name[] = "node";
- if (entry->d_type == DT_DIR && strncmp(entry->d_name, d_name, strlen(d_name)) == 0)
+ if (SCIsRegularDirectory(entry) && strncmp(entry->d_name, d_name, strlen(d_name)) == 0)
count++;
}
closedir(dir);
@@ -83,7 +92,7 @@ static uint16_t SystemHugepageSizesCntPerNodeGetLinux(uint16_t node_index)
snprintf(dir_path, sizeof(dir_path), "/sys/devices/system/node/node%d/hugepages/", node_index);
DIR *dir = opendir(dir_path);
if (dir == NULL) {
- SCLogError("unable to open %s", dir_path);
+ SCLogInfo("unable to open %s", dir_path);
return 0;
}
@@ -91,7 +100,7 @@ static uint16_t SystemHugepageSizesCntPerNodeGetLinux(uint16_t node_index)
struct dirent *entry;
while ((entry = readdir(dir)) != NULL) {
char d_name[] = "hugepages-";
- if (entry->d_type == DT_DIR && strncmp(entry->d_name, d_name, strlen(d_name)) == 0)
+ if (SCIsRegularDirectory(entry) && strncmp(entry->d_name, d_name, strlen(d_name)) == 0)
count++;
}
closedir(dir);
@@ -111,14 +120,13 @@ static void SystemHugepagePerNodeGetHugepageSizesLinux(
char dir_path[256];
snprintf(dir_path, sizeof(dir_path), "/sys/devices/system/node/node%d/hugepages/", node_index);
DIR *dir = opendir(dir_path);
- if (dir == NULL) {
- SCLogError("unable to open %s", dir_path);
- return;
- }
+ if (dir == NULL)
+ FatalError("unable to open %s", dir_path);
+
uint16_t index = 0;
struct dirent *entry;
while ((entry = readdir(dir)) != NULL) {
- if (entry->d_type == DT_DIR && strncmp(entry->d_name, "hugepages-", 10) == 0) {
+ if (SCIsRegularDirectory(entry) && strncmp(entry->d_name, "hugepages-", 10) == 0) {
sscanf(entry->d_name, "hugepages-%ukB", &(hp_sizes[index]));
index++;
}
@@ -146,11 +154,11 @@ static int16_t SystemHugepagePerNodeGetHugepageInfoLinux(
node_index, hp_sizes[i]);
FILE *f = fopen(path, "r");
if (!f) {
- SCLogError("unable to open %s", path);
- return -SC_EEXIST;
+ SCLogInfo("unable to open %s", path);
+ return -SC_ENOENT;
}
if (fscanf(f, "%hu", &hugepages[i].allocated) != 1) {
- SCLogError("failed to read the total number of allocated hugepages (%ukB) on node %hu",
+ SCLogInfo("failed to read the total number of allocated hugepages (%ukB) on node %hu",
hp_sizes[i], node_index);
fclose(f);
return -SC_EINVAL;
@@ -162,11 +170,11 @@ static int16_t SystemHugepagePerNodeGetHugepageInfoLinux(
node_index, hp_sizes[i]);
f = fopen(path, "r");
if (!f) {
- SCLogError("unable to open %s", path);
- return -SC_EEXIST;
+ SCLogInfo("unable to open %s", path);
+ return -SC_ENOENT;
}
if (fscanf(f, "%hu", &hugepages[i].free) != 1) {
- SCLogError("failed to read the total number of free hugepages (%ukB) on node %hu",
+ SCLogInfo("failed to read the total number of free hugepages (%ukB) on node %hu",
hp_sizes[i], node_index);
fclose(f);
return -SC_EINVAL;
@@ -177,8 +185,6 @@ static int16_t SystemHugepagePerNodeGetHugepageInfoLinux(
return 0;
}
-#endif /* !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun */
-
/**
* \brief The function gathers information about hugepages on a given node
* \param[in] node_index index of the NUMA node
@@ -189,8 +195,8 @@ static int16_t SystemHugepagePerNodeGetHugepageInfo(uint16_t node_index, NodeInf
{
uint16_t hp_sizes_cnt = SystemHugepageSizesCntPerNodeGet(node_index);
if (hp_sizes_cnt == 0) {
- SCLogError("hugepages not found for node %d", node_index);
- return -SC_EEXIST;
+ SCLogInfo("hugepages not found for node %d", node_index);
+ return -SC_ENOENT;
}
uint32_t *hp_sizes = SCCalloc(hp_sizes_cnt, sizeof(*hp_sizes));
if (hp_sizes == NULL) {
@@ -202,10 +208,9 @@ static int16_t SystemHugepagePerNodeGetHugepageInfo(uint16_t node_index, NodeInf
node->num_hugepage_sizes = hp_sizes_cnt;
int16_t ret = 0;
-#if !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun
- ret = SystemHugepagePerNodeGetHugepageInfoLinux(
- node->hugepages, hp_sizes, node->num_hugepage_sizes, node_index);
-#endif /* !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun */
+ if (SystemHugepageDetermineOS() == OS_LINUX_SYS_DEVICES)
+ ret = SystemHugepagePerNodeGetHugepageInfoLinux(
+ node->hugepages, hp_sizes, node->num_hugepage_sizes, node_index);
SCFree(hp_sizes);
return ret;
@@ -217,9 +222,8 @@ static int16_t SystemHugepagePerNodeGetHugepageInfo(uint16_t node_index, NodeInf
*/
static uint16_t SystemNodeCountGet(void)
{
-#if !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun
- return SystemNodeCountGetLinux();
-#endif /* !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun */
+ if (SystemHugepageDetermineOS() == OS_LINUX_SYS_DEVICES)
+ return SystemNodeCountGetLinux();
return 0;
}
@@ -229,9 +233,8 @@ static uint16_t SystemNodeCountGet(void)
*/
static uint16_t SystemHugepageSizesCntPerNodeGet(uint16_t node_index)
{
-#if !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun
- return SystemHugepageSizesCntPerNodeGetLinux(node_index);
-#endif /* !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun */
+ if (SystemHugepageDetermineOS() == OS_LINUX_SYS_DEVICES)
+ return SystemHugepageSizesCntPerNodeGetLinux(node_index);
return 0;
}
@@ -245,9 +248,8 @@ static uint16_t SystemHugepageSizesCntPerNodeGet(uint16_t node_index)
static void SystemHugepagePerNodeGetHugepageSizes(
uint16_t node_index, uint16_t hp_sizes_cnt, uint32_t *hp_sizes)
{
-#if !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun
- return SystemHugepagePerNodeGetHugepageSizesLinux(node_index, hp_sizes_cnt, hp_sizes);
-#endif /* !defined __CYGWIN__ && !defined OS_WIN32 && !defined __OpenBSD__ && !defined sun */
+ if (SystemHugepageDetermineOS() == OS_LINUX_SYS_DEVICES)
+ SystemHugepagePerNodeGetHugepageSizesLinux(node_index, hp_sizes_cnt, hp_sizes);
}
static HugepageInfo *SystemHugepageHugepageInfoCreate(uint16_t hp_size_cnt)
@@ -325,7 +327,7 @@ SystemHugepageSnapshot *SystemHugepageSnapshotCreate(void)
uint16_t node_cnt = SystemNodeCountGet();
if (node_cnt == 0) {
- SCLogError("failed to obtain number of NUMA nodes in the system");
+ SCLogInfo("hugepage snapshot failed - cannot obtain number of NUMA nodes in the system");
return NULL;
}
NodeInfo *nodes = SCCalloc(node_cnt, sizeof(*nodes));
@@ -386,7 +388,8 @@ void SystemHugepageEvaluateHugepages(SystemHugepageSnapshot *pre_s, SystemHugepa
SCLogWarning(
"Hugepage usage decreased while it should only increase/stay the same");
} else if (prerun_hp->free > 0 && prerun_hp->free == postrun_hp->free) {
- SCLogPerf("Hugepages on NUMA node %u are unused and can be deallocated", i);
+ SCLogPerf("%ukB hugepages on NUMA node %u are unused and can be deallocated",
+ postrun_hp->size_kb, i);
} else { // assumes this is an active NUMA node because at least some hugepages were
// used
// speculative hint only for 2048kB pages as e.g. 1 GB pages can leave a lot of room
diff --git a/src/util-streaming-buffer.c b/src/util-streaming-buffer.c
index 7608b50..6ff4f43 100644
--- a/src/util-streaming-buffer.c
+++ b/src/util-streaming-buffer.c
@@ -842,16 +842,11 @@ static inline void StreamingBufferSlideToOffsetWithRegions(
r = next;
}
SCLogDebug("to_shift %p", to_shift);
- } else {
- to_shift = &sb->region;
- SCLogDebug("shift start region %p", to_shift);
- }
- // this region is main, or will xfer its buffer to main
- if (to_shift) {
- SCLogDebug("main: offset %" PRIu64 " buf %p size %u offset %u", to_shift->stream_offset,
- to_shift->buf, to_shift->buf_size, to_shift->buf_offset);
- if (to_shift != &sb->region) {
+ // this region is main, or will xfer its buffer to main
+ if (to_shift && to_shift != &sb->region) {
+ SCLogDebug("main: offset %" PRIu64 " buf %p size %u offset %u", to_shift->stream_offset,
+ to_shift->buf, to_shift->buf_size, to_shift->buf_offset);
DEBUG_VALIDATE_BUG_ON(sb->region.buf != NULL);
sb->region.buf = to_shift->buf;
@@ -860,12 +855,20 @@ static inline void StreamingBufferSlideToOffsetWithRegions(
sb->region.buf_size = to_shift->buf_size;
sb->region.next = to_shift->next;
+ BUG_ON(to_shift == &sb->region);
FREE(cfg, to_shift, sizeof(*to_shift));
to_shift = &sb->region;
sb->regions--;
DEBUG_VALIDATE_BUG_ON(sb->regions == 0);
}
+ } else {
+ to_shift = &sb->region;
+ SCLogDebug("shift start region %p", to_shift);
+ }
+
+ // this region is main, or will xfer its buffer to main
+ if (to_shift) {
// Do the shift. If new region is exactly at the slide offset we can skip this.
DEBUG_VALIDATE_BUG_ON(to_shift->stream_offset > slide_offset);
const uint32_t s = slide_offset - to_shift->stream_offset;
diff --git a/suricata-update/.github/PULL_REQUEST_TEMPLATE.md b/suricata-update/.github/PULL_REQUEST_TEMPLATE.md
index 40471df..5bf7005 100644
--- a/suricata-update/.github/PULL_REQUEST_TEMPLATE.md
+++ b/suricata-update/.github/PULL_REQUEST_TEMPLATE.md
@@ -2,10 +2,9 @@ Make sure these boxes are signed before submitting your Pull Request
-- thank you.
- [ ] I have read the contributing guide lines at
- https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Contributing
+ https://docs.suricata.io/en/latest/devguide/codebase/contributing/contribution-process.html
- [ ] I have signed the Open Information Security Foundation
- contribution agreement at
- https://suricata-ids.org/about/contribution-agreement/
+ contribution agreement at https://suricata.io/about/contribution-agreement/
- [ ] I have updated the user guide (in doc/userguide/) to reflect the
changes made (if applicable)
diff --git a/suricata-update/.github/workflows/tests.yml b/suricata-update/.github/workflows/tests.yml
index 96a72d5..22a19f3 100644
--- a/suricata-update/.github/workflows/tests.yml
+++ b/suricata-update/.github/workflows/tests.yml
@@ -4,6 +4,12 @@ on:
- push
- pull_request
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+permissions: read-all
+
jobs:
alma-9:
@@ -70,26 +76,26 @@ jobs:
- name: Python 3 integration tests
run: PYTHONPATH=. python3 ./tests/integration_tests.py
- fedora-38:
- name: Fedora 38
+ fedora-39:
+ name: Fedora 39
runs-on: ubuntu-latest
- container: fedora:38
+ container: fedora:39
steps:
- run: |
dnf -y install \
python3 \
python3-pytest \
python3-pyyaml
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Python 3 unit tests
run: PYTHONPATH=. pytest-3
- name: Python 3 integration tests
run: PYTHONPATH=. python3 ./tests/integration_tests.py
- fedora-37:
- name: Fedora 37
+ fedora-38:
+ name: Fedora 38
runs-on: ubuntu-latest
- container: fedora:37
+ container: fedora:38
steps:
- run: |
dnf -y install \
diff --git a/suricata-update/.readthedocs.yaml b/suricata-update/.readthedocs.yaml
new file mode 100644
index 0000000..635dca4
--- /dev/null
+++ b/suricata-update/.readthedocs.yaml
@@ -0,0 +1,17 @@
+version: 2
+
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.11"
+
+python:
+ install:
+ - requirements: ./requirements.txt
+
+sphinx:
+ builder: html
+ configuration: doc/conf.py
+ fail_on_warning: false
+
+formats: all
diff --git a/suricata-update/CHANGELOG.md b/suricata-update/CHANGELOG.md
index a033b02..03310ca 100644
--- a/suricata-update/CHANGELOG.md
+++ b/suricata-update/CHANGELOG.md
@@ -1,5 +1,25 @@
# Change Log
+## 1.3.2 - 2024-03-14
+- Fix copying of file hash lists which was broken in the dataset fix
+ as part of ticket #6833:
+ https://redmine.openinfosecfoundation.org/issues/6854
+
+## 1.3.1 - 2024-03-11
+- Fix detecting dataset "load" when preceded by a space:
+ https://redmine.openinfosecfoundation.org/issues/6777
+- If no Suricata is found, Suricata-Update will assume version 6.0.0
+ instead of 4.0.0.
+- Handle URLs of bare files that don't end in .rules:
+ https://redmine.openinfosecfoundation.org/issues/3664
+- Don't base dataset filenames on the contents of the file, but
+ instead the filename path:
+ https://redmine.openinfosecfoundation.org/issues/6763
+- Give each file in a source a unique filename by prefixing the files
+ with a hash of the URL to prevent duplicate filenames from
+ cloberring each other, in particular dataset files:
+ https://redmine.openinfosecfoundation.org/issues/6833
+
## 1.3.0 - 2023-07-07
- Fix loading of configuration files specified in update.yaml:
diff --git a/suricata-update/Makefile b/suricata-update/Makefile
index 26f2753..e0ceb8f 100644
--- a/suricata-update/Makefile
+++ b/suricata-update/Makefile
@@ -253,10 +253,10 @@ OTOOL64 =
PACKAGE = suricata
PACKAGE_BUGREPORT =
PACKAGE_NAME = suricata
-PACKAGE_STRING = suricata 7.0.3
+PACKAGE_STRING = suricata 7.0.4
PACKAGE_TARNAME = suricata
PACKAGE_URL =
-PACKAGE_VERSION = 7.0.3
+PACKAGE_VERSION = 7.0.4
PATH_SEPARATOR = :
PCAP_CFLAGS = -I/usr/include
PCAP_LIBS = -lpcap
@@ -280,7 +280,7 @@ SHELL = /bin/bash
SPHINX_BUILD = /usr/bin/sphinx-build
STRIP = strip
SURICATA_UPDATE_DIR = suricata-update
-VERSION = 7.0.3
+VERSION = 7.0.4
abs_builddir = /builds/dev/suricata/suricata-update
abs_srcdir = /builds/dev/suricata/suricata-update
abs_top_builddir = /builds/dev/suricata
diff --git a/suricata-update/doc/quickstart.rst b/suricata-update/doc/quickstart.rst
index 012b4e3..bf57de5 100644
--- a/suricata-update/doc/quickstart.rst
+++ b/suricata-update/doc/quickstart.rst
@@ -120,7 +120,7 @@ This command will:
and do not need to exist.
* Download the Emerging Threats Open ruleset for your version of
- Suricata, defaulting to 4.0.0 if not found.
+ Suricata, defaulting to 6.0.0 if not found.
* Apply enable, disable, drop and modify filters as loaded above.
diff --git a/suricata-update/suricata/update/data/index.py b/suricata-update/suricata/update/data/index.py
index 48d4ebb..02a9c4f 100644
--- a/suricata-update/suricata/update/data/index.py
+++ b/suricata-update/suricata/update/data/index.py
@@ -51,6 +51,28 @@ index = { 'sources': { 'et/open': { 'description': 'Proofpoint ET Open is
'support-url': 'https://redmine.openinfosecfoundation.org/',
'url': 'https://openinfosecfoundation.org/rules/trafficid/trafficid.rules',
'vendor': 'OISF'},
+ 'pawpatrules': { 'checksum': False,
+ 'description': 'PAW Patrules ruleset '
+ 'permit to detect many '
+ 'events on\n'
+ 'network. Suspicious '
+ 'flow, malicious tool, '
+ 'unsuported and\n'
+ 'vulnerable system, known '
+ 'threat actors with '
+ 'various IOCs,\n'
+ 'lateral movement, bad '
+ 'practice, shadow IT... '
+ 'Rules are\n'
+ 'frequently updated.\n',
+ 'homepage': 'https://pawpatrules.fr/',
+ 'license': 'CC-BY-SA-4.0',
+ 'min-version': '6.0.0',
+ 'summary': 'PAW Patrules is a collection '
+ 'of rules for IDPS / NSM '
+ 'Suricata engine',
+ 'url': 'https://rules.pawpatrules.fr/suricata/paw-patrules.tar.gz',
+ 'vendor': 'pawpatrules'},
'ptresearch/attackdetection': { 'description': 'The '
'Attack '
'Detection '
@@ -261,6 +283,184 @@ index = { 'sources': { 'et/open': { 'description': 'Proofpoint ET Open is
'support-url': 'https://discord.com/channels/911231224448712714/911238451842666546',
'url': 'https://ti.stamus-networks.io/open/stamus-lateral-rules.tar.gz',
'vendor': 'Stamus Networks'},
+ 'stamus/nrd-14-open': { 'description': 'Newly Registered '
+ 'Domains list '
+ '(last 14 days) to '
+ 'match on DNS, TLS '
+ 'and HTTP '
+ 'communication.\n'
+ 'Produced by '
+ 'Stamus Labs '
+ 'research team.\n',
+ 'license': 'Commercial',
+ 'min-version': '6.0.0',
+ 'parameters': { 'secret-code': { 'prompt': 'Stamus '
+ 'Networks '
+ 'License '
+ 'code'}},
+ 'subscribe-url': 'https://www.stamus-networks.com/stamus-labs/subscribe-to-threat-intel-feed',
+ 'summary': 'Newly Registered '
+ 'Domains Open only - '
+ '14 day list, complete',
+ 'url': 'https://ti.stamus-networks.io/%(secret-code)s/sti-domains-nrd-14.tar.gz',
+ 'vendor': 'Stamus Networks'},
+ 'stamus/nrd-30-open': { 'description': 'Newly Registered '
+ 'Domains list '
+ '(last 30 days) to '
+ 'match on DNS, TLS '
+ 'and HTTP '
+ 'communication.\n'
+ 'Produced by '
+ 'Stamus Labs '
+ 'research team.\n',
+ 'license': 'Commercial',
+ 'min-version': '6.0.0',
+ 'parameters': { 'secret-code': { 'prompt': 'Stamus '
+ 'Networks '
+ 'License '
+ 'code'}},
+ 'subscribe-url': 'https://www.stamus-networks.com/stamus-labs/subscribe-to-threat-intel-feed',
+ 'summary': 'Newly Registered '
+ 'Domains Open only - '
+ '30 day list, complete',
+ 'url': 'https://ti.stamus-networks.io/%(secret-code)s/sti-domains-nrd-30.tar.gz',
+ 'vendor': 'Stamus Networks'},
+ 'stamus/nrd-entropy-14-open': { 'description': 'Suspicious '
+ 'Newly '
+ 'Registered '
+ 'Domains '
+ 'list with '
+ 'high '
+ 'entropy '
+ '(last 14 '
+ 'days) to '
+ 'match on '
+ 'DNS, TLS '
+ 'and HTTP '
+ 'communication.\n'
+ 'Produced '
+ 'by Stamus '
+ 'Labs '
+ 'research '
+ 'team.\n',
+ 'license': 'Commercial',
+ 'min-version': '6.0.0',
+ 'parameters': { 'secret-code': { 'prompt': 'Stamus '
+ 'Networks '
+ 'License '
+ 'code'}},
+ 'subscribe-url': 'https://www.stamus-networks.com/stamus-labs/subscribe-to-threat-intel-feed',
+ 'summary': 'Newly '
+ 'Registered '
+ 'Domains Open '
+ 'only - 14 day '
+ 'list, high '
+ 'entropy',
+ 'url': 'https://ti.stamus-networks.io/%(secret-code)s/sti-domains-entropy-14.tar.gz',
+ 'vendor': 'Stamus '
+ 'Networks'},
+ 'stamus/nrd-entropy-30-open': { 'description': 'Suspicious '
+ 'Newly '
+ 'Registered '
+ 'Domains '
+ 'list with '
+ 'high '
+ 'entropy '
+ '(last 30 '
+ 'days) to '
+ 'match on '
+ 'DNS, TLS '
+ 'and HTTP '
+ 'communication.\n'
+ 'Produced '
+ 'by Stamus '
+ 'Labs '
+ 'research '
+ 'team.\n',
+ 'license': 'Commercial',
+ 'min-version': '6.0.0',
+ 'parameters': { 'secret-code': { 'prompt': 'Stamus '
+ 'Networks '
+ 'License '
+ 'code'}},
+ 'subscribe-url': 'https://www.stamus-networks.com/stamus-labs/subscribe-to-threat-intel-feed',
+ 'summary': 'Newly '
+ 'Registered '
+ 'Domains Open '
+ 'only - 30 day '
+ 'list, high '
+ 'entropy',
+ 'url': 'https://ti.stamus-networks.io/%(secret-code)s/sti-domains-entropy-30.tar.gz',
+ 'vendor': 'Stamus '
+ 'Networks'},
+ 'stamus/nrd-phishing-14-open': { 'description': 'Suspicious '
+ 'Newly '
+ 'Registered '
+ 'Domains '
+ 'Phishing '
+ 'list '
+ '(last 14 '
+ 'days) to '
+ 'match on '
+ 'DNS, TLS '
+ 'and HTTP '
+ 'communication.\n'
+ 'Produced '
+ 'by '
+ 'Stamus '
+ 'Labs '
+ 'research '
+ 'team.\n',
+ 'license': 'Commercial',
+ 'min-version': '6.0.0',
+ 'parameters': { 'secret-code': { 'prompt': 'Stamus '
+ 'Networks '
+ 'License '
+ 'code'}},
+ 'subscribe-url': 'https://www.stamus-networks.com/stamus-labs/subscribe-to-threat-intel-feed',
+ 'summary': 'Newly '
+ 'Registered '
+ 'Domains Open '
+ 'only - 14 '
+ 'day list, '
+ 'phishing',
+ 'url': 'https://ti.stamus-networks.io/%(secret-code)s/sti-domains-phishing-14.tar.gz',
+ 'vendor': 'Stamus '
+ 'Networks'},
+ 'stamus/nrd-phishing-30-open': { 'description': 'Suspicious '
+ 'Newly '
+ 'Registered '
+ 'Domains '
+ 'Phishing '
+ 'list '
+ '(last 30 '
+ 'days) to '
+ 'match on '
+ 'DNS, TLS '
+ 'and HTTP '
+ 'communication.\n'
+ 'Produced '
+ 'by '
+ 'Stamus '
+ 'Labs '
+ 'research '
+ 'team.\n',
+ 'license': 'Commercial',
+ 'min-version': '6.0.0',
+ 'parameters': { 'secret-code': { 'prompt': 'Stamus '
+ 'Networks '
+ 'License '
+ 'code'}},
+ 'subscribe-url': 'https://www.stamus-networks.com/stamus-labs/subscribe-to-threat-intel-feed',
+ 'summary': 'Newly '
+ 'Registered '
+ 'Domains Open '
+ 'only - 30 '
+ 'day list, '
+ 'phishing',
+ 'url': 'https://ti.stamus-networks.io/%(secret-code)s/sti-domains-phishing-30.tar.gz',
+ 'vendor': 'Stamus '
+ 'Networks'},
'tgreen/hunting': { 'checksum': False,
'description': 'Heuristic ruleset for '
'hunting. Focus on '
diff --git a/suricata-update/suricata/update/engine.py b/suricata-update/suricata/update/engine.py
index c57da82..22ad9b3 100644
--- a/suricata-update/suricata/update/engine.py
+++ b/suricata-update/suricata/update/engine.py
@@ -135,7 +135,7 @@ def get_path(program="suricata"):
return None
def parse_version(buf):
- m = re.search("((\d+)\.(\d+)(\.(\d+))?([\w\-]+)?)", str(buf).strip())
+ m = re.search(r"((\d+)\.(\d+)(\.(\d+))?([\w\-]+)?)", str(buf).strip())
if m:
full = m.group(1)
major = int(m.group(2))
diff --git a/suricata-update/suricata/update/main.py b/suricata-update/suricata/update/main.py
index 4a0e7a6..18af7a8 100644
--- a/suricata-update/suricata/update/main.py
+++ b/suricata-update/suricata/update/main.py
@@ -88,7 +88,7 @@ else:
logger = logging.getLogger()
# If Suricata is not found, default to this version.
-DEFAULT_SURICATA_VERSION = "4.0.0"
+DEFAULT_SURICATA_VERSION = "6.0.0"
# The default filename to use for the output rule file. This is a
# single file concatenating all input rule files together.
@@ -235,6 +235,8 @@ class Fetch:
# The file is not an archive, treat it as an individual file.
basename = os.path.basename(filename).split("-", 1)[1]
+ if not basename.endswith(".rules"):
+ basename = "{}.rules".format(basename)
files = {}
files[basename] = open(filename, "rb").read()
return files
@@ -435,8 +437,7 @@ def manage_classification(suriconf, files):
def handle_dataset_files(rule, dep_files):
if not rule.enabled:
return
-
- dataset_load = [el.strip() for el in rule.dataset.split(",") if el.startswith("load")]
+ dataset_load = [el for el in (el.strip() for el in rule.dataset.split(",")) if el.startswith("load")]
if not dataset_load:
# No dataset load found.
return
@@ -446,7 +447,7 @@ def handle_dataset_files(rule, dep_files):
prefix = os.path.dirname(rule.group)
# Construct the source filename.
- source_filename = "{}/{}".format(prefix, dataset_filename)
+ source_filename = os.path.join(prefix, dataset_filename)
# If a source filename starts with a "/", look for it on the filesystem. The archive
# unpackers will take care of removing a leading / so this shouldn't happen for
@@ -464,9 +465,9 @@ def handle_dataset_files(rule, dep_files):
return
dataset_contents = dep_files[source_filename]
- content_hash = hashlib.md5(dataset_contents).hexdigest()
- new_rule = re.sub("(dataset.*?load\s+){}".format(dataset_filename), "\g<1>datasets/{}".format(content_hash), rule.format())
- dest_filename = os.path.join(config.get_output_dir(), "datasets", content_hash)
+ source_filename_hash = hashlib.md5(source_filename.encode()).hexdigest()
+ new_rule = re.sub(r"(dataset.*?load\s+){}".format(dataset_filename), r"\g<1>datasets/{}".format(source_filename_hash), rule.format())
+ dest_filename = os.path.join(config.get_output_dir(), "datasets", source_filename_hash)
dest_dir = os.path.dirname(dest_filename)
logger.debug("Copying dataset file {} to {}".format(dataset_filename, dest_filename))
try:
@@ -482,10 +483,19 @@ def handle_filehash_files(rule, dep_files, fhash):
if not rule.enabled:
return
filehash_fname = rule.get(fhash)
- filename = [fname for fname, content in dep_files.items() if os.path.join(*(fname.split(os.path.sep)[1:])) == filehash_fname]
- if filename:
+
+ # Get the directory name the rule is from.
+ prefix = os.path.dirname(rule.group)
+
+ source_filename = os.path.join(prefix, filehash_fname)
+ dest_filename = source_filename[len(prefix) + len(os.path.sep):]
+ logger.debug("dest_filename={}".format(dest_filename))
+
+ if source_filename not in dep_files:
+ logger.error("{} file {} was not found".format(fhash, filehash_fname))
+ else:
logger.debug("Copying %s file %s to output directory" % (fhash, filehash_fname))
- filepath = os.path.join(config.get_state_dir(), os.path.dirname(filename[0]))
+ filepath = os.path.join(config.get_output_dir(), os.path.dirname(dest_filename))
logger.debug("filepath: %s" % filepath)
try:
os.makedirs(filepath)
@@ -493,11 +503,10 @@ def handle_filehash_files(rule, dep_files, fhash):
if oserr.errno != errno.EEXIST:
logger.error(oserr)
sys.exit(1)
- logger.debug("output fname: %s" % os.path.join(filepath, os.path.basename(filehash_fname)))
- with open(os.path.join(filepath, os.path.basename(filehash_fname)), "w+") as fp:
- fp.write(dep_files[os.path.join("rules", filehash_fname)].decode("utf-8"))
- else:
- logger.error("{} file {} was not found".format(fhash, filehash_fname))
+ output_filename = os.path.join(filepath, os.path.basename(filehash_fname))
+ logger.debug("output fname: %s" % output_filename)
+ with open(output_filename, "w") as fp:
+ fp.write(dep_files[source_filename].decode("utf-8"))
def write_merged(filename, rulemap, dep_files):
@@ -700,9 +709,9 @@ def resolve_flowbits(rulemap, disabled_rules):
class ThresholdProcessor:
patterns = [
- re.compile("\s+(re:\"(.*)\")"),
- re.compile("\s+(re:(.*?)),.*"),
- re.compile("\s+(re:(.*))"),
+ re.compile(r"\s+(re:\"(.*)\")"),
+ re.compile(r"\s+(re:(.*?)),.*"),
+ re.compile(r"\s+(re:(.*))"),
]
def extract_regex(self, buf):
@@ -984,9 +993,14 @@ def load_sources(suricata_version):
# Now download each URL.
files = []
for url in urls:
+
+ # To de-duplicate filenames, add a prefix that is a hash of the URL.
+ prefix = hashlib.md5(url[0].encode()).hexdigest()
source_files = Fetch().run(url)
for key in source_files:
- files.append(SourceFile(key, source_files[key]))
+ content = source_files[key]
+ key = os.path.join(prefix, key)
+ files.append(SourceFile(key, content))
# Now load local rules.
if config.get("local") is not None:
@@ -1184,7 +1198,7 @@ def _main():
# Disable rule that are for app-layers that are not enabled.
if suriconf:
for key in suriconf.keys():
- m = re.match("app-layer\.protocols\.([^\.]+)\.enabled", key)
+ m = re.match(r"app-layer\.protocols\.([^\.]+)\.enabled", key)
if m:
proto = m.group(1)
if not suriconf.is_true(key, ["detection-only"]):
diff --git a/suricata-update/suricata/update/matchers.py b/suricata-update/suricata/update/matchers.py
index e886c79..56a9e29 100644
--- a/suricata-update/suricata/update/matchers.py
+++ b/suricata-update/suricata/update/matchers.py
@@ -251,7 +251,7 @@ class ModifyRuleFilter(object):
pattern = re.compile(a)
# Convert Oinkmaster backticks to Python.
- b = re.sub("\$\{(\d+)\}", "\\\\\\1", b)
+ b = re.sub(r"\$\{(\d+)\}", "\\\\\\1", b)
return cls(matcher, pattern, b)
@@ -269,7 +269,7 @@ class DropRuleFilter(object):
def run(self, rule):
drop_rule = suricata.update.rule.parse(re.sub(
- "^\w+", "drop", rule.raw))
+ r"^\w+", "drop", rule.raw))
drop_rule.enabled = rule.enabled
return drop_rule
@@ -284,7 +284,7 @@ class AddMetadataFilter(object):
return self.matcher.match(rule)
def run(self, rule):
- new_rule_string = re.sub(";\s*\)$", "; metadata: {} {};)".format(self.key, self.val), rule.format())
+ new_rule_string = re.sub(r";\s*\)$", "; metadata: {} {};)".format(self.key, self.val), rule.format())
new_rule = suricata.update.rule.parse(new_rule_string, rule.group)
if not new_rule:
logger.error("Rule is not valid after adding metadata: [{}]: {}".format(rule.idstr, new_rule_string))
diff --git a/suricata-update/suricata/update/osinfo.py b/suricata-update/suricata/update/osinfo.py
index 82816bc..c3e417b 100644
--- a/suricata-update/suricata/update/osinfo.py
+++ b/suricata-update/suricata/update/osinfo.py
@@ -27,7 +27,7 @@ def parse_os_release(filename="/etc/os-release"):
with open(filename) as fileobj:
for line in fileobj:
line = line.strip()
- m = re.match("^(\w+)=\"?(.*?)\"?$", line)
+ m = re.match(r"^(\w+)=\"?(.*?)\"?$", line)
if m:
os_release[m.group(1)] = m.group(2)
return os_release
diff --git a/suricata-update/suricata/update/rule.py b/suricata-update/suricata/update/rule.py
index 42c673e..169af6c 100644
--- a/suricata-update/suricata/update/rule.py
+++ b/suricata-update/suricata/update/rule.py
@@ -436,4 +436,4 @@ def parse_var_names(var):
""" Parse out the variable names from a string. """
if var is None:
return []
- return re.findall("\$([\w_]+)", var)
+ return re.findall(r"\$([\w_]+)", var)
diff --git a/suricata-update/suricata/update/version.py b/suricata-update/suricata/update/version.py
index 1cdf5a1..75d1205 100644
--- a/suricata-update/suricata/update/version.py
+++ b/suricata-update/suricata/update/version.py
@@ -4,4 +4,4 @@
# Alpha: 1.0.0a1
# Development: 1.0.0dev0
# Release candidate: 1.0.0rc1
-version = "1.3.0"
+version = "1.3.2"
diff --git a/suricata-update/tests/integration_tests.py b/suricata-update/tests/integration_tests.py
index 8970585..c4b119b 100755
--- a/suricata-update/tests/integration_tests.py
+++ b/suricata-update/tests/integration_tests.py
@@ -118,6 +118,15 @@ run(common_args + [
"testing-header-with-spaces", "file:///doesnotexist"
])
+run(common_args + [
+ "add-source",
+ "suricata-test-rules",
+ "file://{}/tests/suricata-test-rules.zip".format(os.getcwd()),
+])
+run(common_args)
+assert(os.path.exists(os.path.join(DATA_DIR, "rules/testmyids.md5")))
+assert(os.path.exists(os.path.join(DATA_DIR, "rules/testmyids.sha1")))
+assert(os.path.exists(os.path.join(DATA_DIR, "rules/testmyids.sha256")))
class IntegrationTest:
def __init__(self, configs={}):
diff --git a/suricata-update/tests/suricata-test-rules.zip b/suricata-update/tests/suricata-test-rules.zip
new file mode 100644
index 0000000..4f834f8
--- /dev/null
+++ b/suricata-update/tests/suricata-test-rules.zip
Binary files differ
diff --git a/suricata-update/tests/test_main.py b/suricata-update/tests/test_main.py
index 86fa486..919b88b 100644
--- a/suricata-update/tests/test_main.py
+++ b/suricata-update/tests/test_main.py
@@ -127,7 +127,7 @@ class ModifyRuleFilterTestCase(unittest.TestCase):
def test_id_match(self):
rule0 = suricata.update.rule.parse(self.rule_string)
- line = '2020757 "\|0d 0a\|" "|ff ff|"'
+ line = r'2020757 "\|0d 0a\|" "|ff ff|"'
rule_filter = matchers_mod.ModifyRuleFilter.parse(line)
self.assertTrue(rule_filter != None)
self.assertTrue(rule_filter.match(rule0))
@@ -138,7 +138,7 @@ class ModifyRuleFilterTestCase(unittest.TestCase):
def test_re_match(self):
rule0 = suricata.update.rule.parse(self.rule_string)
- line = 're:classtype:trojan-activity "\|0d 0a\|" "|ff ff|"'
+ line = r're:classtype:trojan-activity "\|0d 0a\|" "|ff ff|"'
rule_filter = matchers_mod.ModifyRuleFilter.parse(line)
self.assertTrue(rule_filter != None)
self.assertTrue(rule_filter.match(rule0))
diff --git a/suricata-update/tox.ini b/suricata-update/tox.ini
index 5ce1245..3200b2d 100644
--- a/suricata-update/tox.ini
+++ b/suricata-update/tox.ini
@@ -4,7 +4,7 @@
# and then run "tox" from this directory.
[tox]
-envlist = py27, py36, py37, py38
+envlist = py27, py36, py37, py38, py39, py310, py311
[testenv]
commands = pytest