author     Daniel Baumann <daniel.baumann@progress-linux.org>  2021-03-04 19:22:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2021-03-04 20:43:22 +0000
commit     22c74419e2c258319bc723351876604b3304604b (patch)
tree       8c799a78d53f67388fdf42900657eda617c1306a /src/pcap-thread
parent     Initial commit. (diff)
Adding upstream version 2.0.0+debian. (upstream/2.0.0+debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/pcap-thread')
-rw-r--r--   src/pcap-thread/m4/ax_pcap_thread.m4      15
-rw-r--r--   src/pcap-thread/m4/ax_pthread.m4           485
-rw-r--r--   src/pcap-thread/pcap_thread.c              3818
-rw-r--r--   src/pcap-thread/pcap_thread.h              640
-rw-r--r--   src/pcap-thread/pcap_thread_ext_frag.c     1013
-rw-r--r--   src/pcap-thread/pcap_thread_ext_frag.h     131
6 files changed, 6102 insertions, 0 deletions
diff --git a/src/pcap-thread/m4/ax_pcap_thread.m4 b/src/pcap-thread/m4/ax_pcap_thread.m4
new file mode 100644
index 0000000..8831822
--- /dev/null
+++ b/src/pcap-thread/m4/ax_pcap_thread.m4
@@ -0,0 +1,15 @@
+AC_DEFUN([AX_PCAP_THREAD_PCAP], [
+ AC_HEADER_TIME
+ AC_CHECK_LIB([pcap], [pcap_open_live], [], AC_MSG_ERROR([libpcap not found]))
+ AC_CHECK_HEADER([pcap/pcap.h], [], [AC_MSG_ERROR([libpcap header not found])])
+ AC_CHECK_HEADERS([endian.h sys/endian.h machine/endian.h sys/time.h])
+ AC_CHECK_FUNCS([pcap_create pcap_set_tstamp_precision pcap_set_immediate_mode])
+ AC_CHECK_FUNCS([pcap_set_tstamp_type pcap_setdirection sched_yield])
+ AC_CHECK_FUNCS([pcap_open_offline_with_tstamp_precision pcap_activate])
+ AC_CHECK_TYPES([pcap_direction_t], [], [], [[#include <pcap/pcap.h>]])
+])
+
+AC_DEFUN([AX_PCAP_THREAD], [
+ AX_PTHREAD
+ AX_PCAP_THREAD_PCAP
+])
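
The AX_PCAP_THREAD_PCAP macro above probes libpcap for optional capture APIs and leaves HAVE_PCAP_CREATE, HAVE_PCAP_SET_IMMEDIATE_MODE and friends in config.h, so the C code can fall back when a function is missing. A minimal sketch of how a consumer might branch on those results; the open_capture() helper and its fallback logic are illustrative only and not part of this patch:

/* Sketch: prefer the pcap_create()/pcap_activate() API when configure found
 * it, otherwise fall back to pcap_open_live(). Assumes config.h defines the
 * HAVE_* macros produced by AX_PCAP_THREAD_PCAP. */
#include "config.h"
#include <pcap/pcap.h>
#include <stdio.h>

static pcap_t* open_capture(const char* device)
{
    char errbuf[PCAP_ERRBUF_SIZE];
#ifdef HAVE_PCAP_CREATE
    pcap_t* p = pcap_create(device, errbuf);
    if (!p) {
        fprintf(stderr, "pcap_create: %s\n", errbuf);
        return 0;
    }
    pcap_set_snaplen(p, 65535);
    pcap_set_promisc(p, 1);
    pcap_set_timeout(p, 1000);
#ifdef HAVE_PCAP_SET_IMMEDIATE_MODE
    pcap_set_immediate_mode(p, 1);
#endif
    if (pcap_activate(p)) {
        fprintf(stderr, "pcap_activate: %s\n", pcap_geterr(p));
        pcap_close(p);
        return 0;
    }
    return p;
#else
    return pcap_open_live(device, 65535, 1, 1000, errbuf);
#endif
}
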
diff --git a/src/pcap-thread/m4/ax_pthread.m4 b/src/pcap-thread/m4/ax_pthread.m4
new file mode 100644
index 0000000..4c4051e
--- /dev/null
+++ b/src/pcap-thread/m4/ax_pthread.m4
@@ -0,0 +1,485 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_pthread.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]])
+#
+# DESCRIPTION
+#
+# This macro figures out how to build C programs using POSIX threads. It
+# sets the PTHREAD_LIBS output variable to the threads library and linker
+# flags, and the PTHREAD_CFLAGS output variable to any special C compiler
+# flags that are needed. (The user can also force certain compiler
+# flags/libs to be tested by setting these environment variables.)
+#
+# Also sets PTHREAD_CC to any special C compiler that is needed for
+# multi-threaded programs (defaults to the value of CC otherwise). (This
+# is necessary on AIX to use the special cc_r compiler alias.)
+#
+# NOTE: You are assumed to not only compile your program with these flags,
+# but also to link with them as well. For example, you might link with
+# $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS
+#
+# If you are only building threaded programs, you may wish to use these
+# variables in your default LIBS, CFLAGS, and CC:
+#
+# LIBS="$PTHREAD_LIBS $LIBS"
+# CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+# CC="$PTHREAD_CC"
+#
+# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant
+# has a nonstandard name, this macro defines PTHREAD_CREATE_JOINABLE to
+# that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX).
+#
+# Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the
+# PTHREAD_PRIO_INHERIT symbol is defined when compiling with
+# PTHREAD_CFLAGS.
+#
+# ACTION-IF-FOUND is a list of shell commands to run if a threads library
+# is found, and ACTION-IF-NOT-FOUND is a list of commands to run it if it
+# is not found. If ACTION-IF-FOUND is not specified, the default action
+# will define HAVE_PTHREAD.
+#
+# Please let the authors know if this macro fails on any platform, or if
+# you have any other suggestions or comments. This macro was based on work
+# by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help
+# from M. Frigo), as well as ac_pthread and hb_pthread macros posted by
+# Alejandro Forero Cuervo to the autoconf macro repository. We are also
+# grateful for the helpful feedback of numerous users.
+#
+# Updated for Autoconf 2.68 by Daniel Richard G.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
+# Copyright (c) 2011 Daniel Richard G. <skunk@iSKUNK.ORG>
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 23
+
+AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD])
+AC_DEFUN([AX_PTHREAD], [
+AC_REQUIRE([AC_CANONICAL_HOST])
+AC_REQUIRE([AC_PROG_CC])
+AC_REQUIRE([AC_PROG_SED])
+AC_LANG_PUSH([C])
+ax_pthread_ok=no
+
+# We used to check for pthread.h first, but this fails if pthread.h
+# requires special compiler flags (e.g. on Tru64 or Sequent).
+# It gets checked for in the link test anyway.
+
+# First of all, check if the user has set any of the PTHREAD_LIBS,
+# etcetera environment variables, and if threads linking works using
+# them:
+if test "x$PTHREAD_CFLAGS$PTHREAD_LIBS" != "x"; then
+ ax_pthread_save_CC="$CC"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"])
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS])
+ AC_LINK_IFELSE([AC_LANG_CALL([], [pthread_join])], [ax_pthread_ok=yes])
+ AC_MSG_RESULT([$ax_pthread_ok])
+ if test "x$ax_pthread_ok" = "xno"; then
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+ fi
+ CC="$ax_pthread_save_CC"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
+fi
+
+# We must check for the threads library under a number of different
+# names; the ordering is very important because some systems
+# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
+# libraries is broken (non-POSIX).
+
+# Create a list of thread flags to try. Items starting with a "-" are
+# C compiler flags, and other items are library names, except for "none"
+# which indicates that we try without any flags at all, and "pthread-config"
+# which is a program returning the flags for the Pth emulation library.
+
+ax_pthread_flags="pthreads none -Kthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
+
+# The ordering *is* (sometimes) important. Some notes on the
+# individual items follow:
+
+# pthreads: AIX (must check this before -lpthread)
+# none: in case threads are in libc; should be tried before -Kthread and
+# other compiler flags to prevent continual compiler warnings
+# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
+# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads), Tru64
+# (Note: HP C rejects this with "bad form for `-t' option")
+# -pthreads: Solaris/gcc (Note: HP C also rejects)
+# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
+# doesn't hurt to check since this sometimes defines pthreads and
+# -D_REENTRANT too), HP C (must be checked before -lpthread, which
+# is present but should not be used directly; and before -mthreads,
+# because the compiler interprets this as "-mt" + "-hreads")
+# -mthreads: Mingw32/gcc, Lynx/gcc
+# pthread: Linux, etcetera
+# --thread-safe: KAI C++
+# pthread-config: use pthread-config program (for GNU Pth library)
+
+case $host_os in
+
+ freebsd*)
+
+ # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+ # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+
+ ax_pthread_flags="-kthread lthread $ax_pthread_flags"
+ ;;
+
+ hpux*)
+
+ # From the cc(1) man page: "[-mt] Sets various -D flags to enable
+ # multi-threading and also sets -lpthread."
+
+ ax_pthread_flags="-mt -pthread pthread $ax_pthread_flags"
+ ;;
+
+ openedition*)
+
+ # IBM z/OS requires a feature-test macro to be defined in order to
+ # enable POSIX threads at all, so give the user a hint if this is
+ # not set. (We don't define these ourselves, as they can affect
+ # other portions of the system API in unpredictable ways.)
+
+ AC_EGREP_CPP([AX_PTHREAD_ZOS_MISSING],
+ [
+# if !defined(_OPEN_THREADS) && !defined(_UNIX03_THREADS)
+ AX_PTHREAD_ZOS_MISSING
+# endif
+ ],
+ [AC_MSG_WARN([IBM z/OS requires -D_OPEN_THREADS or -D_UNIX03_THREADS to enable pthreads support.])])
+ ;;
+
+ solaris*)
+
+ # On Solaris (at least, for some versions), libc contains stubbed
+ # (non-functional) versions of the pthreads routines, so link-based
+ # tests will erroneously succeed. (N.B.: The stubs are missing
+ # pthread_cleanup_push, or rather a function called by this macro,
+ # so we could check for that, but who knows whether they'll stub
+ # that too in a future libc.) So we'll check first for the
+ # standard Solaris way of linking pthreads (-mt -lpthread).
+
+ ax_pthread_flags="-mt,pthread pthread $ax_pthread_flags"
+ ;;
+esac
+
+# GCC generally uses -pthread, or -pthreads on some platforms (e.g. SPARC)
+
+AS_IF([test "x$GCC" = "xyes"],
+ [ax_pthread_flags="-pthread -pthreads $ax_pthread_flags"])
+
+# The presence of a feature test macro requesting re-entrant function
+# definitions is, on some systems, a strong hint that pthreads support is
+# correctly enabled
+
+case $host_os in
+ darwin* | hpux* | linux* | osf* | solaris*)
+ ax_pthread_check_macro="_REENTRANT"
+ ;;
+
+ aix*)
+ ax_pthread_check_macro="_THREAD_SAFE"
+ ;;
+
+ *)
+ ax_pthread_check_macro="--"
+ ;;
+esac
+AS_IF([test "x$ax_pthread_check_macro" = "x--"],
+ [ax_pthread_check_cond=0],
+ [ax_pthread_check_cond="!defined($ax_pthread_check_macro)"])
+
+# Are we compiling with Clang?
+
+AC_CACHE_CHECK([whether $CC is Clang],
+ [ax_cv_PTHREAD_CLANG],
+ [ax_cv_PTHREAD_CLANG=no
+ # Note that Autoconf sets GCC=yes for Clang as well as GCC
+ if test "x$GCC" = "xyes"; then
+ AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG],
+ [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
+# if defined(__clang__) && defined(__llvm__)
+ AX_PTHREAD_CC_IS_CLANG
+# endif
+ ],
+ [ax_cv_PTHREAD_CLANG=yes])
+ fi
+ ])
+ax_pthread_clang="$ax_cv_PTHREAD_CLANG"
+
+ax_pthread_clang_warning=no
+
+# Clang needs special handling, because older versions handle the -pthread
+# option in a rather... idiosyncratic way
+
+if test "x$ax_pthread_clang" = "xyes"; then
+
+ # Clang takes -pthread; it has never supported any other flag
+
+ # (Note 1: This will need to be revisited if a system that Clang
+ # supports has POSIX threads in a separate library. This tends not
+ # to be the way of modern systems, but it's conceivable.)
+
+ # (Note 2: On some systems, notably Darwin, -pthread is not needed
+ # to get POSIX threads support; the API is always present and
+ # active. We could reasonably leave PTHREAD_CFLAGS empty. But
+ # -pthread does define _REENTRANT, and while the Darwin headers
+ # ignore this macro, third-party headers might not.)
+
+ PTHREAD_CFLAGS="-pthread"
+ PTHREAD_LIBS=
+
+ ax_pthread_ok=yes
+
+ # However, older versions of Clang make a point of warning the user
+ # that, in an invocation where only linking and no compilation is
+ # taking place, the -pthread option has no effect ("argument unused
+ # during compilation"). They expect -pthread to be passed in only
+ # when source code is being compiled.
+ #
+ # Problem is, this is at odds with the way Automake and most other
+ # C build frameworks function, which is that the same flags used in
+ # compilation (CFLAGS) are also used in linking. Many systems
+ # supported by AX_PTHREAD require exactly this for POSIX threads
+ # support, and in fact it is often not straightforward to specify a
+ # flag that is used only in the compilation phase and not in
+ # linking. Such a scenario is extremely rare in practice.
+ #
+ # Even though use of the -pthread flag in linking would only print
+ # a warning, this can be a nuisance for well-run software projects
+ # that build with -Werror. So if the active version of Clang has
+ # this misfeature, we search for an option to squash it.
+
+ AC_CACHE_CHECK([whether Clang needs flag to prevent "argument unused" warning when linking with -pthread],
+ [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG],
+ [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG=unknown
+ # Create an alternate version of $ac_link that compiles and
+ # links in two steps (.c -> .o, .o -> exe) instead of one
+ # (.c -> exe), because the warning occurs only in the second
+ # step
+ ax_pthread_save_ac_link="$ac_link"
+ ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
+ ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
+ ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
+ AS_IF([test "x$ax_pthread_try" = "xunknown"], [break])
+ CFLAGS="-Werror -Wunknown-warning-option $ax_pthread_try -pthread $ax_pthread_save_CFLAGS"
+ ac_link="$ax_pthread_save_ac_link"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
+ [ac_link="$ax_pthread_2step_ac_link"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
+ [break])
+ ])
+ done
+ ac_link="$ax_pthread_save_ac_link"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ AS_IF([test "x$ax_pthread_try" = "x"], [ax_pthread_try=no])
+ ax_cv_PTHREAD_CLANG_NO_WARN_FLAG="$ax_pthread_try"
+ ])
+
+ case "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" in
+ no | unknown) ;;
+ *) PTHREAD_CFLAGS="$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG $PTHREAD_CFLAGS" ;;
+ esac
+
+fi # $ax_pthread_clang = yes
+
+if test "x$ax_pthread_ok" = "xno"; then
+for ax_pthread_try_flag in $ax_pthread_flags; do
+
+ case $ax_pthread_try_flag in
+ none)
+ AC_MSG_CHECKING([whether pthreads work without any flags])
+ ;;
+
+ -mt,pthread)
+ AC_MSG_CHECKING([whether pthreads work with -mt -lpthread])
+ PTHREAD_CFLAGS="-mt"
+ PTHREAD_LIBS="-lpthread"
+ ;;
+
+ -*)
+ AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag])
+ PTHREAD_CFLAGS="$ax_pthread_try_flag"
+ ;;
+
+ pthread-config)
+ AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
+ AS_IF([test "x$ax_pthread_config" = "xno"], [continue])
+ PTHREAD_CFLAGS="`pthread-config --cflags`"
+ PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+ ;;
+
+ *)
+ AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag])
+ PTHREAD_LIBS="-l$ax_pthread_try_flag"
+ ;;
+ esac
+
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+
+ # Check for various functions. We must include pthread.h,
+ # since some functions may be macros. (On the Sequent, we
+ # need a special flag -Kthread to make this header compile.)
+ # We check for pthread_join because it is in -lpthread on IRIX
+ # while pthread_create is in libc. We check for pthread_attr_init
+ # due to DEC craziness with -lpthreads. We check for
+ # pthread_cleanup_push because it is one of the few pthread
+ # functions on Solaris that doesn't have a non-functional libc stub.
+ # We try pthread_create on general principles.
+
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
+# if $ax_pthread_check_cond
+# error "$ax_pthread_check_macro must be defined"
+# endif
+ static void routine(void *a) { a = 0; }
+ static void *start_routine(void *a) { return a; }],
+ [pthread_t th; pthread_attr_t attr;
+ pthread_create(&th, 0, start_routine, 0);
+ pthread_join(th, 0);
+ pthread_attr_init(&attr);
+ pthread_cleanup_push(routine, 0);
+ pthread_cleanup_pop(0) /* ; */])],
+ [ax_pthread_ok=yes],
+ [])
+
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
+
+ AC_MSG_RESULT([$ax_pthread_ok])
+ AS_IF([test "x$ax_pthread_ok" = "xyes"], [break])
+
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+done
+fi
+
+# Various other checks:
+if test "x$ax_pthread_ok" = "xyes"; then
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+
+ # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+ AC_CACHE_CHECK([for joinable pthread attribute],
+ [ax_cv_PTHREAD_JOINABLE_ATTR],
+ [ax_cv_PTHREAD_JOINABLE_ATTR=unknown
+ for ax_pthread_attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>],
+ [int attr = $ax_pthread_attr; return attr /* ; */])],
+ [ax_cv_PTHREAD_JOINABLE_ATTR=$ax_pthread_attr; break],
+ [])
+ done
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xunknown" && \
+ test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xPTHREAD_CREATE_JOINABLE" && \
+ test "x$ax_pthread_joinable_attr_defined" != "xyes"],
+ [AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE],
+ [$ax_cv_PTHREAD_JOINABLE_ATTR],
+ [Define to necessary symbol if this constant
+ uses a non-standard name on your system.])
+ ax_pthread_joinable_attr_defined=yes
+ ])
+
+ AC_CACHE_CHECK([whether more special flags are required for pthreads],
+ [ax_cv_PTHREAD_SPECIAL_FLAGS],
+ [ax_cv_PTHREAD_SPECIAL_FLAGS=no
+ case $host_os in
+ solaris*)
+ ax_cv_PTHREAD_SPECIAL_FLAGS="-D_POSIX_PTHREAD_SEMANTICS"
+ ;;
+ esac
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_SPECIAL_FLAGS" != "xno" && \
+ test "x$ax_pthread_special_flags_added" != "xyes"],
+ [PTHREAD_CFLAGS="$ax_cv_PTHREAD_SPECIAL_FLAGS $PTHREAD_CFLAGS"
+ ax_pthread_special_flags_added=yes])
+
+ AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT],
+ [ax_cv_PTHREAD_PRIO_INHERIT],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]],
+ [[int i = PTHREAD_PRIO_INHERIT;]])],
+ [ax_cv_PTHREAD_PRIO_INHERIT=yes],
+ [ax_cv_PTHREAD_PRIO_INHERIT=no])
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes" && \
+ test "x$ax_pthread_prio_inherit_defined" != "xyes"],
+ [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])
+ ax_pthread_prio_inherit_defined=yes
+ ])
+
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
+
+ # More AIX lossage: compile with *_r variant
+ if test "x$GCC" != "xyes"; then
+ case $host_os in
+ aix*)
+ AS_CASE(["x/$CC"],
+ [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
+ [#handle absolute path differently from PATH based program lookup
+ AS_CASE(["x$CC"],
+ [x/*],
+ [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
+ [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
+ ;;
+ esac
+ fi
+fi
+
+test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
+
+AC_SUBST([PTHREAD_LIBS])
+AC_SUBST([PTHREAD_CFLAGS])
+AC_SUBST([PTHREAD_CC])
+
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test "x$ax_pthread_ok" = "xyes"; then
+ ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1])
+ :
+else
+ ax_pthread_ok=no
+ $2
+fi
+AC_LANG_POP
+])dnl AX_PTHREAD
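
AX_PTHREAD only discovers the right flags; the program still has to be compiled and linked with $PTHREAD_CC, $PTHREAD_CFLAGS and $PTHREAD_LIBS as the macro header describes, with HAVE_PTHREAD coming from the generated config.h. A minimal sketch of such a consumer; the worker() example is illustrative and not part of this patch:

/* Sketch: build as
 *   $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS prog.c $LDFLAGS $PTHREAD_LIBS $LIBS
 * per the usage notes in the macro header above. */
#include "config.h"
#ifdef HAVE_PTHREAD
#include <pthread.h>
#include <stdio.h>

static void* worker(void* arg)
{
    printf("hello from thread %d\n", *(int*)arg);
    return arg;
}

int main(void)
{
    pthread_t th;
    int       id = 1;

    if (pthread_create(&th, 0, worker, &id))
        return 1;
    pthread_join(th, 0);
    return 0;
}
#else
int main(void)
{
    return 0; /* configure found no pthread support */
}
#endif
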
diff --git a/src/pcap-thread/pcap_thread.c b/src/pcap-thread/pcap_thread.c
new file mode 100644
index 0000000..8acdcbe
--- /dev/null
+++ b/src/pcap-thread/pcap_thread.c
@@ -0,0 +1,3818 @@
+/*
+ * Author Jerry Lundström <jerry@dns-oarc.net>
+ * Copyright (c) 2016-2017, OARC, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "pcap_thread.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/select.h>
+
+#ifndef PCAP_THREAD_LAYER_TRACE
+#define PCAP_THREAD_LAYER_TRACE 0
+#endif
+
+/*
+ * Forward declares for layer callbacks
+ */
+
+static void pcap_thread_callback(u_char* user, const struct pcap_pkthdr* pkthdr, const u_char* pkt, const char* name, int dlt);
+static void pcap_thread_callback_linux_sll(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_ether(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_null(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_loop(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_ieee802(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_gre(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_ip(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_ipv4(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_ipv6(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_icmp(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_icmpv6(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_udp(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+static void pcap_thread_callback_tcp(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+
+/*
+ * Version
+ */
+
+static const char* _version = PCAP_THREAD_VERSION_STR;
+
+const char* pcap_thread_version_str(void)
+{
+ return _version;
+}
+
+int pcap_thread_version_major(void)
+{
+ return PCAP_THREAD_VERSION_MAJOR;
+}
+
+int pcap_thread_version_minor(void)
+{
+ return PCAP_THREAD_VERSION_MINOR;
+}
+
+int pcap_thread_version_patch(void)
+{
+ return PCAP_THREAD_VERSION_PATCH;
+}
+
+/*
+ * Create/Free
+ */
+
+static pcap_thread_t _pcap_thread_defaults = PCAP_THREAD_T_INIT;
+
+pcap_thread_t* pcap_thread_create(void)
+{
+ pcap_thread_t* pcap_thread = calloc(1, sizeof(pcap_thread_t));
+ if (pcap_thread) {
+ memcpy(pcap_thread, &_pcap_thread_defaults, sizeof(pcap_thread_t));
+ }
+
+ return pcap_thread;
+}
+
+void pcap_thread_free(pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return;
+ }
+
+ pcap_thread_close(pcap_thread);
+ if (pcap_thread->filter) {
+ free(pcap_thread->filter);
+ }
+ free(pcap_thread);
+}
+
+/*
+ * Get/Set
+ */
+
+int pcap_thread_use_threads(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->use_threads;
+}
+
+int pcap_thread_set_use_threads(pcap_thread_t* pcap_thread, const int use_threads)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->use_threads = use_threads;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_use_layers(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->use_layers;
+}
+
+int pcap_thread_set_use_layers(pcap_thread_t* pcap_thread, const int use_layers)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->use_layers = use_layers;
+
+ return PCAP_THREAD_OK;
+}
+
+pcap_thread_queue_mode_t pcap_thread_queue_mode(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->queue_mode;
+}
+
+int pcap_thread_set_queue_mode(pcap_thread_t* pcap_thread, const pcap_thread_queue_mode_t queue_mode)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ switch (queue_mode) {
+ case PCAP_THREAD_QUEUE_MODE_COND:
+ case PCAP_THREAD_QUEUE_MODE_DIRECT:
+ break;
+ case PCAP_THREAD_QUEUE_MODE_YIELD:
+ case PCAP_THREAD_QUEUE_MODE_WAIT:
+ case PCAP_THREAD_QUEUE_MODE_DROP:
+ return PCAP_THREAD_EOBSOLETE;
+ default:
+ return PCAP_THREAD_EINVAL;
+ }
+
+ pcap_thread->queue_mode = queue_mode;
+
+ return PCAP_THREAD_OK;
+}
+
+struct timeval pcap_thread_queue_wait(const pcap_thread_t* pcap_thread)
+{
+ static struct timeval tv = { 0, 0 };
+ return tv;
+}
+
+int pcap_thread_set_queue_wait(pcap_thread_t* pcap_thread, const struct timeval queue_wait)
+{
+ return PCAP_THREAD_EOBSOLETE;
+}
+
+pcap_thread_queue_mode_t pcap_thread_callback_queue_mode(const pcap_thread_t* pcap_thread)
+{
+ return PCAP_THREAD_EOBSOLETE;
+}
+
+int pcap_thread_set_callback_queue_mode(pcap_thread_t* pcap_thread, const pcap_thread_queue_mode_t callback_queue_mode)
+{
+ return PCAP_THREAD_EOBSOLETE;
+}
+
+struct timeval pcap_thread_callback_queue_wait(const pcap_thread_t* pcap_thread)
+{
+ static struct timeval tv = { 0, 0 };
+ return tv;
+}
+
+int pcap_thread_set_callback_queue_wait(pcap_thread_t* pcap_thread, const struct timeval callback_queue_wait)
+{
+ return PCAP_THREAD_EOBSOLETE;
+}
+
+int pcap_thread_snapshot(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->snapshot;
+}
+
+int pcap_thread_snaplen(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->snaplen;
+}
+
+int pcap_thread_set_snaplen(pcap_thread_t* pcap_thread, const int snaplen)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->snaplen = snaplen;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_promiscuous(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->promiscuous;
+}
+
+int pcap_thread_set_promiscuous(pcap_thread_t* pcap_thread, const int promiscuous)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->promiscuous = promiscuous;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_monitor(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->monitor;
+}
+
+int pcap_thread_set_monitor(pcap_thread_t* pcap_thread, const int monitor)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->monitor = monitor;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_timeout(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->timeout;
+}
+
+int pcap_thread_set_timeout(pcap_thread_t* pcap_thread, const int timeout)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->timeout = timeout;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_buffer_size(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->buffer_size;
+}
+
+int pcap_thread_set_buffer_size(pcap_thread_t* pcap_thread, const int buffer_size)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->buffer_size = buffer_size;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_timestamp_type(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->timestamp_type;
+}
+
+int pcap_thread_set_timestamp_type(pcap_thread_t* pcap_thread, const int timestamp_type)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->have_timestamp_type = 1;
+ pcap_thread->timestamp_type = timestamp_type;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_timestamp_precision(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->timestamp_precision;
+}
+
+int pcap_thread_set_timestamp_precision(pcap_thread_t* pcap_thread, const int timestamp_precision)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->have_timestamp_precision = 1;
+ pcap_thread->timestamp_precision = timestamp_precision;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_immediate_mode(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->immediate_mode;
+}
+
+int pcap_thread_set_immediate_mode(pcap_thread_t* pcap_thread, const int immediate_mode)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->immediate_mode = immediate_mode;
+
+ return PCAP_THREAD_OK;
+}
+
+pcap_direction_t pcap_thread_direction(const pcap_thread_t* pcap_thread)
+{
+#ifdef HAVE_PCAP_DIRECTION_T
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->direction;
+#else
+ return 0;
+#endif
+}
+
+int pcap_thread_set_direction(pcap_thread_t* pcap_thread, const pcap_direction_t direction)
+{
+#ifdef HAVE_PCAP_DIRECTION_T
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->have_direction = 1;
+ pcap_thread->direction = direction;
+
+ return PCAP_THREAD_OK;
+#else
+ return PCAP_THREAD_ENODIR;
+#endif
+}
+
+const char* pcap_thread_filter(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return 0;
+ }
+
+ return pcap_thread->filter;
+}
+
+int pcap_thread_set_filter(pcap_thread_t* pcap_thread, const char* filter, const size_t filter_len)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!filter) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!filter_len) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ if (pcap_thread->filter) {
+ free(pcap_thread->filter);
+ }
+ if (!(pcap_thread->filter = strndup(filter, filter_len))) {
+ return PCAP_THREAD_ENOMEM;
+ }
+ pcap_thread->filter_len = filter_len;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_clear_filter(pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ if (pcap_thread->filter) {
+ free(pcap_thread->filter);
+ pcap_thread->filter = 0;
+ pcap_thread->filter_len = 0;
+ }
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_filter_errno(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->filter_errno;
+}
+
+int pcap_thread_filter_optimize(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->filter_optimize;
+}
+
+int pcap_thread_set_filter_optimize(pcap_thread_t* pcap_thread, const int filter_optimize)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->filter_optimize = filter_optimize;
+
+ return PCAP_THREAD_OK;
+}
+
+bpf_u_int32 pcap_thread_filter_netmask(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->filter_netmask;
+}
+
+int pcap_thread_set_filter_netmask(pcap_thread_t* pcap_thread, const bpf_u_int32 filter_netmask)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->filter_netmask = filter_netmask;
+
+ return PCAP_THREAD_OK;
+}
+
+struct timeval pcap_thread_timedrun(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ static struct timeval tv = { 0, 0 };
+ return tv;
+ }
+
+ return pcap_thread->timedrun;
+}
+
+int pcap_thread_set_timedrun(pcap_thread_t* pcap_thread, const struct timeval timedrun)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->timedrun = timedrun;
+
+ return PCAP_THREAD_OK;
+}
+
+struct timeval pcap_thread_timedrun_to(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ static struct timeval tv = { 0, 0 };
+ return tv;
+ }
+
+ return pcap_thread->timedrun_to;
+}
+
+int pcap_thread_set_timedrun_to(pcap_thread_t* pcap_thread, const struct timeval timedrun_to)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->timedrun_to = timedrun_to;
+
+ return PCAP_THREAD_OK;
+}
+
+pcap_thread_activate_mode_t pcap_thread_activate_mode(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_DEFAULT_ACTIVATE_MODE;
+ }
+
+ return pcap_thread->activate_mode;
+}
+
+int pcap_thread_set_activate_mode(pcap_thread_t* pcap_thread, const pcap_thread_activate_mode_t activate_mode)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->activate_mode = activate_mode;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_was_stopped(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ return pcap_thread->was_stopped;
+}
+
+/*
+ * Queue
+ */
+
+size_t pcap_thread_queue_size(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return -1;
+ }
+
+ return pcap_thread->queue_size;
+}
+
+int pcap_thread_set_queue_size(pcap_thread_t* pcap_thread, const size_t queue_size)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!queue_size) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->queue_size = queue_size;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback(pcap_thread_t* pcap_thread, pcap_thread_callback_t callback)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback = callback;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_dropback(pcap_thread_t* pcap_thread, pcap_thread_callback_t dropback)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->dropback = dropback;
+
+ return PCAP_THREAD_OK;
+}
+
+/*
+ * Layers
+ */
+
+int pcap_thread_set_callback_linux_sll(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_linux_sll)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_linux_sll = callback_linux_sll;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_ether(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ether)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_ether = callback_ether;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_null(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_null)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_null = callback_null;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_loop(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_loop)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_loop = callback_loop;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_ieee802(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ieee802)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_ieee802 = callback_ieee802;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_gre(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_gre)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_gre = callback_gre;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_ip(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ip)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_ip = callback_ip;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_ipv4(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ipv4)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_ipv4 = callback_ipv4;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_ipv4_frag(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_frag_t callback_ipv4_frag)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!callback_ipv4_frag.new
+ || !callback_ipv4_frag.free
+ || !callback_ipv4_frag.reassemble
+ || !callback_ipv4_frag.release) {
+ if (callback_ipv4_frag.new
+ || callback_ipv4_frag.free
+ || callback_ipv4_frag.reassemble
+ || callback_ipv4_frag.release) {
+ return PCAP_THREAD_EINVAL;
+ }
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_ipv4_frag = callback_ipv4_frag;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_ipv6(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ipv6)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_ipv6 = callback_ipv6;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_ipv6_frag(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_frag_t callback_ipv6_frag)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!callback_ipv6_frag.new
+ || !callback_ipv6_frag.free
+ || !callback_ipv6_frag.reassemble
+ || !callback_ipv6_frag.release) {
+ if (callback_ipv6_frag.new
+ || callback_ipv6_frag.free
+ || callback_ipv6_frag.reassemble
+ || callback_ipv6_frag.release) {
+ return PCAP_THREAD_EINVAL;
+ }
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_ipv6_frag = callback_ipv6_frag;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_icmp(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_icmp)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_icmp = callback_icmp;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_icmpv6(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_icmpv6)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_icmpv6 = callback_icmpv6;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_udp(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_udp)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_udp = callback_udp;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_tcp(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_tcp)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6) {
+ return PCAP_THREAD_ELAYERCB;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_tcp = callback_tcp;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_set_callback_invalid(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_invalid)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ pcap_thread->callback_invalid = callback_invalid;
+
+ return PCAP_THREAD_OK;
+}
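
The layer-callback setters above are mutually exclusive by level: registering, say, an ether callback blocks the IP/UDP/TCP ones and vice versa (PCAP_THREAD_ELAYERCB), and every setter refuses changes once capture is running (PCAP_THREAD_ERUNNING). A short usage sketch under those rules; on_udp() and capture_udp() are hypothetical, and pcap_thread_open()/pcap_thread_run() belong to the rest of the pcap-thread API declared in pcap_thread.h, not to this hunk:

/* Sketch: layer mode with a single UDP callback. The callback signature is
 * assumed to match the static layer handlers above
 * (u_char* user, pcap_thread_packet_t*, const u_char* payload, size_t length). */
#include "pcap_thread.h"
#include <stdio.h>

static void on_udp(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
{
    (void)user;
    (void)packet;
    (void)payload;
    printf("udp payload: %zu bytes\n", length);
}

int capture_udp(const char* device)
{
    pcap_thread_t* pt = pcap_thread_create();

    if (!pt)
        return PCAP_THREAD_ENOMEM;
    pcap_thread_set_use_layers(pt, 1);
    pcap_thread_set_snaplen(pt, 65535);
    pcap_thread_set_callback_udp(pt, on_udp);
    /* pcap_thread_open(pt, device, 0); pcap_thread_run(pt); -- see pcap_thread.h */
    (void)device;
    pcap_thread_free(pt);
    return PCAP_THREAD_OK;
}
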
+
+#define need4x2(v1, v2, p, l) \
+ if (l < 1) { \
+ break; \
+ } \
+ v1 = (*p) >> 4; \
+ v2 = (*p) & 0xf; \
+ p += 1; \
+ l -= 1
+
+#define need8(v, p, l) \
+ if (l < 1) { \
+ break; \
+ } \
+ v = *p; \
+ p += 1; \
+ l -= 1
+
+#define need16(v, p, l) \
+ if (l < 2) { \
+ break; \
+ } \
+ v = (*p << 8) + *(p + 1); \
+ p += 2; \
+ l -= 2
+
+#define need32(v, p, l) \
+ if (l < 4) { \
+ break; \
+ } \
+ v = (*p << 24) + (*(p + 1) << 16) + (*(p + 2) << 8) + *(p + 3); \
+ p += 4; \
+ l -= 4
+
+#define needxb(b, x, p, l) \
+ if (l < x) { \
+ break; \
+ } \
+ memcpy(b, p, x); \
+ p += x; \
+ l -= x
+
+#define advancexb(x, p, l) \
+ if (l < x) { \
+ break; \
+ } \
+ p += x; \
+ l -= x
+
+#if PCAP_THREAD_LAYER_TRACE
+#define layer_trace(msg) printf("LT %s:%d: " msg "\n", __FILE__, __LINE__)
+#define layer_tracef(msg, args...) printf("LT %s:%d: " msg "\n", __FILE__, __LINE__, args)
+#else
+#define layer_trace(msg)
+#define layer_tracef(msg, args...)
+#endif
+
+static void pcap_thread_callback(u_char* user, const struct pcap_pkthdr* pkthdr, const u_char* pkt, const char* name, int dlt)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ size_t length;
+ pcap_thread_packet_t packet;
+ const u_char* orig = pkt;
+ size_t origlength;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!pkthdr) {
+ return;
+ }
+ if (!pkt) {
+ return;
+ }
+ if (!name) {
+ return;
+ }
+
+ memset(&packet, 0, sizeof(packet));
+ packet.name = name;
+ packet.dlt = dlt;
+ packet.pkthdr = *pkthdr;
+ packet.have_pkthdr = 1;
+ length = pkthdr->caplen;
+ origlength = length;
+
+ layer_tracef("packet, length %lu", length);
+
+ switch (dlt) {
+ case DLT_NULL:
+ layer_trace("dlt_null");
+ {
+ uint8_t hdr[4];
+
+ packet.state = PCAP_THREAD_PACKET_INVALID_NULL;
+ need8(hdr[0], pkt, length);
+ need8(hdr[1], pkt, length);
+ need8(hdr[2], pkt, length);
+ need8(hdr[3], pkt, length);
+ packet.state = PCAP_THREAD_PACKET_OK;
+
+ /*
+ * The header for null is in host byte order but may not be
+ * in the same endian as host if coming from a savefile
+ */
+
+ if (pcaplist->is_offline && pcap_is_swapped(pcaplist->pcap)) {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ packet.nullhdr.family = hdr[3] + (hdr[2] << 8) + (hdr[1] << 16) + (hdr[0] << 24);
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ packet.nullhdr.family = hdr[0] + (hdr[1] << 8) + (hdr[2] << 16) + (hdr[3] << 24);
+#else
+#error "Please fix <endian.h>"
+#endif
+ } else {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ packet.nullhdr.family = hdr[0] + (hdr[1] << 8) + (hdr[2] << 16) + (hdr[3] << 24);
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ packet.nullhdr.family = hdr[3] + (hdr[2] << 8) + (hdr[1] << 16) + (hdr[0] << 24);
+#else
+#error "Please fix <endian.h>"
+#endif
+ }
+ packet.have_nullhdr = 1;
+
+ if (pcaplist->pcap_thread->callback_null)
+ pcaplist->pcap_thread->callback_null(pcaplist->user, &packet, pkt, length);
+ else
+ pcap_thread_callback_null((void*)pcaplist, &packet, pkt, length);
+ return;
+ }
+ break;
+
+ case DLT_EN10MB:
+ layer_trace("dlt_en10mb");
+ packet.state = PCAP_THREAD_PACKET_INVALID_ETHER;
+ needxb(packet.ethhdr.ether_dhost, sizeof(packet.ethhdr.ether_dhost), pkt, length);
+ needxb(packet.ethhdr.ether_shost, sizeof(packet.ethhdr.ether_shost), pkt, length);
+ need16(packet.ethhdr.ether_type, pkt, length);
+ packet.state = PCAP_THREAD_PACKET_OK;
+ packet.have_ethhdr = 1;
+
+ if (pcaplist->pcap_thread->callback_ether)
+ pcaplist->pcap_thread->callback_ether(pcaplist->user, &packet, pkt, length);
+ else
+ pcap_thread_callback_ether((void*)pcaplist, &packet, pkt, length);
+ return;
+
+ case DLT_LOOP:
+ layer_trace("dlt_loop");
+ packet.state = PCAP_THREAD_PACKET_INVALID_LOOP;
+ need32(packet.loophdr.family, pkt, length);
+ packet.state = PCAP_THREAD_PACKET_OK;
+ packet.have_loophdr = 1;
+
+ if (pcaplist->pcap_thread->callback_loop)
+ pcaplist->pcap_thread->callback_loop(pcaplist->user, &packet, pkt, length);
+ else
+ pcap_thread_callback_loop((void*)pcaplist, &packet, pkt, length);
+ return;
+
+ case DLT_RAW:
+#ifdef DLT_IPV4
+ case DLT_IPV4:
+#endif
+#ifdef DLT_IPV6
+ case DLT_IPV6:
+#endif
+ layer_trace("dlt_raw/ipv4/ipv6");
+ if (pcaplist->pcap_thread->callback_ip)
+ pcaplist->pcap_thread->callback_ip(pcaplist->user, &packet, pkt, length);
+ else
+ pcap_thread_callback_ip((void*)pcaplist, &packet, pkt, length);
+ return;
+
+ case DLT_LINUX_SLL:
+ layer_trace("dlt_linux_sll");
+ packet.state = PCAP_THREAD_PACKET_INVALID_LINUX_SLL;
+ need16(packet.linux_sll.packet_type, pkt, length);
+ need16(packet.linux_sll.arp_hardware, pkt, length);
+ need16(packet.linux_sll.link_layer_address_length, pkt, length);
+ needxb(packet.linux_sll.link_layer_address, 8, pkt, length);
+ need16(packet.linux_sll.ether_type, pkt, length);
+ packet.state = PCAP_THREAD_PACKET_OK;
+ packet.have_linux_sll = 1;
+
+ if (pcaplist->pcap_thread->callback_linux_sll)
+ pcaplist->pcap_thread->callback_linux_sll(pcaplist->user, &packet, pkt, length);
+ else
+ pcap_thread_callback_linux_sll((void*)pcaplist, &packet, pkt, length);
+ return;
+
+ /* TODO: These might be interesting to implement
+ case DLT_IPNET:
+ case DLT_PKTAP:
+ */
+
+ default:
+ packet.state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet.state == PCAP_THREAD_PACKET_OK)
+ packet.state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, &packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_linux_sll(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (packet->have_linux_sll) {
+ layer_trace("have_linux_sll");
+ switch (packet->linux_sll.ether_type) {
+ case 0x8100: /* 802.1q */
+ case 0x88a8: /* 802.1ad */
+ case 0x9100: /* 802.1 QinQ non-standard */
+ if (packet->have_ieee802hdr)
+ break;
+
+ {
+ uint16_t tci;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_IEEE802;
+ need16(tci, payload, length);
+ packet->ieee802hdr.pcp = (tci & 0xe000) >> 13;
+ packet->ieee802hdr.dei = (tci & 0x1000) >> 12;
+ packet->ieee802hdr.vid = tci & 0x0fff;
+ need16(packet->ieee802hdr.ether_type, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_ieee802hdr = 1;
+ }
+
+ if (pcaplist->pcap_thread->callback_ieee802)
+ pcaplist->pcap_thread->callback_ieee802(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ieee802((void*)pcaplist, packet, payload, length);
+ return;
+
+ case ETHERTYPE_IP:
+ case ETHERTYPE_IPV6:
+ if (pcaplist->pcap_thread->callback_ip)
+ pcaplist->pcap_thread->callback_ip(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ip((void*)pcaplist, packet, payload, length);
+ return;
+
+ default:
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_ether(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (packet->have_ethhdr) {
+ layer_trace("have_ethhdr");
+ switch (packet->ethhdr.ether_type) {
+ case 0x8100: /* 802.1q */
+ case 0x88a8: /* 802.1ad */
+ case 0x9100: /* 802.1 QinQ non-standard */
+ if (packet->have_ieee802hdr)
+ break;
+
+ {
+ uint16_t tci;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_IEEE802;
+ need16(tci, payload, length);
+ packet->ieee802hdr.pcp = (tci & 0xe000) >> 13;
+ packet->ieee802hdr.dei = (tci & 0x1000) >> 12;
+ packet->ieee802hdr.vid = tci & 0x0fff;
+ need16(packet->ieee802hdr.ether_type, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_ieee802hdr = 1;
+ }
+
+ if (pcaplist->pcap_thread->callback_ieee802)
+ pcaplist->pcap_thread->callback_ieee802(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ieee802((void*)pcaplist, packet, payload, length);
+ return;
+
+ case ETHERTYPE_IP:
+ case ETHERTYPE_IPV6:
+ if (pcaplist->pcap_thread->callback_ip)
+ pcaplist->pcap_thread->callback_ip(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ip((void*)pcaplist, packet, payload, length);
+ return;
+
+ default:
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_null(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (packet->have_nullhdr) {
+ layer_trace("have_nullhdr");
+
+ /* From the libpcap link-type documentation: the link-layer header is a
+ * 4-byte family field containing a value of 2 for IPv4 packets, a value of
+ * either 24, 28, or 30 for IPv6 packets, a value of 7 for OSI packets, or a
+ * value of 23 for IPX packets. All of the IPv6 values correspond to IPv6
+ * packets; code reading files should check for all of them.
+ */
+
+ switch (packet->nullhdr.family) {
+ case 2:
+ case 24:
+ case 28:
+ case 30:
+ if (pcaplist->pcap_thread->callback_ip)
+ pcaplist->pcap_thread->callback_ip(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ip((void*)pcaplist, packet, payload, length);
+ return;
+
+ default:
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_loop(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (packet->have_loophdr) {
+ layer_trace("have_loophdr");
+
+ /* From the libpcap link-type documentation: the link-layer header is a
+ * 4-byte family field containing a value of 2 for IPv4 packets, a value of
+ * either 24, 28, or 30 for IPv6 packets, a value of 7 for OSI packets, or a
+ * value of 23 for IPX packets. All of the IPv6 values correspond to IPv6
+ * packets; code reading files should check for all of them.
+ */
+
+ switch (packet->loophdr.family) {
+ case 2:
+ case 24:
+ case 28:
+ case 30:
+ if (pcaplist->pcap_thread->callback_ip)
+ pcaplist->pcap_thread->callback_ip(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ip((void*)pcaplist, packet, payload, length);
+ return;
+
+ default:
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_ieee802(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (packet->have_ieee802hdr) {
+ layer_trace("have_ieee802hdr");
+
+ switch (packet->ieee802hdr.ether_type) {
+ case 0x88a8: /* 802.1ad */
+ case 0x9100: /* 802.1 QinQ non-standard */
+ {
+ pcap_thread_packet_t ieee802pkt;
+ uint16_t tci;
+
+ memset(&ieee802pkt, 0, sizeof(ieee802pkt));
+ ieee802pkt.prevpkt = packet;
+ ieee802pkt.have_prevpkt = 1;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_IEEE802;
+ need16(tci, payload, length);
+ ieee802pkt.ieee802hdr.pcp = (tci & 0xe000) >> 13;
+ ieee802pkt.ieee802hdr.dei = (tci & 0x1000) >> 12;
+ ieee802pkt.ieee802hdr.vid = tci & 0x0fff;
+ need16(ieee802pkt.ieee802hdr.ether_type, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ ieee802pkt.have_ieee802hdr = 1;
+
+ if (pcaplist->pcap_thread->callback_ieee802)
+ pcaplist->pcap_thread->callback_ieee802(pcaplist->user, &ieee802pkt, payload, length);
+ else
+ pcap_thread_callback_ieee802((void*)pcaplist, &ieee802pkt, payload, length);
+ return;
+ }
+
+ case ETHERTYPE_IP:
+ case ETHERTYPE_IPV6:
+ if (pcaplist->pcap_thread->callback_ip)
+ pcaplist->pcap_thread->callback_ip(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ip((void*)pcaplist, packet, payload, length);
+ return;
+
+ default:
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_gre(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (packet->have_grehdr) {
+ pcap_thread_packet_t grepkt;
+
+ layer_trace("have_grehdr");
+
+ memset(&grepkt, 0, sizeof(grepkt));
+ grepkt.prevpkt = packet;
+ grepkt.have_prevpkt = 1;
+
+ for (;;) {
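+ /* Note: only a minimal subset of the GRE header is parsed here; the optional checksum, key and sequence fields indicated by the low flag bits are each consumed as single 16-bit reads */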
+ packet->state = PCAP_THREAD_PACKET_INVALID_GRE;
+ if (packet->grehdr.gre_flags & 0x1) {
+ need16(packet->gre.checksum, payload, length);
+ }
+ if (packet->grehdr.gre_flags & 0x4) {
+ need16(packet->gre.key, payload, length);
+ }
+ if (packet->grehdr.gre_flags & 0x8) {
+ need16(packet->gre.sequence, payload, length);
+ }
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_gre = 1;
+
+ switch (packet->grehdr.ether_type) {
+ case ETHERTYPE_IP:
+ case ETHERTYPE_IPV6:
+ if (pcaplist->pcap_thread->callback_ip)
+ pcaplist->pcap_thread->callback_ip(pcaplist->user, &grepkt, payload, length);
+ else
+ pcap_thread_callback_ip((void*)pcaplist, &grepkt, payload, length);
+ return;
+
+ default:
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_ip(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (!packet->have_iphdr && !packet->have_ip6hdr) {
+ layer_trace("checking for ip");
+
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_INVALID_IP;
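+ /* The first byte holds the IP version in the high nibble and, for IPv4, the header length in 32-bit words in the low nibble */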
+ need4x2(packet->iphdr.ip_v, packet->iphdr.ip_hl, payload, length);
+ if (packet->iphdr.ip_v == 4) {
+ packet->state = PCAP_THREAD_PACKET_INVALID_IPV4;
+ need8(packet->iphdr.ip_tos, payload, length);
+ need16(packet->iphdr.ip_len, payload, length);
+ need16(packet->iphdr.ip_id, payload, length);
+ need16(packet->iphdr.ip_off, payload, length);
+ need8(packet->iphdr.ip_ttl, payload, length);
+ need8(packet->iphdr.ip_p, payload, length);
+ need16(packet->iphdr.ip_sum, payload, length);
+ needxb(&(packet->iphdr.ip_src.s_addr), 4, payload, length);
+ needxb(&(packet->iphdr.ip_dst.s_addr), 4, payload, length);
+
+ /* TODO: IPv4 options */
+
+ if (packet->iphdr.ip_hl < 5)
+ break;
+ if (packet->iphdr.ip_hl > 5) {
+ advancexb((packet->iphdr.ip_hl - 5) * 4, payload, length);
+ }
+
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_iphdr = 1;
+
+ if (pcaplist->pcap_thread->callback_ipv4)
+ pcaplist->pcap_thread->callback_ipv4(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ipv4((void*)pcaplist, packet, payload, length);
+ return;
+ } else if (packet->iphdr.ip_v == 6) {
+ /*
+ * Clear the partially read IPv4 fields and rewind the payload by one
+ * byte (the shared version/header-length byte) before parsing as IPv6
+ */
+ packet->iphdr.ip_v = 0;
+ packet->iphdr.ip_hl = 0;
+ payload--;
+ length++;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_IPV6;
+ need32(packet->ip6hdr.ip6_flow, payload, length);
+ need16(packet->ip6hdr.ip6_plen, payload, length);
+ need8(packet->ip6hdr.ip6_nxt, payload, length);
+ need8(packet->ip6hdr.ip6_hlim, payload, length);
+ needxb(&(packet->ip6hdr.ip6_src), 16, payload, length);
+ needxb(&(packet->ip6hdr.ip6_dst), 16, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_ip6hdr = 1;
+
+ if (pcaplist->pcap_thread->callback_ipv6)
+ pcaplist->pcap_thread->callback_ipv6(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_ipv6((void*)pcaplist, packet, payload, length);
+ return;
+ }
+
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_ipv4(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+ int release_frag = 0;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (packet->have_iphdr) {
+ layer_trace("have_iphdr");
+
+ for (;;) {
+ /* Check reported length for missing payload or padding */
+ if (packet->iphdr.ip_len < (packet->iphdr.ip_hl * 4)) {
+ layer_trace("ip_len < ip header");
+ packet->state = PCAP_THREAD_PACKET_INVALID_IPV4;
+ break;
+ }
+ if (length < (packet->iphdr.ip_len - (packet->iphdr.ip_hl * 4))) {
+ layer_trace("length < (ip_len - ip header)");
+ packet->state = PCAP_THREAD_PACKET_INVALID_IPV4;
+ break;
+ }
+ if (length > (packet->iphdr.ip_len - (packet->iphdr.ip_hl * 4))) {
+ layer_trace("have_ippadding");
+ packet->ippadding = length - (packet->iphdr.ip_len - (packet->iphdr.ip_hl * 4));
+ packet->have_ippadding = 1;
+ length -= packet->ippadding;
+ }
+
+ /* Check if the packet has the More Fragments flag (0x2000) set or a non-zero fragment offset (0x1fff) */
+ if (packet->iphdr.ip_off & 0x2000 || packet->iphdr.ip_off & 0x1fff) {
+ layer_trace("is_v4_frag");
+
+ if (pcaplist->pcap_thread->callback_ipv4_frag.reassemble) {
+ pcap_thread_packet_t* whole_packet = 0;
+ const u_char* whole_payload = 0;
+ size_t whole_length = 0;
+
+ packet->state = pcaplist->pcap_thread->callback_ipv4_frag.reassemble(pcaplist->ipv4_frag_ctx, packet, payload, length, &whole_packet, &whole_payload, &whole_length);
+
+ /* Defragmentation failed somehow; return the packet as invalid */
+ if (packet->state != PCAP_THREAD_PACKET_OK) {
+ break;
+ }
+
+ /* No whole/defragmented packet returned yet, need more fragments */
+ if (!whole_packet || !whole_payload || !whole_length) {
+ return;
+ }
+
+ layer_tracef("v4_reasm %p %p %lu", whole_packet, whole_payload, whole_length);
+
+ packet = whole_packet;
+ payload = whole_payload;
+ length = whole_length;
+ release_frag = 1;
+ } else {
+ /*
+ * Mark packet as fragment and send it to the next user
+ * layer (if any) or return it as invalid.
+ */
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+
+ switch (packet->iphdr.ip_p) {
+ case IPPROTO_GRE:
+ layer_trace("ipproto_gre frag");
+
+ if (!(packet->iphdr.ip_off & 0x1fff)) {
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_FRAGMENTED_GREHDR;
+ need16(packet->grehdr.gre_flags, payload, length);
+ need16(packet->grehdr.ether_type, payload, length);
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+ packet->have_grehdr = 1;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_gre) {
+ pcaplist->pcap_thread->callback_gre(pcaplist->user, packet, payload, length);
+ return;
+ }
+ break;
+
+ case IPPROTO_ICMP:
+ layer_trace("ipproto_icmp frag");
+
+ if (!(packet->iphdr.ip_off & 0x1fff)) {
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_FRAGMENTED_ICMPHDR;
+ need8(packet->icmphdr.type, payload, length);
+ need8(packet->icmphdr.code, payload, length);
+ need16(packet->icmphdr.checksum, payload, length);
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+ packet->have_icmphdr = 1;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_icmp) {
+ pcaplist->pcap_thread->callback_icmp(pcaplist->user, packet, payload, length);
+ return;
+ }
+ break;
+
+ case IPPROTO_UDP:
+ layer_trace("ipproto_udp frag");
+
+ if (!(packet->iphdr.ip_off & 0x1fff)) {
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_FRAGMENTED_UDPHDR;
+ need16(packet->udphdr.uh_sport, payload, length);
+ need16(packet->udphdr.uh_dport, payload, length);
+ need16(packet->udphdr.uh_ulen, payload, length);
+ need16(packet->udphdr.uh_sum, payload, length);
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+ packet->have_udphdr = 1;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_udp) {
+ pcaplist->pcap_thread->callback_udp(pcaplist->user, packet, payload, length);
+ return;
+ }
+ break;
+
+ case IPPROTO_TCP:
+ layer_trace("ipproto_tcp frag");
+
+ if (!(packet->iphdr.ip_off & 0x1fff)) {
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_FRAGMENTED_TCPHDR;
+ need16(packet->tcphdr.th_sport, payload, length);
+ need16(packet->tcphdr.th_dport, payload, length);
+ need32(packet->tcphdr.th_seq, payload, length);
+ need32(packet->tcphdr.th_ack, payload, length);
+ need4x2(packet->tcphdr.th_off, packet->tcphdr.th_x2, payload, length);
+ need8(packet->tcphdr.th_flags, payload, length);
+ need16(packet->tcphdr.th_win, payload, length);
+ need16(packet->tcphdr.th_sum, payload, length);
+ need16(packet->tcphdr.th_urp, payload, length);
+ if (packet->tcphdr.th_off > 5) {
+ packet->tcpopts_len = (packet->tcphdr.th_off - 5) * 4;
+ needxb(&(packet->tcpopts[0]), packet->tcpopts_len, payload, length);
+ packet->have_tcpopts = 1;
+ }
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+ packet->have_tcphdr = 1;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_tcp) {
+ pcaplist->pcap_thread->callback_tcp(pcaplist->user, packet, payload, length);
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+ }
+
+ switch (packet->iphdr.ip_p) {
+ case IPPROTO_GRE:
+ layer_trace("ipproto_gre");
+
+ if (packet->have_grehdr)
+ break;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_GRE;
+ need16(packet->grehdr.gre_flags, payload, length);
+ need16(packet->grehdr.ether_type, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_grehdr = 1;
+
+ if (pcaplist->pcap_thread->callback_gre)
+ pcaplist->pcap_thread->callback_gre(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_gre((void*)pcaplist, packet, payload, length);
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv4_frag.release(pcaplist->ipv4_frag_ctx, packet, payload, length);
+ }
+ return;
+
+ case IPPROTO_ICMP:
+ layer_trace("ipproto_icmp");
+
+ if (packet->have_icmphdr)
+ break;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_ICMP;
+ need8(packet->icmphdr.type, payload, length);
+ need8(packet->icmphdr.code, payload, length);
+ need16(packet->icmphdr.checksum, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_icmphdr = 1;
+
+ if (pcaplist->pcap_thread->callback_icmp)
+ pcaplist->pcap_thread->callback_icmp(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_icmp((void*)pcaplist, packet, payload, length);
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv4_frag.release(pcaplist->ipv4_frag_ctx, packet, payload, length);
+ }
+ return;
+
+ case IPPROTO_UDP:
+ layer_trace("ipproto_udp");
+
+ if (packet->have_udphdr)
+ break;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_UDP;
+ need16(packet->udphdr.uh_sport, payload, length);
+ need16(packet->udphdr.uh_dport, payload, length);
+ need16(packet->udphdr.uh_ulen, payload, length);
+ need16(packet->udphdr.uh_sum, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_udphdr = 1;
+
+ if (pcaplist->pcap_thread->callback_udp)
+ pcaplist->pcap_thread->callback_udp(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_udp((void*)pcaplist, packet, payload, length);
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv4_frag.release(pcaplist->ipv4_frag_ctx, packet, payload, length);
+ }
+ return;
+
+ case IPPROTO_TCP:
+ layer_trace("ipproto_tcp");
+
+ if (packet->have_tcphdr)
+ break;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_TCP;
+ need16(packet->tcphdr.th_sport, payload, length);
+ need16(packet->tcphdr.th_dport, payload, length);
+ need32(packet->tcphdr.th_seq, payload, length);
+ need32(packet->tcphdr.th_ack, payload, length);
+ need4x2(packet->tcphdr.th_off, packet->tcphdr.th_x2, payload, length);
+ need8(packet->tcphdr.th_flags, payload, length);
+ need16(packet->tcphdr.th_win, payload, length);
+ need16(packet->tcphdr.th_sum, payload, length);
+ need16(packet->tcphdr.th_urp, payload, length);
+ if (packet->tcphdr.th_off > 5) {
+ packet->tcpopts_len = (packet->tcphdr.th_off - 5) * 4;
+ needxb(&(packet->tcpopts[0]), packet->tcpopts_len, payload, length);
+ packet->have_tcpopts = 1;
+ }
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_tcphdr = 1;
+
+ if (pcaplist->pcap_thread->callback_tcp)
+ pcaplist->pcap_thread->callback_tcp(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_tcp((void*)pcaplist, packet, payload, length);
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv4_frag.release(pcaplist->ipv4_frag_ctx, packet, payload, length);
+ }
+ return;
+
+ default:
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ if (release_frag)
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, payload, length);
+ else
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv4_frag.release(pcaplist->ipv4_frag_ctx, packet, payload, length);
+ }
+}
+
+static void pcap_thread_callback_ipv6(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+ int release_frag = 0;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ if (packet->have_ip6hdr) {
+ struct ip6_ext ext;
+ size_t already_advanced = 0;
+
+ layer_trace("have_ip6hdr");
+
+ /* Check reported length for missing payload or padding */
+ if (length < packet->ip6hdr.ip6_plen) {
+ layer_trace("length < ip6_plen");
+ packet->state = PCAP_THREAD_PACKET_INVALID_IPV6;
+ if (pcaplist->pcap_thread->callback_invalid) {
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+ return;
+ }
+ if (length > packet->ip6hdr.ip6_plen) {
+ layer_trace("have_ip6padding");
+ packet->ip6padding = length - packet->ip6hdr.ip6_plen;
+ packet->have_ip6padding = 1;
+ length -= packet->ip6padding;
+ }
+
+ ext.ip6e_nxt = packet->ip6hdr.ip6_nxt;
+ ext.ip6e_len = 0;
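+ /* Walk the IPv6 extension header chain until an upper-layer protocol (GRE, ICMPv6, UDP, TCP) or No Next Header is reached */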
+ while (ext.ip6e_nxt != IPPROTO_NONE
+ && ext.ip6e_nxt != IPPROTO_GRE
+ && ext.ip6e_nxt != IPPROTO_ICMPV6
+ && ext.ip6e_nxt != IPPROTO_UDP
+ && ext.ip6e_nxt != IPPROTO_TCP) {
+ packet->state = PCAP_THREAD_PACKET_INVALID_IPV6HDR;
+
+ /*
+ * Advance to the start of the next header; this may not be needed
+ * if it is the first header or if the header is supported.
+ */
+ if (ext.ip6e_len) {
+ if (ext.ip6e_len < already_advanced) {
+ /* Header length is invalid */
+ layer_trace("ip6hdr invalid");
+ break;
+ }
+ /* Advance if not already there */
+ else if (ext.ip6e_len > already_advanced) {
+ advancexb((ext.ip6e_len - already_advanced) * 8, payload, length);
+ }
+ already_advanced = 0;
+ } else if (already_advanced) {
+ /* Already advanced but header has no length */
+ layer_trace("ip6hdr already advanced");
+ break;
+ }
+
+ /* TODO: Store IPv6 headers? */
+
+ /* Handle supported headers */
+ if (ext.ip6e_nxt == IPPROTO_FRAGMENT) {
+ if (packet->have_ip6frag) {
+ layer_trace("dup ip6frag");
+ break;
+ }
+ layer_trace("ip6frag");
+ need8(ext.ip6e_nxt, payload, length);
+ need8(packet->ip6frag.ip6f_reserved, payload, length);
+ need16(packet->ip6frag.ip6f_offlg, payload, length);
+ need32(packet->ip6frag.ip6f_ident, payload, length);
+ packet->have_ip6frag = 1;
+ ext.ip6e_len = 1;
+ already_advanced = 1;
+ } else if (ext.ip6e_nxt == IPPROTO_ROUTING) {
+ struct ip6_rthdr rthdr;
+ struct in6_addr rt[255];
+
+ if (packet->have_ip6rtdst) {
+ layer_trace("dup ip6rtdst");
+ break;
+ }
+ need8(ext.ip6e_nxt, payload, length);
+ need8(ext.ip6e_len, payload, length);
+ need8(rthdr.ip6r_type, payload, length);
+ need8(rthdr.ip6r_segleft, payload, length);
+ if (!rthdr.ip6r_type) {
+ if (rthdr.ip6r_segleft > ext.ip6e_len)
+ break;
+ for (rthdr.ip6r_len = 0; rthdr.ip6r_len < ext.ip6e_len; rthdr.ip6r_len++, already_advanced += 2) {
+ needxb(&rt[rthdr.ip6r_len], 16, payload, length);
+ }
+ if (!rthdr.ip6r_len || rthdr.ip6r_len != ext.ip6e_len) {
+ break;
+ }
+ if (rthdr.ip6r_segleft) {
+ packet->ip6rtdst = rt[rthdr.ip6r_segleft];
+ packet->have_ip6rtdst = 1;
+ }
+ }
+ } else {
+ /* Unsupported header */
+ layer_trace("ip6hdr?");
+ need8(ext.ip6e_nxt, payload, length);
+ need8(ext.ip6e_len, payload, length);
+ }
+
+ packet->state = PCAP_THREAD_PACKET_OK;
+
+ if (!ext.ip6e_len)
+ break;
+ }
+
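+ /* With a fragment header present, hand the payload to the reassembly callback if one is set, otherwise mark the packet as a fragment and pass it up; without one, dispatch on the upper-layer protocol */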
+ for (; packet->state == PCAP_THREAD_PACKET_OK;) {
+ if (packet->have_ip6frag) {
+ packet->ip6frag_payload = ext.ip6e_nxt;
+
+ layer_trace("is_v6_frag");
+
+ if (pcaplist->pcap_thread->callback_ipv6_frag.reassemble) {
+ pcap_thread_packet_t* whole_packet = 0;
+ const u_char* whole_payload = 0;
+ size_t whole_length = 0;
+
+ packet->state = pcaplist->pcap_thread->callback_ipv6_frag.reassemble(pcaplist->ipv6_frag_ctx, packet, payload, length, &whole_packet, &whole_payload, &whole_length);
+
+ /* Defragmentation failed somehow; return the packet as invalid */
+ if (packet->state != PCAP_THREAD_PACKET_OK) {
+ break;
+ }
+
+ /* No whole/defragmented packet returned yet, need more fragments */
+ if (!whole_packet || !whole_payload || !whole_length) {
+ return;
+ }
+
+ layer_tracef("v6_reasm %p %p %lu", whole_packet, whole_payload, whole_length);
+
+ packet = whole_packet;
+ payload = whole_payload;
+ length = whole_length;
+ release_frag = 1;
+ } else {
+ /*
+ * Mark packet as fragment and send it to the next user
+ * layer (if any) or return it as invalid.
+ */
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+
+ switch (ext.ip6e_nxt) {
+ case IPPROTO_GRE:
+ layer_trace("ipproto_gre frag");
+
+ if (!packet->ip6frag.ip6f_offlg) {
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_FRAGMENTED_GREHDR;
+ need16(packet->grehdr.gre_flags, payload, length);
+ need16(packet->grehdr.ether_type, payload, length);
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+ packet->have_grehdr = 1;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_gre) {
+ pcaplist->pcap_thread->callback_gre(pcaplist->user, packet, payload, length);
+ return;
+ }
+ break;
+
+ case IPPROTO_ICMPV6:
+ layer_trace("ipproto_icmpv6 frag");
+
+ if (!packet->ip6frag.ip6f_offlg) {
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_FRAGMENTED_ICMPV6HDR;
+ need8(packet->icmpv6hdr.icmp6_type, payload, length);
+ need8(packet->icmpv6hdr.icmp6_code, payload, length);
+ need16(packet->icmpv6hdr.icmp6_cksum, payload, length);
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+ packet->have_icmpv6hdr = 1;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_icmpv6) {
+ pcaplist->pcap_thread->callback_icmpv6(pcaplist->user, packet, payload, length);
+ return;
+ }
+ break;
+
+ case IPPROTO_UDP:
+ layer_trace("ipproto_udp frag");
+
+ if (!packet->ip6frag.ip6f_offlg) {
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_FRAGMENTED_UDPHDR;
+ need16(packet->udphdr.uh_sport, payload, length);
+ need16(packet->udphdr.uh_dport, payload, length);
+ need16(packet->udphdr.uh_ulen, payload, length);
+ need16(packet->udphdr.uh_sum, payload, length);
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+ packet->have_udphdr = 1;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_udp) {
+ pcaplist->pcap_thread->callback_udp(pcaplist->user, packet, payload, length);
+ return;
+ }
+ break;
+
+ case IPPROTO_TCP:
+ layer_trace("ipproto_tcp frag");
+
+ if (!packet->ip6frag.ip6f_offlg) {
+ for (;;) {
+ packet->state = PCAP_THREAD_PACKET_FRAGMENTED_TCPHDR;
+ need16(packet->tcphdr.th_sport, payload, length);
+ need16(packet->tcphdr.th_dport, payload, length);
+ need32(packet->tcphdr.th_seq, payload, length);
+ need32(packet->tcphdr.th_ack, payload, length);
+ need4x2(packet->tcphdr.th_off, packet->tcphdr.th_x2, payload, length);
+ need8(packet->tcphdr.th_flags, payload, length);
+ need16(packet->tcphdr.th_win, payload, length);
+ need16(packet->tcphdr.th_sum, payload, length);
+ need16(packet->tcphdr.th_urp, payload, length);
+ if (packet->tcphdr.th_off > 5) {
+ packet->tcpopts_len = (packet->tcphdr.th_off - 5) * 4;
+ needxb(&(packet->tcpopts[0]), packet->tcpopts_len, payload, length);
+ packet->have_tcpopts = 1;
+ }
+ packet->state = PCAP_THREAD_PACKET_IS_FRAGMENT;
+ packet->have_tcphdr = 1;
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_tcp) {
+ pcaplist->pcap_thread->callback_tcp(pcaplist->user, packet, payload, length);
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+ }
+
+ switch (ext.ip6e_nxt) {
+ case IPPROTO_GRE:
+ if (packet->have_grehdr)
+ break;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_GRE;
+ need16(packet->grehdr.gre_flags, payload, length);
+ need16(packet->grehdr.ether_type, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_grehdr = 1;
+
+ if (pcaplist->pcap_thread->callback_gre)
+ pcaplist->pcap_thread->callback_gre(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_gre((void*)pcaplist, packet, payload, length);
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv6_frag.release(pcaplist->ipv6_frag_ctx, packet, payload, length);
+ }
+ return;
+
+ case IPPROTO_ICMPV6:
+ layer_trace("ipproto_icmpv6");
+
+ if (packet->have_icmpv6hdr)
+ break;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_ICMPV6;
+ need8(packet->icmpv6hdr.icmp6_type, payload, length);
+ need8(packet->icmpv6hdr.icmp6_code, payload, length);
+ need16(packet->icmpv6hdr.icmp6_cksum, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_icmpv6hdr = 1;
+
+ if (pcaplist->pcap_thread->callback_icmpv6)
+ pcaplist->pcap_thread->callback_icmpv6(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_icmpv6((void*)pcaplist, packet, payload, length);
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv6_frag.release(pcaplist->ipv6_frag_ctx, packet, payload, length);
+ }
+ return;
+
+ case IPPROTO_UDP:
+ if (packet->have_udphdr)
+ break;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_UDP;
+ need16(packet->udphdr.uh_sport, payload, length);
+ need16(packet->udphdr.uh_dport, payload, length);
+ need16(packet->udphdr.uh_ulen, payload, length);
+ need16(packet->udphdr.uh_sum, payload, length);
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_udphdr = 1;
+
+ if (pcaplist->pcap_thread->callback_udp)
+ pcaplist->pcap_thread->callback_udp(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_udp((void*)pcaplist, packet, payload, length);
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv6_frag.release(pcaplist->ipv6_frag_ctx, packet, payload, length);
+ }
+ return;
+
+ case IPPROTO_TCP:
+ if (packet->have_tcphdr)
+ break;
+
+ packet->state = PCAP_THREAD_PACKET_INVALID_TCP;
+ need16(packet->tcphdr.th_sport, payload, length);
+ need16(packet->tcphdr.th_dport, payload, length);
+ need32(packet->tcphdr.th_seq, payload, length);
+ need32(packet->tcphdr.th_ack, payload, length);
+ need4x2(packet->tcphdr.th_off, packet->tcphdr.th_x2, payload, length);
+ need8(packet->tcphdr.th_flags, payload, length);
+ need16(packet->tcphdr.th_win, payload, length);
+ need16(packet->tcphdr.th_sum, payload, length);
+ need16(packet->tcphdr.th_urp, payload, length);
+ if (packet->tcphdr.th_off > 5) {
+ packet->tcpopts_len = (packet->tcphdr.th_off - 5) * 4;
+ needxb(&(packet->tcpopts[0]), packet->tcpopts_len, payload, length);
+ packet->have_tcpopts = 1;
+ }
+ packet->state = PCAP_THREAD_PACKET_OK;
+ packet->have_tcphdr = 1;
+
+ if (pcaplist->pcap_thread->callback_tcp)
+ pcaplist->pcap_thread->callback_tcp(pcaplist->user, packet, payload, length);
+ else
+ pcap_thread_callback_tcp((void*)pcaplist, packet, payload, length);
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv6_frag.release(pcaplist->ipv6_frag_ctx, packet, payload, length);
+ }
+ return;
+
+ default:
+ packet->state = PCAP_THREAD_PACKET_UNSUPPORTED;
+ break;
+ }
+ break;
+ }
+ }
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ if (release_frag)
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, payload, length);
+ else
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+
+ if (release_frag) {
+ pcaplist->pcap_thread->callback_ipv6_frag.release(pcaplist->ipv6_frag_ctx, packet, payload, length);
+ }
+}
+
+static void pcap_thread_callback_icmp(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ /* TODO: Higher layer support? */
+ packet->state = PCAP_THREAD_PACKET_UNPROCESSED;
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_icmpv6(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ /* TODO: Higher layer support? */
+ packet->state = PCAP_THREAD_PACKET_UNPROCESSED;
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_udp(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ /* TODO: Higher layer support? */
+ packet->state = PCAP_THREAD_PACKET_UNPROCESSED;
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+static void pcap_thread_callback_tcp(u_char* user, pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ pcap_thread_pcaplist_t* pcaplist = (pcap_thread_pcaplist_t*)user;
+ const u_char* orig = payload;
+ size_t origlength = length;
+
+ if (!pcaplist) {
+ return;
+ }
+ if (!pcaplist->pcap_thread) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (!payload) {
+ return;
+ }
+ if (!length) {
+ return;
+ }
+
+ /* TODO: Higher layer support? */
+ packet->state = PCAP_THREAD_PACKET_UNPROCESSED;
+
+ if (pcaplist->pcap_thread->callback_invalid) {
+ if (packet->state == PCAP_THREAD_PACKET_OK)
+ packet->state = PCAP_THREAD_PACKET_INVALID;
+ pcaplist->pcap_thread->callback_invalid(pcaplist->user, packet, orig, origlength);
+ }
+}
+
+/*
+ * Open/Close
+ */
+
+static pcap_thread_pcaplist_t _pcaplist_defaults = PCAP_THREAD_PCAPLIST_T_INIT;
+
+int pcap_thread_open(pcap_thread_t* pcap_thread, const char* device, void* user)
+{
+ pcap_t* pcap;
+ pcap_thread_pcaplist_t* pcaplist;
+ int snapshot;
+
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!device) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ if (pcap_thread->errbuf[0]) {
+ memset(pcap_thread->errbuf, 0, sizeof(pcap_thread->errbuf));
+ }
+ pcap_thread->status = 0;
+
+ if (!(pcaplist = malloc(sizeof(pcap_thread_pcaplist_t)))) {
+ return PCAP_THREAD_ENOMEM;
+ }
+ memcpy(pcaplist, &_pcaplist_defaults, sizeof(pcap_thread_pcaplist_t));
+ if (!(pcaplist->name = strdup(device))) {
+ free(pcaplist);
+ return PCAP_THREAD_ENOMEM;
+ }
+
+#ifdef HAVE_PCAP_CREATE
+ if (!(pcap = pcap_create(pcaplist->name, pcap_thread->errbuf))) {
+ free(pcaplist->name);
+ free(pcaplist);
+ return PCAP_THREAD_EPCAP;
+ }
+
+ if (pcap_thread->monitor) {
+ pcap_thread->status = pcap_can_set_rfmon(pcap);
+ if (pcap_thread->status == 0) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ return PCAP_THREAD_ENOMON;
+ }
+ if (pcap_thread->status != 1) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_can_set_rfmon()");
+ return PCAP_THREAD_EPCAP;
+ }
+ }
+
+#ifdef HAVE_PCAP_SET_TSTAMP_PRECISION
+ if (pcap_thread->have_timestamp_precision && (pcap_thread->status = pcap_set_tstamp_precision(pcap, pcap_thread->timestamp_precision))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_set_tstamp_precision()");
+ return PCAP_THREAD_EPCAP;
+ }
+#endif
+#ifdef HAVE_PCAP_SET_IMMEDIATE_MODE
+ if (pcap_thread->immediate_mode && (pcap_thread->status = pcap_set_immediate_mode(pcap, 1))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_set_immediate_mode()");
+ return PCAP_THREAD_EPCAP;
+ }
+#endif
+
+ if (pcap_thread->monitor && (pcap_thread->status = pcap_set_rfmon(pcap, 1))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_set_rfmon()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (pcap_thread->snaplen && (pcap_thread->status = pcap_set_snaplen(pcap, pcap_thread->snaplen))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_set_snaplen()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (pcap_thread->promiscuous && (pcap_thread->status = pcap_set_promisc(pcap, pcap_thread->promiscuous))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_set_promisc()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (pcap_thread->timeout && (pcap_thread->status = pcap_set_timeout(pcap, pcap_thread->timeout))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_set_timeout()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (pcap_thread->buffer_size && (pcap_thread->status = pcap_set_buffer_size(pcap, pcap_thread->buffer_size))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_set_buffer_size()");
+ return PCAP_THREAD_EPCAP;
+ }
+
+#ifdef HAVE_PCAP_SET_TSTAMP_TYPE
+ if (pcap_thread->have_timestamp_type && (pcap_thread->status = pcap_set_tstamp_type(pcap, pcap_thread->timestamp_type))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_set_tstamp_type()");
+ return PCAP_THREAD_EPCAP;
+ }
+#endif
+
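+ /* With immediate activation the handle is activated (and the capture direction set) right away; otherwise activation is deferred to pcap_thread_activate() */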
+ if (pcap_thread->activate_mode == PCAP_THREAD_ACTIVATE_MODE_IMMEDIATE) {
+ if ((pcap_thread->status = pcap_activate(pcap))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_activate()");
+ return PCAP_THREAD_EPCAP;
+ }
+
+#ifdef HAVE_PCAP_SETDIRECTION
+#ifdef HAVE_PCAP_DIRECTION_T
+ if (pcap_thread->have_direction && (pcap_thread->status = pcap_setdirection(pcap, pcap_thread->direction))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_setdirection()");
+ return PCAP_THREAD_EPCAP;
+ }
+#endif
+#endif
+ }
+#else /* HAVE_PCAP_CREATE */
+ if (!(pcap = pcap_open_live(pcaplist->name, pcap_thread->snaplen, pcap_thread->promiscuous, pcap_thread->timeout, pcap_thread->errbuf))) {
+ free(pcaplist->name);
+ free(pcaplist);
+ return PCAP_THREAD_EPCAP;
+ }
+#endif
+
+ if (pcap_thread->activate_mode == PCAP_THREAD_ACTIVATE_MODE_IMMEDIATE) {
+ if (pcap_thread->filter) {
+ if ((pcap_thread->status = pcap_compile(pcap, &(pcaplist->bpf), pcap_thread->filter, pcap_thread->filter_optimize, pcap_thread->filter_netmask))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_compile()");
+ return PCAP_THREAD_EPCAP;
+ }
+ pcaplist->have_bpf = 1;
+ pcap_thread->filter_errno = 0;
+ errno = 0;
+ if ((pcap_thread->status = pcap_setfilter(pcap, &(pcaplist->bpf)))) {
+ pcap_freecode(&(pcaplist->bpf));
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_setfilter()");
+ return PCAP_THREAD_EPCAP;
+ }
+ pcap_thread->filter_errno = errno;
+ }
+
+ if ((snapshot = pcap_snapshot(pcap)) < 0) {
+ pcap_thread->status = snapshot;
+ if (pcaplist->have_bpf)
+ pcap_freecode(&(pcaplist->bpf));
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_snapshot()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (snapshot > pcap_thread->snapshot) {
+ pcap_thread->snapshot = snapshot;
+ }
+ }
+
+ pcaplist->pcap = pcap;
+ pcaplist->user = user;
+ if (pcap_thread->callback_ipv4_frag.new) {
+ pcaplist->ipv4_frag_ctx = pcap_thread->callback_ipv4_frag.new(pcap_thread->callback_ipv4_frag.conf, user);
+ pcaplist->have_ipv4_frag_ctx = 1;
+ }
+ if (pcap_thread->callback_ipv6_frag.new) {
+ pcaplist->ipv6_frag_ctx = pcap_thread->callback_ipv6_frag.new(pcap_thread->callback_ipv6_frag.conf, user);
+ pcaplist->have_ipv6_frag_ctx = 1;
+ }
+ if (pcap_thread->pcaplist) {
+ pcaplist->next = pcap_thread->pcaplist;
+ }
+ pcap_thread->pcaplist = pcaplist;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_open_offline(pcap_thread_t* pcap_thread, const char* file, void* user)
+{
+ pcap_t* pcap;
+ pcap_thread_pcaplist_t* pcaplist;
+ int snapshot;
+
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!file) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ if (pcap_thread->errbuf[0]) {
+ memset(pcap_thread->errbuf, 0, sizeof(pcap_thread->errbuf));
+ }
+ pcap_thread->status = 0;
+
+ if (!(pcaplist = malloc(sizeof(pcap_thread_pcaplist_t)))) {
+ return PCAP_THREAD_ENOMEM;
+ }
+ memcpy(pcaplist, &_pcaplist_defaults, sizeof(pcap_thread_pcaplist_t));
+ pcaplist->is_offline = 1;
+ if (!(pcaplist->name = strdup(file))) {
+ free(pcaplist);
+ return PCAP_THREAD_ENOMEM;
+ }
+
+#ifdef HAVE_PCAP_OPEN_OFFLINE_WITH_TSTAMP_PRECISION
+ if (pcap_thread->have_timestamp_precision) {
+ if (!(pcap = pcap_open_offline_with_tstamp_precision(pcaplist->name, pcap_thread->timestamp_precision, pcap_thread->errbuf))) {
+ free(pcaplist->name);
+ free(pcaplist);
+ return PCAP_THREAD_EPCAP;
+ }
+ } else
+#endif
+ {
+ if (!(pcap = pcap_open_offline(pcaplist->name, pcap_thread->errbuf))) {
+ free(pcaplist->name);
+ free(pcaplist);
+ return PCAP_THREAD_EPCAP;
+ }
+ }
+
+ if (pcap_thread->filter) {
+ if ((pcap_thread->status = pcap_compile(pcap, &(pcaplist->bpf), pcap_thread->filter, pcap_thread->filter_optimize, pcap_thread->filter_netmask))) {
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_compile()");
+ return PCAP_THREAD_EPCAP;
+ }
+ pcaplist->have_bpf = 1;
+ pcap_thread->filter_errno = 0;
+ errno = 0;
+ if ((pcap_thread->status = pcap_setfilter(pcap, &(pcaplist->bpf)))) {
+ pcap_freecode(&(pcaplist->bpf));
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_setfilter()");
+ return PCAP_THREAD_EPCAP;
+ }
+ pcap_thread->filter_errno = errno;
+ }
+
+ if ((snapshot = pcap_snapshot(pcap)) < 0) {
+ pcap_thread->status = snapshot;
+ if (pcaplist->have_bpf)
+ pcap_freecode(&(pcaplist->bpf));
+ pcap_close(pcap);
+ free(pcaplist->name);
+ free(pcaplist);
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_snapshot()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (snapshot > pcap_thread->snapshot) {
+ pcap_thread->snapshot = snapshot;
+ }
+
+ pcaplist->pcap = pcap;
+ pcaplist->user = user;
+ if (pcap_thread->callback_ipv4_frag.new) {
+ pcaplist->ipv4_frag_ctx = pcap_thread->callback_ipv4_frag.new(pcap_thread->callback_ipv4_frag.conf, user);
+ pcaplist->have_ipv4_frag_ctx = 1;
+ }
+ if (pcap_thread->callback_ipv6_frag.new) {
+ pcaplist->ipv6_frag_ctx = pcap_thread->callback_ipv6_frag.new(pcap_thread->callback_ipv6_frag.conf, user);
+ pcaplist->have_ipv6_frag_ctx = 1;
+ }
+ if (pcap_thread->pcaplist) {
+ pcaplist->next = pcap_thread->pcaplist;
+ }
+ pcap_thread->pcaplist = pcaplist;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_add(pcap_thread_t* pcap_thread, const char* name, pcap_t* pcap, void* user)
+{
+ (void)pcap_thread;
+ (void)name;
+ (void)pcap;
+ (void)user;
+
+ if (pcap_thread->errbuf[0]) {
+ memset(pcap_thread->errbuf, 0, sizeof(pcap_thread->errbuf));
+ }
+ pcap_thread->status = 0;
+
+ return PCAP_THREAD_EOBSOLETE;
+}
+
+int pcap_thread_activate(pcap_thread_t* pcap_thread)
+{
+ pcap_thread_pcaplist_t* pcaplist;
+ int snapshot;
+
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ if (pcap_thread->errbuf[0]) {
+ memset(pcap_thread->errbuf, 0, sizeof(pcap_thread->errbuf));
+ }
+ pcap_thread->status = 0;
+
+ pcap_thread->filter_errno = 0;
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ if (pcaplist->is_offline) {
+ continue;
+ }
+
+#ifdef HAVE_PCAP_ACTIVATE
+ if ((pcap_thread->status = pcap_activate(pcaplist->pcap))) {
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_activate()");
+ return PCAP_THREAD_EPCAP;
+ }
+#endif
+
+#ifdef HAVE_PCAP_SETDIRECTION
+#ifdef HAVE_PCAP_DIRECTION_T
+ if (pcap_thread->have_direction && (pcap_thread->status = pcap_setdirection(pcaplist->pcap, pcap_thread->direction))) {
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_setdirection()");
+ return PCAP_THREAD_EPCAP;
+ }
+#endif
+#endif
+
+ if (pcap_thread->filter) {
+ if (pcaplist->have_bpf)
+ pcap_freecode(&(pcaplist->bpf));
+ if ((pcap_thread->status = pcap_compile(pcaplist->pcap, &(pcaplist->bpf), pcap_thread->filter, pcap_thread->filter_optimize, pcap_thread->filter_netmask))) {
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_compile()");
+ return PCAP_THREAD_EPCAP;
+ }
+ pcaplist->have_bpf = 1;
+ errno = 0;
+ if ((pcap_thread->status = pcap_setfilter(pcaplist->pcap, &(pcaplist->bpf)))) {
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_setfilter()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (errno && !pcap_thread->filter_errno)
+ pcap_thread->filter_errno = errno;
+ }
+
+ if ((snapshot = pcap_snapshot(pcaplist->pcap)) < 0) {
+ pcap_thread->status = snapshot;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_snapshot()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (snapshot > pcap_thread->snapshot) {
+ pcap_thread->snapshot = snapshot;
+ }
+ }
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_close(pcap_thread_t* pcap_thread)
+{
+ pcap_thread_pcaplist_t* pcaplist;
+
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ while (pcap_thread->pcaplist) {
+ pcaplist = pcap_thread->pcaplist;
+ pcap_thread->pcaplist = pcaplist->next;
+
+ if (pcap_thread->callback_ipv4_frag.free && pcaplist->have_ipv4_frag_ctx) {
+ pcap_thread->callback_ipv4_frag.free(pcaplist->ipv4_frag_ctx);
+ }
+ if (pcap_thread->callback_ipv6_frag.free && pcaplist->have_ipv6_frag_ctx) {
+ pcap_thread->callback_ipv6_frag.free(pcaplist->ipv6_frag_ctx);
+ }
+
+ if (pcaplist->pcap) {
+ pcap_close(pcaplist->pcap);
+ }
+ if (pcaplist->have_bpf) {
+ pcap_freecode(&(pcaplist->bpf));
+ }
+ if (pcaplist->name) {
+ free(pcaplist->name);
+ }
+ free(pcaplist);
+ }
+ pcap_thread->step = 0;
+
+#ifdef HAVE_PTHREAD
+ if (pcap_thread->pkthdr) {
+ free(pcap_thread->pkthdr);
+ pcap_thread->pkthdr = 0;
+ }
+ if (pcap_thread->pkt) {
+ free(pcap_thread->pkt);
+ pcap_thread->pkt = 0;
+ }
+ if (pcap_thread->pcaplist_pkt) {
+ free(pcap_thread->pcaplist_pkt);
+ pcap_thread->pcaplist_pkt = 0;
+ }
+#endif
+
+ return PCAP_THREAD_OK;
+}
+
+/*
+ * Engine
+ */
+
+#ifdef HAVE_PTHREAD
+static void _callback(u_char* user, const struct pcap_pkthdr* pkthdr, const u_char* pkt)
+{
+ pcap_thread_pcaplist_t* pcaplist;
+ pcap_thread_t* pcap_thread;
+
+ pthread_testcancel();
+
+ if (!user) {
+ return;
+ }
+ pcaplist = (pcap_thread_pcaplist_t*)user;
+
+ if (!pcaplist->pcap_thread) {
+ pcaplist->running = 0;
+ return;
+ }
+ pcap_thread = pcaplist->pcap_thread;
+
+ if (pkthdr->caplen > pcap_thread->snapshot) {
+ if (pcap_thread->dropback) {
+ pcap_thread->dropback(pcaplist->user, pkthdr, pkt, pcaplist->name, pcap_datalink(pcaplist->pcap));
+ }
+ return;
+ }
+
+ if (pcap_thread->queue_mode == PCAP_THREAD_QUEUE_MODE_DIRECT) {
+ if (pcap_thread->callback) {
+ pcap_thread->callback(pcaplist->user, pkthdr, pkt, pcaplist->name, pcap_datalink(pcaplist->pcap));
+ } else if (pcaplist->layer_callback) {
+ pcaplist->layer_callback((void*)pcaplist, pkthdr, pkt, pcaplist->name, pcap_datalink(pcaplist->pcap));
+ } else if (pcap_thread->dropback) {
+ pcap_thread->dropback(pcaplist->user, pkthdr, pkt, pcaplist->name, pcap_datalink(pcaplist->pcap));
+ }
+ return;
+ }
+
+ if (pthread_mutex_lock(&(pcap_thread->mutex))) {
+ if (pcap_thread->dropback) {
+ pcap_thread->dropback(pcaplist->user, pkthdr, pkt, pcaplist->name, pcap_datalink(pcaplist->pcap));
+ }
+ return;
+ }
+
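+ /* Queue the packet into the shared ring buffer, waiting on can_write while the queue is full */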
+ while (pcaplist->running && pcap_thread->running) {
+ if (pcap_thread->pkts < pcap_thread->queue_size) {
+ pcap_thread->pcaplist_pkt[pcap_thread->write_pos] = pcaplist;
+ memcpy(&(pcap_thread->pkthdr[pcap_thread->write_pos]), pkthdr, sizeof(struct pcap_pkthdr));
+ memcpy(&(pcap_thread->pkt[pcap_thread->write_pos * pcap_thread->snapshot]), pkt, pkthdr->caplen);
+ pcap_thread->write_pos++;
+ if (pcap_thread->write_pos == pcap_thread->queue_size) {
+ pcap_thread->write_pos = 0;
+ }
+ pcap_thread->pkts++;
+
+ pthread_cond_signal(&(pcap_thread->have_packets));
+ break;
+ }
+
+ if (pthread_cond_wait(&(pcap_thread->can_write), &(pcap_thread->mutex))) {
+ pcaplist->running = 0;
+ pcap_breakloop(pcaplist->pcap);
+ return;
+ }
+ continue;
+ }
+
+ if (pthread_mutex_unlock(&(pcap_thread->mutex))) {
+ pcaplist->running = 0;
+ pcap_breakloop(pcaplist->pcap);
+ return;
+ }
+}
+
+static void* _thread(void* vp)
+{
+ pcap_thread_pcaplist_t* pcaplist;
+ int ret = 0;
+
+ /*pthread_detach(pthread_self());*/
+
+ if (!vp) {
+ return 0;
+ }
+ pcaplist = (pcap_thread_pcaplist_t*)vp;
+
+ if (!pcaplist->pcap_thread) {
+ pcaplist->running = 0;
+ return 0;
+ }
+
+ /*
+ * pcap_loop() might return -2 to indicate that pcap_breakloop() was
+ * called, but we do not need to act on that because either this thread
+ * has been cancelled or running has been cleared.
+ */
+ while (pcaplist->running) {
+ pthread_testcancel();
+ ret = pcap_loop(pcaplist->pcap, -1, _callback, (u_char*)pcaplist);
+ if (ret == -1) {
+ /* TODO: Store pcap_loop() error */
+ break;
+ }
+ if (!ret)
+ break;
+ }
+
+ pcaplist->running = 0;
+
+ pthread_mutex_lock(&(pcaplist->pcap_thread->mutex));
+ pthread_cond_signal(&(pcaplist->pcap_thread->have_packets));
+ pthread_mutex_unlock(&(pcaplist->pcap_thread->mutex));
+
+ return 0;
+}
+#endif
+
+static void _callback2(u_char* user, const struct pcap_pkthdr* pkthdr, const u_char* pkt)
+{
+ pcap_thread_pcaplist_t* pcaplist;
+
+ if (!user) {
+ return;
+ }
+ pcaplist = (pcap_thread_pcaplist_t*)user;
+
+ if (!pcaplist->pcap_thread) {
+ pcaplist->running = 0;
+ return;
+ }
+ if (pcaplist->pcap_thread->callback) {
+ pcaplist->pcap_thread->callback(pcaplist->user, pkthdr, pkt, pcaplist->name, pcap_datalink(pcaplist->pcap));
+ } else if (pcaplist->layer_callback) {
+ pcaplist->layer_callback((void*)pcaplist, pkthdr, pkt, pcaplist->name, pcap_datalink(pcaplist->pcap));
+ } else {
+ pcaplist->running = 0;
+ }
+}
+
+int pcap_thread_run(pcap_thread_t* pcap_thread)
+{
+ pcap_thread_pcaplist_t* pcaplist;
+ int run = 1, timedrun = 0;
+ struct timeval start = { 0, 0 };
+ struct timespec end = { 0, 0 };
+
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!pcap_thread->pcaplist) {
+ return PCAP_THREAD_NOPCAPS;
+ }
+ if (!pcap_thread->callback && !pcap_thread->use_layers) {
+ return PCAP_THREAD_NOCALLBACK;
+ }
+ if (pcap_thread->use_layers
+ && !(pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp)) {
+ return PCAP_THREAD_NOCALLBACK;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+
+ if (pcap_thread->errbuf[0]) {
+ memset(pcap_thread->errbuf, 0, sizeof(pcap_thread->errbuf));
+ }
+ pcap_thread->status = 0;
+
+ if (pcap_thread->timedrun.tv_sec || pcap_thread->timedrun.tv_usec) {
+ timedrun = 1;
+ if (gettimeofday(&start, 0)) {
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "gettimeofday()");
+ return PCAP_THREAD_ERRNO;
+ }
+
+ end.tv_sec = start.tv_sec + pcap_thread->timedrun.tv_sec
+ + ((start.tv_usec + pcap_thread->timedrun.tv_usec) / 1000000);
+ end.tv_nsec = ((start.tv_usec + pcap_thread->timedrun.tv_usec) % 1000000) * 1000;
+ } else if (pcap_thread->timedrun_to.tv_sec) {
+ timedrun = 1;
+
+ end.tv_sec = pcap_thread->timedrun_to.tv_sec;
+ end.tv_nsec = pcap_thread->timedrun_to.tv_usec * 1000;
+ }
+
+#ifdef HAVE_PTHREAD
+ if (pcap_thread->use_threads) {
+ int err, all_offline;
+
+ switch (pcap_thread->queue_mode) {
+ case PCAP_THREAD_QUEUE_MODE_COND:
+ case PCAP_THREAD_QUEUE_MODE_DIRECT:
+ if ((err = pthread_mutex_lock(&(pcap_thread->mutex)))) {
+ errno = err;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pthread_mutex_lock()");
+ return PCAP_THREAD_ERRNO;
+ }
+ break;
+ case PCAP_THREAD_QUEUE_MODE_WAIT:
+ case PCAP_THREAD_QUEUE_MODE_YIELD:
+ case PCAP_THREAD_QUEUE_MODE_DROP:
+ return PCAP_THREAD_EOBSOLETE;
+ default:
+ return PCAP_THREAD_EINVAL;
+ }
+
+ if (pcap_thread->running) {
+ pthread_mutex_unlock(&(pcap_thread->mutex));
+ return PCAP_THREAD_ERUNNING;
+ }
+
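+ /* (Re)allocate the shared packet queue: one pcap_pkthdr, one snapshot-sized packet buffer and one pcaplist pointer per queue slot */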
+ if (pcap_thread->pkthdr) {
+ free(pcap_thread->pkthdr);
+ }
+ if (!(pcap_thread->pkthdr = calloc(pcap_thread->queue_size, sizeof(struct pcap_pkthdr)))) {
+ pthread_mutex_unlock(&(pcap_thread->mutex));
+ return PCAP_THREAD_ENOMEM;
+ }
+
+ if (pcap_thread->pkt) {
+ free(pcap_thread->pkt);
+ }
+ if (!(pcap_thread->pkt = calloc(pcap_thread->queue_size, pcap_thread->snapshot))) {
+ pthread_mutex_unlock(&(pcap_thread->mutex));
+ return PCAP_THREAD_ENOMEM;
+ }
+
+ if (pcap_thread->pcaplist_pkt) {
+ free(pcap_thread->pcaplist_pkt);
+ }
+ if (!(pcap_thread->pcaplist_pkt = calloc(pcap_thread->queue_size, sizeof(pcap_thread_pcaplist_t*)))) {
+ pthread_mutex_unlock(&(pcap_thread->mutex));
+ return PCAP_THREAD_ENOMEM;
+ }
+
+ pcap_thread->read_pos = 0;
+ pcap_thread->write_pos = 0;
+ pcap_thread->pkts = 0;
+
+ all_offline = 1;
+ for (pcaplist = pcap_thread->pcaplist; all_offline && pcaplist; pcaplist = pcaplist->next) {
+ if (!pcaplist->is_offline) {
+ all_offline = 0;
+ break;
+ }
+ }
+
+ pcap_thread->running = 1;
+ pcap_thread->was_stopped = 0;
+ err = PCAP_THREAD_OK;
+
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ pcaplist->pcap_thread = pcap_thread;
+ if (pcap_thread->use_layers) {
+ pcaplist->layer_callback = &pcap_thread_callback;
+ }
+ if (pcap_thread->callback_ipv4_frag.new && !pcaplist->have_ipv4_frag_ctx) {
+ pcaplist->ipv4_frag_ctx = pcap_thread->callback_ipv4_frag.new(pcap_thread->callback_ipv4_frag.conf, pcaplist->user);
+ pcaplist->have_ipv4_frag_ctx = 1;
+ }
+ if (pcap_thread->callback_ipv6_frag.new && !pcaplist->have_ipv6_frag_ctx) {
+ pcaplist->ipv6_frag_ctx = pcap_thread->callback_ipv6_frag.new(pcap_thread->callback_ipv6_frag.conf, pcaplist->user);
+ pcaplist->have_ipv6_frag_ctx = 1;
+ }
+ pcaplist->running = 1;
+
+ if ((err = pthread_create(&(pcaplist->thread), 0, _thread, (void*)pcaplist))) {
+ errno = err;
+ err = PCAP_THREAD_ERRNO;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pthread_create()");
+ break;
+ }
+ }
+
+ while (err == PCAP_THREAD_OK && run && pcap_thread->running) {
+ while (pcap_thread->pkts) {
+ if (!pcap_thread->pcaplist_pkt[pcap_thread->read_pos]) {
+ err = PCAP_THREAD_ENOPCAPLIST;
+ break;
+ }
+
+ if (pcap_thread->callback) {
+ pcap_thread->callback(
+ pcap_thread->pcaplist_pkt[pcap_thread->read_pos]->user,
+ &(pcap_thread->pkthdr[pcap_thread->read_pos]),
+ &(pcap_thread->pkt[pcap_thread->read_pos * pcap_thread->snapshot]),
+ pcap_thread->pcaplist_pkt[pcap_thread->read_pos]->name,
+ pcap_datalink(pcap_thread->pcaplist_pkt[pcap_thread->read_pos]->pcap));
+ } else {
+ pcap_thread_callback(
+ (void*)pcap_thread->pcaplist_pkt[pcap_thread->read_pos],
+ &(pcap_thread->pkthdr[pcap_thread->read_pos]),
+ &(pcap_thread->pkt[pcap_thread->read_pos * pcap_thread->snapshot]),
+ pcap_thread->pcaplist_pkt[pcap_thread->read_pos]->name,
+ pcap_datalink(pcap_thread->pcaplist_pkt[pcap_thread->read_pos]->pcap));
+ }
+
+ pcap_thread->pcaplist_pkt[pcap_thread->read_pos] = 0;
+ pcap_thread->read_pos++;
+ if (pcap_thread->read_pos == pcap_thread->queue_size) {
+ pcap_thread->read_pos = 0;
+ }
+ pcap_thread->pkts--;
+ }
+
+ if (err != PCAP_THREAD_OK)
+ break;
+
+ if ((err = pthread_cond_broadcast(&(pcap_thread->can_write)))) {
+ errno = err;
+ err = PCAP_THREAD_ERRNO;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pthread_cond_broadcast()");
+ break;
+ }
+
+ run = 0;
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ if (pcaplist->running) {
+ run = 1;
+ }
+ }
+ if (!run)
+ break;
+
+ if (timedrun) {
+ struct timeval now;
+
+ if (gettimeofday(&now, 0)) {
+ err = PCAP_THREAD_ERRNO;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "gettimeofday()");
+ break;
+ }
+
+ if (now.tv_sec > end.tv_sec
+ || (now.tv_sec == end.tv_sec && (now.tv_usec * 1000) >= end.tv_nsec)) {
+ break;
+ }
+
+ err = pthread_cond_timedwait(&(pcap_thread->have_packets), &(pcap_thread->mutex), &end);
+ if (err == ETIMEDOUT) {
+ err = PCAP_THREAD_OK;
+ } else if (err) {
+ errno = err;
+ err = PCAP_THREAD_ERRNO;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pthread_cond_timedwait()");
+ break;
+ }
+ } else {
+ if ((err = pthread_cond_wait(&(pcap_thread->have_packets), &(pcap_thread->mutex)))) {
+ errno = err;
+ err = PCAP_THREAD_ERRNO;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pthread_cond_wait()");
+ break;
+ }
+ }
+ }
+
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ pcaplist->running = 0;
+ pcap_breakloop(pcaplist->pcap);
+ if (pcaplist->thread) {
+ pthread_cancel(pcaplist->thread);
+ }
+ }
+
+ pthread_mutex_unlock(&(pcap_thread->mutex));
+
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ if (pcaplist->thread) {
+ pthread_join(pcaplist->thread, 0);
+ pcaplist->thread = 0;
+ }
+ }
+
+ pcap_thread->running = 0;
+ return err;
+ } else
+#endif
+ {
+ fd_set fds, rfds;
+ int max_fd = 0;
+ struct timeval t1, t2;
+
+ pcap_thread->running = 1;
+ pcap_thread->was_stopped = 0;
+
+ FD_ZERO(&fds);
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ int fd = pcap_get_selectable_fd(pcaplist->pcap);
+
+ FD_SET(fd, &fds);
+ if (fd > max_fd)
+ max_fd = fd;
+
+ if (!pcaplist->is_offline && (pcap_thread->status = pcap_setnonblock(pcaplist->pcap, 1, pcap_thread->errbuf))) {
+ pcap_thread->running = 0;
+ return PCAP_THREAD_EPCAP;
+ }
+ pcaplist->pcap_thread = pcap_thread;
+ if (pcap_thread->use_layers) {
+ pcaplist->layer_callback = &pcap_thread_callback;
+ }
+ if (pcap_thread->callback_ipv4_frag.new && !pcaplist->have_ipv4_frag_ctx) {
+ pcaplist->ipv4_frag_ctx = pcap_thread->callback_ipv4_frag.new(pcap_thread->callback_ipv4_frag.conf, pcaplist->user);
+ pcaplist->have_ipv4_frag_ctx = 1;
+ }
+ if (pcap_thread->callback_ipv6_frag.new && !pcaplist->have_ipv6_frag_ctx) {
+ pcaplist->ipv6_frag_ctx = pcap_thread->callback_ipv6_frag.new(pcap_thread->callback_ipv6_frag.conf, pcaplist->user);
+ pcaplist->have_ipv6_frag_ctx = 1;
+ }
+ pcaplist->running = 1;
+ }
+
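+ /* Base select() timeout from the pcap timeout (milliseconds); a timed run may shorten it per iteration below */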
+ t1.tv_sec = pcap_thread->timeout / 1000;
+ t1.tv_usec = (pcap_thread->timeout % 1000) * 1000;
+ max_fd++;
+ while (run) {
+ rfds = fds;
+ t2 = t1;
+ if (timedrun) {
+ struct timeval now;
+ struct timeval diff;
+
+ if (gettimeofday(&now, 0)) {
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "gettimeofday()");
+ pcap_thread->running = 0;
+ return PCAP_THREAD_ERRNO;
+ }
+ if (now.tv_sec > end.tv_sec
+ || (now.tv_sec == end.tv_sec && (now.tv_usec * 1000) >= end.tv_nsec)) {
+ break;
+ }
+
+ if (end.tv_sec > now.tv_sec) {
+ diff.tv_sec = end.tv_sec - now.tv_sec - 1;
+ diff.tv_usec = 1000000 - now.tv_usec;
+ diff.tv_usec += end.tv_nsec / 1000;
+ if (diff.tv_usec > 1000000) {
+ diff.tv_sec += diff.tv_usec / 1000000;
+ diff.tv_usec %= 1000000;
+ }
+ } else {
+ diff.tv_sec = 0;
+ if (end.tv_sec == now.tv_sec && (end.tv_nsec / 1000) > now.tv_usec) {
+ diff.tv_usec = (end.tv_nsec / 1000) - now.tv_usec;
+ } else {
+ diff.tv_usec = 0;
+ }
+ }
+
+ if (diff.tv_sec < t1.tv_sec || (diff.tv_sec == t1.tv_sec && diff.tv_usec < t1.tv_usec)) {
+ t2 = diff;
+ }
+ }
+ if (select(max_fd, &rfds, 0, 0, &t2) == -1) {
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "select()");
+ pcap_thread->running = 0;
+ return PCAP_THREAD_ERRNO;
+ }
+
+ run = 0;
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ int packets;
+
+ if (!pcaplist->running) {
+ continue;
+ } else {
+ run = 1;
+ }
+
+ packets = pcap_dispatch(pcaplist->pcap, -1, _callback2, (u_char*)pcaplist);
+ if (packets == -1) {
+ pcap_thread->status = -1;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_dispatch()");
+ pcap_thread->running = 0;
+ return PCAP_THREAD_EPCAP;
+ } else if (packets == -2 || (pcaplist->is_offline && !packets)) {
+ pcaplist->running = 0;
+ }
+ }
+ }
+
+ pcap_thread->running = 0;
+ }
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_next(pcap_thread_t* pcap_thread)
+{
+ const u_char* pkt;
+ struct pcap_pkthdr pkthdr;
+
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!pcap_thread->callback && !pcap_thread->use_layers) {
+ return PCAP_THREAD_NOCALLBACK;
+ }
+ if (pcap_thread->use_layers
+ && !(pcap_thread->callback_linux_sll
+ || pcap_thread->callback_ether
+ || pcap_thread->callback_null
+ || pcap_thread->callback_loop
+ || pcap_thread->callback_ieee802
+ || pcap_thread->callback_gre
+ || pcap_thread->callback_ip
+ || pcap_thread->callback_ipv4
+ || pcap_thread->callback_ipv6
+ || pcap_thread->callback_icmp
+ || pcap_thread->callback_icmpv6
+ || pcap_thread->callback_udp
+ || pcap_thread->callback_tcp)) {
+ return PCAP_THREAD_NOCALLBACK;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+ if (!pcap_thread->pcaplist) {
+ return PCAP_THREAD_NOPCAPS;
+ }
+
+ if (pcap_thread->errbuf[0]) {
+ memset(pcap_thread->errbuf, 0, sizeof(pcap_thread->errbuf));
+ }
+ pcap_thread->status = 0;
+
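+ /* Step through the pcaps one packet per call; the step pointer starts at the head of the pcaplist and advances after each delivered packet */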
+ if (!pcap_thread->step) {
+ pcap_thread->step = pcap_thread->pcaplist;
+ }
+ if (!pcap_thread->step) {
+ return PCAP_THREAD_OK;
+ }
+
+ pcap_thread->step->pcap_thread = pcap_thread;
+ if (pcap_thread->callback_ipv4_frag.new && !pcap_thread->step->have_ipv4_frag_ctx) {
+ pcap_thread->step->ipv4_frag_ctx = pcap_thread->callback_ipv4_frag.new(pcap_thread->callback_ipv4_frag.conf, pcap_thread->step->user);
+ pcap_thread->step->have_ipv4_frag_ctx = 1;
+ }
+ if (pcap_thread->callback_ipv6_frag.new && !pcap_thread->step->have_ipv6_frag_ctx) {
+ pcap_thread->step->ipv6_frag_ctx = pcap_thread->callback_ipv6_frag.new(pcap_thread->callback_ipv6_frag.conf, pcap_thread->step->user);
+ pcap_thread->step->have_ipv6_frag_ctx = 1;
+ }
+
+ if (!(pkt = pcap_next(pcap_thread->step->pcap, &pkthdr))) {
+ pcap_thread->status = -1;
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_next()");
+ return PCAP_THREAD_EPCAP;
+ }
+ if (pcap_thread->callback) {
+ pcap_thread->callback(pcap_thread->step->user, &pkthdr, pkt, pcap_thread->step->name, pcap_datalink(pcap_thread->step->pcap));
+ } else {
+ pcap_thread_callback((void*)pcap_thread->step, &pkthdr, pkt, pcap_thread->step->name, pcap_datalink(pcap_thread->step->pcap));
+ }
+ pcap_thread->step = pcap_thread->step->next;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_next_reset(pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (pcap_thread->running) {
+ return PCAP_THREAD_ERUNNING;
+ }
+ if (!pcap_thread->pcaplist) {
+ return PCAP_THREAD_NOPCAPS;
+ }
+
+ pcap_thread->step = 0;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_stop(pcap_thread_t* pcap_thread)
+{
+ pcap_thread_pcaplist_t* pcaplist;
+
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!pcap_thread->pcaplist) {
+ return PCAP_THREAD_NOPCAPS;
+ }
+
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ pcaplist->running = 0;
+ pcap_breakloop(pcaplist->pcap);
+ }
+ pcap_thread->running = 0;
+ pcap_thread->was_stopped = 1;
+
+#ifdef HAVE_PTHREAD
+ pthread_cond_broadcast(&(pcap_thread->have_packets));
+ pthread_cond_broadcast(&(pcap_thread->can_write));
+#endif
+
+ return PCAP_THREAD_OK;
+}
+
+/*
+ * Stats
+ */
+
+int pcap_thread_stats(pcap_thread_t* pcap_thread, pcap_thread_stats_callback_t callback, u_char* user)
+{
+ pcap_thread_pcaplist_t* pcaplist;
+ struct pcap_stat stats;
+
+ if (!pcap_thread) {
+ return PCAP_THREAD_EINVAL;
+ }
+ if (!callback) {
+ return PCAP_THREAD_NOCALLBACK;
+ }
+ if (!pcap_thread->pcaplist) {
+ return PCAP_THREAD_NOPCAPS;
+ }
+
+ if (pcap_thread->errbuf[0]) {
+ memset(pcap_thread->errbuf, 0, sizeof(pcap_thread->errbuf));
+ }
+ pcap_thread->status = 0;
+
+ for (pcaplist = pcap_thread->pcaplist; pcaplist; pcaplist = pcaplist->next) {
+ if (pcaplist->is_offline)
+ continue;
+ if ((pcap_thread->status = pcap_stats(pcaplist->pcap, &stats))) {
+ PCAP_THREAD_SET_ERRBUF(pcap_thread, "pcap_stats()");
+ return PCAP_THREAD_EPCAP;
+ }
+ callback(user, &stats, pcaplist->name, pcap_datalink(pcaplist->pcap));
+ }
+
+ return PCAP_THREAD_OK;
+}
+
+/*
+ * Error handling
+ */
+
+int pcap_thread_status(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return 0;
+ }
+
+ return pcap_thread->status;
+}
+
+const char* pcap_thread_errbuf(const pcap_thread_t* pcap_thread)
+{
+ if (!pcap_thread) {
+ return 0;
+ }
+
+ return pcap_thread->errbuf;
+}
+
+const char* pcap_thread_strerr(int error)
+{
+ switch (error) {
+ case PCAP_THREAD_OK:
+ return 0;
+ case PCAP_THREAD_EPCAP:
+ return PCAP_THREAD_EPCAP_STR;
+ case PCAP_THREAD_ENOMEM:
+ return PCAP_THREAD_ENOMEM_STR;
+ case PCAP_THREAD_ENOMON:
+ return PCAP_THREAD_ENOMON_STR;
+ case PCAP_THREAD_ENODIR:
+ return PCAP_THREAD_ENODIR_STR;
+ case PCAP_THREAD_EINVAL:
+ return PCAP_THREAD_EINVAL_STR;
+ case PCAP_THREAD_EWOULDBLOCK:
+ return PCAP_THREAD_EWOULDBLOCK_STR;
+ case PCAP_THREAD_NOPCAPS:
+ return PCAP_THREAD_NOPCAPS_STR;
+ case PCAP_THREAD_NOCALLBACK:
+ return PCAP_THREAD_NOCALLBACK_STR;
+ case PCAP_THREAD_ERRNO:
+ return PCAP_THREAD_ERRNO_STR;
+ case PCAP_THREAD_NOYIELD:
+ return PCAP_THREAD_NOYIELD_STR;
+ case PCAP_THREAD_EOBSOLETE:
+ return PCAP_THREAD_EOBSOLETE_STR;
+ case PCAP_THREAD_ERUNNING:
+ return PCAP_THREAD_ERUNNING_STR;
+ case PCAP_THREAD_ENOPCAPLIST:
+ return PCAP_THREAD_ENOPCAPLIST_STR;
+ case PCAP_THREAD_ELAYERCB:
+ return PCAP_THREAD_ELAYERCB_STR;
+ }
+ return "UNKNOWN";
+}
diff --git a/src/pcap-thread/pcap_thread.h b/src/pcap-thread/pcap_thread.h
new file mode 100644
index 0000000..ce43b5a
--- /dev/null
+++ b/src/pcap-thread/pcap_thread.h
@@ -0,0 +1,640 @@
+/*
+ * Author Jerry Lundström <jerry@dns-oarc.net>
+ * Copyright (c) 2016-2017, OARC, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __pcap_thread_h
+#define __pcap_thread_h
+
+#ifdef HAVE_PTHREAD
+#include <pthread.h>
+#endif
+#include <pcap/pcap.h>
+#include <sys/socket.h>
+#ifdef TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <net/if_arp.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#ifdef HAVE_ENDIAN_H
+#include <endian.h>
+#endif
+#ifdef HAVE_SYS_ENDIAN_H
+#include <sys/endian.h>
+#endif
+#ifdef HAVE_MACHINE_ENDIAN_H
+#include <machine/endian.h>
+#endif
+
+#ifndef __BYTE_ORDER
+#if defined(BYTE_ORDER)
+#define __BYTE_ORDER BYTE_ORDER
+#elif defined(_BYTE_ORDER)
+#define __BYTE_ORDER _BYTE_ORDER
+#else
+#error "No endian byte order define, please fix"
+#endif
+#endif
+#ifndef __LITTLE_ENDIAN
+#if defined(LITTLE_ENDIAN)
+#define __LITTLE_ENDIAN LITTLE_ENDIAN
+#elif defined(_LITTLE_ENDIAN)
+#define __LITTLE_ENDIAN _LITTLE_ENDIAN
+#else
+#error "No little endian define, please fix"
+#endif
+#endif
+#ifndef __BIG_ENDIAN
+#if defined(BIG_ENDIAN)
+#define __BIG_ENDIAN BIG_ENDIAN
+#elif defined(_BIG_ENDIAN)
+#define __BIG_ENDIAN _BIG_ENDIAN
+#else
+#error "No big endian define, please fix"
+#endif
+#endif
+
+#ifndef PCAP_NETMASK_UNKNOWN
+#define PCAP_NETMASK_UNKNOWN 0xffffffff
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* clang-format off */
+
+#define PCAP_THREAD_VERSION_STR "4.0.0"
+#define PCAP_THREAD_VERSION_MAJOR 4
+#define PCAP_THREAD_VERSION_MINOR 0
+#define PCAP_THREAD_VERSION_PATCH 0
+
+#define PCAP_THREAD_DEFAULT_TIMEOUT 1000
+#define PCAP_THREAD_DEFAULT_QUEUE_SIZE 64
+#define PCAP_THREAD_DEFAULT_QUEUE_MODE PCAP_THREAD_QUEUE_MODE_COND
+#define PCAP_THREAD_DEFAULT_ACTIVATE_MODE PCAP_THREAD_ACTIVATE_MODE_IMMEDIATE
+
+#define PCAP_THREAD_OK 0
+#define PCAP_THREAD_EPCAP 1
+#define PCAP_THREAD_ENOMEM 2
+#define PCAP_THREAD_ENOMON 3
+#define PCAP_THREAD_ENODIR 4
+#define PCAP_THREAD_EINVAL 5
+#define PCAP_THREAD_EWOULDBLOCK 6
+#define PCAP_THREAD_NOPCAPS 7
+#define PCAP_THREAD_NOCALLBACK 8
+#define PCAP_THREAD_ERRNO 9
+#define PCAP_THREAD_NOYIELD 10
+#define PCAP_THREAD_EOBSOLETE 11
+#define PCAP_THREAD_ERUNNING 12
+#define PCAP_THREAD_ENOPCAPLIST 13
+#define PCAP_THREAD_ELAYERCB 14
+
+#define PCAP_THREAD_EPCAP_STR "libpcap error"
+#define PCAP_THREAD_ENOMEM_STR "out of memory"
+#define PCAP_THREAD_ENOMON_STR "monitor mode requested but not supported"
+#define PCAP_THREAD_ENODIR_STR "direction specified but not supported"
+#define PCAP_THREAD_EINVAL_STR "invalid argument"
+#define PCAP_THREAD_EWOULDBLOCK_STR "nonblocking pcap can not be added"
+#define PCAP_THREAD_NOPCAPS_STR "nothing to capture on"
+#define PCAP_THREAD_NOCALLBACK_STR "no callback set"
+#define PCAP_THREAD_ERRNO_STR "system error, check errno"
+#define PCAP_THREAD_NOYIELD_STR "queue mode yield requested but not supported"
+#define PCAP_THREAD_EOBSOLETE_STR "obsolete function or feature"
+#define PCAP_THREAD_ERUNNING_STR "pcap thread is running, can not complete task"
+#define PCAP_THREAD_ENOPCAPLIST_STR "no internal reference to the pcap that captured the packet"
+#define PCAP_THREAD_ELAYERCB_STR "layer callback already set in lower or higher segment"
+
+/* clang-format on */
+
+struct pcap_thread_linux_sll {
+ uint16_t packet_type;
+ uint16_t arp_hardware;
+ uint16_t link_layer_address_length;
+ uint8_t link_layer_address[8];
+ uint16_t ether_type;
+};
+struct pcap_thread_null_hdr {
+ uint32_t family;
+};
+struct pcap_thread_loop_hdr {
+ uint32_t family;
+};
+struct pcap_thread_ieee802_hdr {
+ uint16_t tpid;
+ unsigned short pcp : 3;
+ unsigned short dei : 1;
+ unsigned short vid : 12;
+ uint16_t ether_type;
+};
+struct pcap_thread_gre_hdr {
+ uint16_t gre_flags;
+ uint16_t ether_type;
+};
+struct pcap_thread_gre {
+ uint16_t checksum;
+ uint16_t key;
+ uint16_t sequence;
+};
+typedef enum pcap_thread_packet_state pcap_thread_packet_state_t;
+enum pcap_thread_packet_state {
+ PCAP_THREAD_PACKET_OK = 0,
+ PCAP_THREAD_PACKET_INVALID,
+ PCAP_THREAD_PACKET_UNSUPPORTED,
+ PCAP_THREAD_PACKET_UNPROCESSED,
+ PCAP_THREAD_PACKET_INVALID_ETHER,
+ PCAP_THREAD_PACKET_INVALID_LINUX_SLL,
+ PCAP_THREAD_PACKET_INVALID_NULL,
+ PCAP_THREAD_PACKET_INVALID_LOOP,
+ PCAP_THREAD_PACKET_INVALID_IEEE802,
+ PCAP_THREAD_PACKET_INVALID_GRE,
+ PCAP_THREAD_PACKET_INVALID_IP,
+ PCAP_THREAD_PACKET_INVALID_IPV4,
+ PCAP_THREAD_PACKET_INVALID_IPV6,
+ PCAP_THREAD_PACKET_INVALID_IPV6HDR,
+ PCAP_THREAD_PACKET_INVALID_ICMP,
+ PCAP_THREAD_PACKET_INVALID_ICMPV6,
+ PCAP_THREAD_PACKET_INVALID_UDP,
+ PCAP_THREAD_PACKET_INVALID_TCP,
+ PCAP_THREAD_PACKET_IS_FRAGMENT,
+ PCAP_THREAD_PACKET_INVALID_FRAGMENT,
+ PCAP_THREAD_PACKET_ENOMEM,
+ PCAP_THREAD_PACKET_EMUTEX,
+ PCAP_THREAD_PACKET_FRAGMENTED_GREHDR,
+ PCAP_THREAD_PACKET_FRAGMENTED_ICMPHDR,
+ PCAP_THREAD_PACKET_FRAGMENTED_ICMPV6HDR,
+ PCAP_THREAD_PACKET_FRAGMENTED_UDPHDR,
+ PCAP_THREAD_PACKET_FRAGMENTED_TCPHDR
+};
+
+typedef struct pcap_thread_packet pcap_thread_packet_t;
+struct pcap_thread_packet {
+ unsigned short have_prevpkt : 1;
+ unsigned short have_pkthdr : 1;
+ unsigned short have_linux_sll : 1;
+ unsigned short have_ethhdr : 1;
+ unsigned short have_nullhdr : 1;
+ unsigned short have_loophdr : 1;
+ unsigned short have_ieee802hdr : 1;
+ unsigned short have_grehdr : 1;
+ unsigned short have_gre : 1;
+ unsigned short have_iphdr : 1;
+ unsigned short have_ip6hdr : 1;
+ unsigned short have_ip6frag : 1;
+ unsigned short have_ip6rtdst : 1;
+ unsigned short have_icmphdr : 1;
+ unsigned short have_icmpv6hdr : 1;
+ unsigned short have_udphdr : 1;
+ unsigned short have_tcphdr : 1;
+ unsigned short have_tcpopts : 1;
+ unsigned short have_ippadding : 1;
+ unsigned short have_ip6padding : 1;
+
+ const char* name;
+ int dlt;
+ pcap_thread_packet_t* prevpkt;
+ struct pcap_pkthdr pkthdr;
+ struct pcap_thread_linux_sll linux_sll;
+ struct ether_header ethhdr;
+ struct pcap_thread_null_hdr nullhdr;
+ struct pcap_thread_loop_hdr loophdr;
+ struct pcap_thread_ieee802_hdr ieee802hdr;
+ struct pcap_thread_gre_hdr grehdr;
+ struct pcap_thread_gre gre;
+ struct ip iphdr;
+ struct ip6_hdr ip6hdr;
+ struct ip6_frag ip6frag;
+ uint8_t ip6frag_payload;
+ struct in6_addr ip6rtdst;
+ struct {
+ u_int8_t type;
+ u_int8_t code;
+ u_int16_t checksum;
+ } icmphdr;
+ struct {
+ u_int8_t icmp6_type;
+ u_int8_t icmp6_code;
+ u_int16_t icmp6_cksum;
+ } icmpv6hdr;
+ struct {
+ union {
+ struct {
+ u_int16_t uh_sport;
+ u_int16_t uh_dport;
+ u_int16_t uh_ulen;
+ u_int16_t uh_sum;
+ };
+ struct {
+ u_int16_t source;
+ u_int16_t dest;
+ u_int16_t len;
+ u_int16_t check;
+ };
+ };
+ } udphdr;
+ struct {
+ union {
+ struct {
+ u_int16_t th_sport;
+ u_int16_t th_dport;
+ u_int32_t th_seq;
+ u_int32_t th_ack;
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ u_int8_t th_x2 : 4;
+ u_int8_t th_off : 4;
+#endif
+#if __BYTE_ORDER == __BIG_ENDIAN
+ u_int8_t th_off : 4;
+ u_int8_t th_x2 : 4;
+#endif
+ u_int8_t th_flags;
+ u_int16_t th_win;
+ u_int16_t th_sum;
+ u_int16_t th_urp;
+ };
+ struct {
+ u_int16_t source;
+ u_int16_t dest;
+ u_int32_t seq;
+ u_int32_t ack_seq;
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ u_int16_t res1 : 4;
+ u_int16_t doff : 4;
+ u_int16_t fin : 1;
+ u_int16_t syn : 1;
+ u_int16_t rst : 1;
+ u_int16_t psh : 1;
+ u_int16_t ack : 1;
+ u_int16_t urg : 1;
+ u_int16_t res2 : 2;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ u_int16_t doff : 4;
+ u_int16_t res1 : 4;
+ u_int16_t res2 : 2;
+ u_int16_t urg : 1;
+ u_int16_t ack : 1;
+ u_int16_t psh : 1;
+ u_int16_t rst : 1;
+ u_int16_t syn : 1;
+ u_int16_t fin : 1;
+#endif
+ u_int16_t window;
+ u_int16_t check;
+ u_int16_t urg_ptr;
+ };
+ };
+ } tcphdr;
+ u_int8_t tcpopts[64];
+ size_t tcpopts_len;
+
+ size_t ippadding;
+ size_t ip6padding;
+
+ pcap_thread_packet_state_t state;
+};
+
+typedef enum pcap_thread_queue_mode pcap_thread_queue_mode_t;
+typedef struct pcap_thread pcap_thread_t;
+typedef void (*pcap_thread_callback_t)(u_char* user, const struct pcap_pkthdr* pkthdr, const u_char* pkt, const char* name, int dlt);
+typedef void (*pcap_thread_layer_callback_t)(u_char* user, const pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+typedef void (*pcap_thread_stats_callback_t)(u_char* user, const struct pcap_stat* stats, const char* name, int dlt);
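+/*
+ * Callback signatures: the packet callback receives the user pointer, the pcap
+ * packet header, the captured bytes, the capture name and the data link type;
+ * the layer callback receives the parsed packet structure and the layer
+ * payload; the stats callback receives per-capture pcap statistics.
+ */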
+#ifndef HAVE_PCAP_DIRECTION_T
+typedef int pcap_direction_t;
+#endif
+typedef struct pcap_thread_pcaplist pcap_thread_pcaplist_t;
+typedef enum pcap_thread_activate_mode pcap_thread_activate_mode_t;
+
+enum pcap_thread_queue_mode {
+ PCAP_THREAD_QUEUE_MODE_COND,
+ PCAP_THREAD_QUEUE_MODE_WAIT,
+ PCAP_THREAD_QUEUE_MODE_YIELD,
+ PCAP_THREAD_QUEUE_MODE_DROP,
+ PCAP_THREAD_QUEUE_MODE_DIRECT
+};
+
+enum pcap_thread_activate_mode {
+ PCAP_THREAD_ACTIVATE_MODE_IMMEDIATE,
+ PCAP_THREAD_ACTIVATE_MODE_DELAYED
+};
+
+#ifdef HAVE_PCAP_DIRECTION_T
+#define PCAP_THREAD_T_INIT_DIRECTION_T 0,
+#else
+#define PCAP_THREAD_T_INIT_DIRECTION_T
+#endif
+
+#ifdef HAVE_PTHREAD
+#define PCAP_THREAD_T_INIT_QUEUE PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, \
+ 0, 0, 0, 0, 0, 0,
+#else
+#define PCAP_THREAD_T_INIT_QUEUE
+#endif
+
+#ifdef PCAP_TSTAMP_PRECISION_MICRO
+#define PCAP_THREAD_T_INIT_PRECISION PCAP_TSTAMP_PRECISION_MICRO
+#else
+#define PCAP_THREAD_T_INIT_PRECISION 0
+#endif
+
+typedef void* (*pcap_thread_layer_callback_frag_new_t)(void* conf, u_char* user);
+typedef void (*pcap_thread_layer_callback_frag_free_t)(void* ctx);
+typedef pcap_thread_packet_state_t (*pcap_thread_layer_callback_frag_reassemble_t)(void* ctx, const pcap_thread_packet_t* packet, const u_char* payload, size_t length, pcap_thread_packet_t** whole_packet, const u_char** whole_payload, size_t* whole_length);
+typedef void (*pcap_thread_layer_callback_frag_release_t)(void* ctx, const pcap_thread_packet_t* packet, const u_char* payload, size_t length);
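+/*
+ * Fragment extension contract: new() creates a per-capture reassembly context
+ * from the configuration, reassemble() is handed each fragment and may return
+ * a complete packet via the whole_* arguments, release() drops the stored
+ * fragments once the reassembled packet is no longer needed, and free()
+ * destroys the context.
+ */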
+
+/* clang-format off */
+#define PCAP_THREAD_LAYER_CALLBACK_FRAG_T_INIT { \
+ 0, 0, 0, 0, 0, \
+}
+/* clang-format on */
+
+typedef struct pcap_thread_layer_callback_frag pcap_thread_layer_callback_frag_t;
+struct pcap_thread_layer_callback_frag {
+ void* conf;
+ pcap_thread_layer_callback_frag_new_t new;
+ pcap_thread_layer_callback_frag_free_t free;
+ pcap_thread_layer_callback_frag_reassemble_t reassemble;
+ pcap_thread_layer_callback_frag_release_t release;
+};
+
+/* clang-format off */
+#define PCAP_THREAD_T_INIT { \
+ 0, 0, 0, 0, \
+ 0, 1, 0, PCAP_THREAD_DEFAULT_QUEUE_MODE, PCAP_THREAD_DEFAULT_QUEUE_SIZE, \
+ PCAP_THREAD_T_INIT_QUEUE \
+ 0, 0, 0, 0, PCAP_THREAD_DEFAULT_TIMEOUT, \
+ 0, 0, PCAP_THREAD_T_INIT_PRECISION, 0, \
+ PCAP_THREAD_T_INIT_DIRECTION_T \
+ 0, 0, 0, 1, PCAP_NETMASK_UNKNOWN, \
+ 0, 0, \
+ 0, "", 0, 0, \
+ { 0, 0 }, { 0, 0 }, \
+ PCAP_THREAD_DEFAULT_ACTIVATE_MODE, \
+ 0, 0, 0, 0, 0, 0, 0, 0, PCAP_THREAD_LAYER_CALLBACK_FRAG_T_INIT, 0, PCAP_THREAD_LAYER_CALLBACK_FRAG_T_INIT, 0, 0, 0, 0, \
+ 0 \
+}
+/* clang-format on */
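+/*
+ * PCAP_THREAD_T_INIT is the static initializer for pcap_thread_t; the field
+ * order above must match struct pcap_thread below.
+ */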
+
+struct pcap_thread {
+ unsigned short have_timestamp_precision : 1;
+ unsigned short have_timestamp_type : 1;
+ unsigned short have_direction : 1;
+ unsigned short was_stopped : 1;
+
+ int running;
+ int use_threads;
+ int use_layers;
+ pcap_thread_queue_mode_t queue_mode;
+ size_t queue_size;
+
+#ifdef HAVE_PTHREAD
+ pthread_cond_t have_packets;
+ pthread_cond_t can_write;
+ pthread_mutex_t mutex;
+
+ struct pcap_pkthdr* pkthdr;
+ u_char* pkt;
+ pcap_thread_pcaplist_t** pcaplist_pkt;
+ size_t read_pos;
+ size_t write_pos;
+ size_t pkts;
+#endif
+
+ int snapshot;
+ int snaplen;
+ int promiscuous;
+ int monitor;
+ int timeout;
+
+ int buffer_size;
+ int timestamp_type;
+ int timestamp_precision;
+ int immediate_mode;
+
+#ifdef HAVE_PCAP_DIRECTION_T
+ pcap_direction_t direction;
+#endif
+
+ char* filter;
+ size_t filter_len;
+ int filter_errno;
+ int filter_optimize;
+ bpf_u_int32 filter_netmask;
+
+ pcap_thread_callback_t callback;
+ pcap_thread_callback_t dropback;
+
+ int status;
+ char errbuf[PCAP_ERRBUF_SIZE];
+ pcap_thread_pcaplist_t* pcaplist;
+ pcap_thread_pcaplist_t* step;
+
+ struct timeval timedrun;
+ struct timeval timedrun_to;
+
+ pcap_thread_activate_mode_t activate_mode;
+
+ pcap_thread_layer_callback_t callback_linux_sll;
+ pcap_thread_layer_callback_t callback_ether;
+ pcap_thread_layer_callback_t callback_null;
+ pcap_thread_layer_callback_t callback_loop;
+ pcap_thread_layer_callback_t callback_ieee802;
+ pcap_thread_layer_callback_t callback_gre;
+ pcap_thread_layer_callback_t callback_ip;
+ pcap_thread_layer_callback_t callback_ipv4;
+ pcap_thread_layer_callback_frag_t callback_ipv4_frag;
+ pcap_thread_layer_callback_t callback_ipv6;
+ pcap_thread_layer_callback_frag_t callback_ipv6_frag;
+ pcap_thread_layer_callback_t callback_icmp;
+ pcap_thread_layer_callback_t callback_icmpv6;
+ pcap_thread_layer_callback_t callback_udp;
+ pcap_thread_layer_callback_t callback_tcp;
+
+ pcap_thread_layer_callback_t callback_invalid;
+};
+
+#define PCAP_THREAD_SET_ERRBUF(x, y) strncpy(x->errbuf, y, sizeof(x->errbuf) - 1)
+
+#ifdef HAVE_PTHREAD
+#define PCAP_THREAD_PCAPLIST_T_INIT_THREAD 0,
+#else
+#define PCAP_THREAD_PCAPLIST_T_INIT_THREAD
+#endif
+
+/* clang-format off */
+#define PCAP_THREAD_PCAPLIST_T_INIT { \
+ 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, \
+ PCAP_THREAD_PCAPLIST_T_INIT_THREAD \
+ { 0, 0 }, \
+ 0, \
+}
+/* clang-format on */
+
+struct pcap_thread_pcaplist {
+ unsigned short have_bpf : 1;
+ unsigned short have_ipv4_frag_ctx : 1;
+ unsigned short have_ipv6_frag_ctx : 1;
+
+ pcap_thread_pcaplist_t* next;
+ char* name;
+ pcap_t* pcap;
+ void* user;
+ int running;
+ int is_offline;
+ void* ipv4_frag_ctx;
+ void* ipv6_frag_ctx;
+
+ pcap_thread_t* pcap_thread;
+
+#ifdef HAVE_PTHREAD
+ pthread_t thread;
+#endif
+
+ struct bpf_program bpf;
+
+ pcap_thread_callback_t layer_callback;
+};
+
+const char* pcap_thread_version_str(void);
+
+int pcap_thread_version_major(void);
+int pcap_thread_version_minor(void);
+int pcap_thread_version_patch(void);
+
+pcap_thread_t* pcap_thread_create(void);
+void pcap_thread_free(pcap_thread_t* pcap_thread);
+
+int pcap_thread_use_threads(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_use_threads(pcap_thread_t* pcap_thread, const int use_threads);
+int pcap_thread_use_layers(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_use_layers(pcap_thread_t* pcap_thread, const int use_layers);
+pcap_thread_queue_mode_t pcap_thread_queue_mode(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_queue_mode(pcap_thread_t* pcap_thread, const pcap_thread_queue_mode_t queue_mode);
+struct timeval pcap_thread_queue_wait(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_queue_wait(pcap_thread_t* pcap_thread, const struct timeval queue_wait);
+pcap_thread_queue_mode_t pcap_thread_callback_queue_mode(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_callback_queue_mode(pcap_thread_t* pcap_thread, const pcap_thread_queue_mode_t callback_queue_mode);
+struct timeval pcap_thread_callback_queue_wait(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_callback_queue_wait(pcap_thread_t* pcap_thread, const struct timeval callback_queue_wait);
+int pcap_thread_snapshot(const pcap_thread_t* pcap_thread);
+int pcap_thread_snaplen(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_snaplen(pcap_thread_t* pcap_thread, const int snaplen);
+int pcap_thread_promiscuous(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_promiscuous(pcap_thread_t* pcap_thread, const int promiscuous);
+int pcap_thread_monitor(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_monitor(pcap_thread_t* pcap_thread, const int monitor);
+int pcap_thread_timeout(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_timeout(pcap_thread_t* pcap_thread, const int timeout);
+int pcap_thread_buffer_size(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_buffer_size(pcap_thread_t* pcap_thread, const int buffer_size);
+int pcap_thread_timestamp_type(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_timestamp_type(pcap_thread_t* pcap_thread, const int timestamp_type);
+int pcap_thread_timestamp_precision(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_timestamp_precision(pcap_thread_t* pcap_thread, const int timestamp_precision);
+int pcap_thread_immediate_mode(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_immediate_mode(pcap_thread_t* pcap_thread, const int immediate_mode);
+pcap_direction_t pcap_thread_direction(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_direction(pcap_thread_t* pcap_thread, const pcap_direction_t direction);
+const char* pcap_thread_filter(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_filter(pcap_thread_t* pcap_thread, const char* filter, const size_t filter_len);
+int pcap_thread_clear_filter(pcap_thread_t* pcap_thread);
+int pcap_thread_filter_errno(const pcap_thread_t* pcap_thread);
+int pcap_thread_filter_optimize(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_filter_optimize(pcap_thread_t* pcap_thread, const int filter_optimize);
+bpf_u_int32 pcap_thread_filter_netmask(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_filter_netmask(pcap_thread_t* pcap_thread, const bpf_u_int32 filter_netmask);
+struct timeval pcap_thread_timedrun(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_timedrun(pcap_thread_t* pcap_thread, const struct timeval timedrun);
+struct timeval pcap_thread_timedrun_to(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_timedrun_to(pcap_thread_t* pcap_thread, const struct timeval timedrun_to);
+pcap_thread_activate_mode_t pcap_thread_activate_mode(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_activate_mode(pcap_thread_t* pcap_thread, const pcap_thread_activate_mode_t activate_mode);
+int pcap_thread_was_stopped(const pcap_thread_t* pcap_thread);
+
+size_t pcap_thread_queue_size(const pcap_thread_t* pcap_thread);
+int pcap_thread_set_queue_size(pcap_thread_t* pcap_thread, const size_t queue_size);
+
+int pcap_thread_set_callback(pcap_thread_t* pcap_thread, pcap_thread_callback_t callback);
+int pcap_thread_set_dropback(pcap_thread_t* pcap_thread, pcap_thread_callback_t dropback);
+
+int pcap_thread_set_callback_linux_sll(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_linux_sll);
+int pcap_thread_set_callback_ether(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ether);
+int pcap_thread_set_callback_null(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_null);
+int pcap_thread_set_callback_loop(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_loop);
+int pcap_thread_set_callback_ieee802(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ieee802);
+int pcap_thread_set_callback_gre(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_gre);
+int pcap_thread_set_callback_ip(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ip);
+int pcap_thread_set_callback_ipv4(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ipv4);
+int pcap_thread_set_callback_ipv4_frag(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_frag_t callback_ipv4_frag);
+int pcap_thread_set_callback_ipv6(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_ipv6);
+int pcap_thread_set_callback_ipv6_frag(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_frag_t callback_ipv6_frag);
+int pcap_thread_set_callback_icmp(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_icmp);
+int pcap_thread_set_callback_icmpv6(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_icmpv6);
+int pcap_thread_set_callback_udp(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_udp);
+int pcap_thread_set_callback_tcp(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_tcp);
+int pcap_thread_set_callback_invalid(pcap_thread_t* pcap_thread, pcap_thread_layer_callback_t callback_invalid);
+
+int pcap_thread_open(pcap_thread_t* pcap_thread, const char* device, void* user);
+int pcap_thread_open_offline(pcap_thread_t* pcap_thread, const char* file, void* user);
+int pcap_thread_add(pcap_thread_t* pcap_thread, const char* name, pcap_t* pcap, void* user);
+int pcap_thread_activate(pcap_thread_t* pcap_thread);
+int pcap_thread_close(pcap_thread_t* pcap_thread);
+
+int pcap_thread_run(pcap_thread_t* pcap_thread);
+int pcap_thread_next(pcap_thread_t* pcap_thread);
+int pcap_thread_next_reset(pcap_thread_t* pcap_thread);
+int pcap_thread_stop(pcap_thread_t* pcap_thread);
+
+int pcap_thread_stats(pcap_thread_t* pcap_thread, pcap_thread_stats_callback_t callback, u_char* user);
+
+int pcap_thread_status(const pcap_thread_t* pcap_thread);
+const char* pcap_thread_errbuf(const pcap_thread_t* pcap_thread);
+const char* pcap_thread_strerr(int error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __pcap_thread_h */
diff --git a/src/pcap-thread/pcap_thread_ext_frag.c b/src/pcap-thread/pcap_thread_ext_frag.c
new file mode 100644
index 0000000..6593e92
--- /dev/null
+++ b/src/pcap-thread/pcap_thread_ext_frag.c
@@ -0,0 +1,1013 @@
+/*
+ * Author Jerry Lundström <jerry@dns-oarc.net>
+ * Copyright (c) 2016-2017, OARC, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "pcap_thread_ext_frag.h"
+
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_PTHREAD
+#include <pthread.h>
+#endif
+
+#ifndef PCAP_THREAD_EXT_FRAG_TRACE
+#define PCAP_THREAD_EXT_FRAG_TRACE 0
+#endif
+
+/*
+ * Forward declarations for callbacks
+ */
+
+static void* pcap_thread_layer_callback_frag_new(void* conf, u_char* user);
+static void pcap_thread_layer_callback_frag_free(void* _ctx);
+static pcap_thread_packet_state_t pcap_thread_layer_callback_frag_reassemble(void* _ctx, const pcap_thread_packet_t* packet, const u_char* payload, size_t length, pcap_thread_packet_t** whole_packet, const u_char** whole_payload, size_t* whole_length);
+static void pcap_thread_layer_callback_frag_release(void* _ctx, const pcap_thread_packet_t* packet, const u_char* payload, size_t length);
+
+/*
+ * Create/Free
+ */
+
+static pcap_thread_ext_frag_conf_t _conf_defaults = PCAP_THREAD_EXT_FRAG_CONF_T_INIT;
+
+pcap_thread_ext_frag_conf_t* pcap_thread_ext_frag_conf_new(void)
+{
+ pcap_thread_ext_frag_conf_t* conf = calloc(1, sizeof(pcap_thread_ext_frag_conf_t));
+ if (conf) {
+ memcpy(conf, &_conf_defaults, sizeof(pcap_thread_ext_frag_conf_t));
+ }
+
+ return conf;
+}
+
+void pcap_thread_ext_frag_conf_free(pcap_thread_ext_frag_conf_t* conf)
+{
+ if (conf) {
+ free(conf);
+ }
+}
+
+/*
+ * Get/Set
+ */
+
+int pcap_thread_ext_frag_conf_reject_overlap(const pcap_thread_ext_frag_conf_t* conf)
+{
+ if (!conf) {
+ return 0;
+ }
+
+ return conf->reject_overlap;
+}
+
+int pcap_thread_ext_frag_conf_set_reject_overlap(pcap_thread_ext_frag_conf_t* conf, const int reject_overlap)
+{
+ if (!conf) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ conf->reject_overlap = reject_overlap ? 1 : 0;
+
+ return PCAP_THREAD_OK;
+}
+
+int pcap_thread_ext_frag_conf_check_timeout(const pcap_thread_ext_frag_conf_t* conf)
+{
+ if (!conf) {
+ return 0;
+ }
+
+ return conf->check_timeout;
+}
+
+int pcap_thread_ext_frag_conf_set_check_timeout(pcap_thread_ext_frag_conf_t* conf, const int check_timeout)
+{
+ if (!conf) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ conf->check_timeout = check_timeout ? 1 : 0;
+
+ return PCAP_THREAD_OK;
+}
+
+pcap_thread_ext_frag_reassemble_mode_t pcap_thread_ext_frag_conf_reassemble_mode(const pcap_thread_ext_frag_conf_t* conf)
+{
+ if (!conf) {
+ return PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC791;
+ }
+
+ return conf->reassemble_mode;
+}
+
+int pcap_thread_ext_frag_conf_set_reassemble_mode(pcap_thread_ext_frag_conf_t* conf, const pcap_thread_ext_frag_reassemble_mode_t reassemble_mode)
+{
+ if (!conf) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ switch (reassemble_mode) {
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC791:
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_BSD:
+ break;
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC815:
+ /* TODO: Implement */
+ default:
+ return PCAP_THREAD_EINVAL;
+ }
+
+ conf->reassemble_mode = reassemble_mode;
+
+ return PCAP_THREAD_OK;
+}
+
+size_t pcap_thread_ext_frag_conf_fragments(const pcap_thread_ext_frag_conf_t* conf)
+{
+ if (!conf) {
+ return -1;
+ }
+
+ return conf->fragments;
+}
+
+int pcap_thread_ext_frag_conf_set_fragments(pcap_thread_ext_frag_conf_t* conf, const size_t fragments)
+{
+ if (!conf) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ conf->fragments = fragments;
+
+ return PCAP_THREAD_OK;
+}
+
+size_t pcap_thread_ext_frag_conf_per_packet(const pcap_thread_ext_frag_conf_t* conf)
+{
+ if (!conf) {
+ return -1;
+ }
+
+ return conf->per_packet;
+}
+
+int pcap_thread_ext_frag_conf_set_per_packet(pcap_thread_ext_frag_conf_t* conf, const size_t per_packet)
+{
+ if (!conf) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ conf->per_packet = per_packet;
+
+ return PCAP_THREAD_OK;
+}
+
+struct timeval pcap_thread_ext_frag_conf_timeout(const pcap_thread_ext_frag_conf_t* conf)
+{
+ if (!conf) {
+ struct timeval ret = { 0, 0 };
+ return ret;
+ }
+
+ return conf->timeout;
+}
+
+int pcap_thread_ext_frag_conf_set_timeout(pcap_thread_ext_frag_conf_t* conf, const struct timeval timeout)
+{
+ if (!conf) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ conf->timeout = timeout;
+
+ return PCAP_THREAD_OK;
+}
+
+pcap_thread_ext_frag_callback_t pcap_thread_ext_frag_conf_overlap_callback(const pcap_thread_ext_frag_conf_t* conf)
+{
+ if (!conf) {
+ return 0;
+ }
+
+ return conf->overlap_callback;
+}
+
+int pcap_thread_ext_frag_conf_set_overlap_callback(pcap_thread_ext_frag_conf_t* conf, pcap_thread_ext_frag_callback_t overlap_callback)
+{
+ if (!conf) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ conf->overlap_callback = overlap_callback;
+
+ return PCAP_THREAD_OK;
+}
+
+pcap_thread_ext_frag_callback_t pcap_thread_ext_frag_conf_timeout_callback(const pcap_thread_ext_frag_conf_t* conf)
+{
+ if (!conf) {
+ return 0;
+ }
+
+ return conf->timeout_callback;
+}
+
+int pcap_thread_ext_frag_conf_set_timeout_callback(pcap_thread_ext_frag_conf_t* conf, pcap_thread_ext_frag_callback_t timeout_callback)
+{
+ if (!conf) {
+ return PCAP_THREAD_EINVAL;
+ }
+
+ conf->timeout_callback = timeout_callback;
+
+ return PCAP_THREAD_OK;
+}
+
+/*
+ * Init
+ */
+
+pcap_thread_layer_callback_frag_t pcap_thread_ext_frag_layer_callback(pcap_thread_ext_frag_conf_t* conf)
+{
+ pcap_thread_layer_callback_frag_t callback = PCAP_THREAD_LAYER_CALLBACK_FRAG_T_INIT;
+
+ if (conf) {
+ callback.conf = (void*)conf;
+ callback.new = pcap_thread_layer_callback_frag_new;
+ callback.free = pcap_thread_layer_callback_frag_free;
+ callback.reassemble = pcap_thread_layer_callback_frag_reassemble;
+ callback.release = pcap_thread_layer_callback_frag_release;
+ }
+
+ return callback;
+}
+
+/*
+ * Callbacks
+ */
+
+#if PCAP_THREAD_EXT_FRAG_TRACE
+#include <stdio.h>
+#define layer_trace(msg) printf("LT %s:%d: " msg "\n", __FILE__, __LINE__)
+#define layer_tracef(msg, args...) printf("LT %s:%d: " msg "\n", __FILE__, __LINE__, args)
+#else
+#define layer_trace(msg)
+#define layer_tracef(msg, args...)
+#endif
+
+/* TODO:
+typedef struct _hole _hole_t;
+struct _hole {
+ _hole_t* next;
+
+ size_t first, last;
+};
+*/
+
+#ifdef HAVE_PTHREAD
+#define PCAP_THREAD_EXT_FRAG_CTX_T_INIT_MUTEX PTHREAD_MUTEX_INITIALIZER,
+#else
+#define PCAP_THREAD_EXT_FRAG_CTX_T_INIT_MUTEX
+#endif
+
+/* clang-format off */
+#define PCAP_THREAD_EXT_FRAG_CTX_T_INIT { \
+ PCAP_THREAD_EXT_FRAG_CTX_T_INIT_MUTEX \
+ PCAP_THREAD_EXT_FRAG_CONF_T_INIT, 0, 0 \
+}
+/* clang-format on */
+
+typedef struct _ctx _ctx_t;
+struct _ctx {
+#ifdef HAVE_PTHREAD
+ pthread_mutex_t mutex;
+#endif
+ pcap_thread_ext_frag_conf_t conf;
+ pcap_thread_ext_frag_fragments_t* fragments;
+ size_t num_fragments;
+};
+
+static _ctx_t _ctx_defaults = PCAP_THREAD_EXT_FRAG_CTX_T_INIT;
+
+static void* pcap_thread_layer_callback_frag_new(void* conf, u_char* user)
+{
+ _ctx_t* ctx = calloc(1, sizeof(_ctx_t));
+ if (ctx) {
+ layer_tracef("new ctx %p", ctx);
+ memcpy(ctx, &_ctx_defaults, sizeof(_ctx_t));
+ if (conf) {
+ memcpy(&(ctx->conf), conf, sizeof(pcap_thread_ext_frag_conf_t));
+ }
+ }
+
+ return ctx;
+}
+
+static void pcap_thread_layer_callback_frag_free(void* _ctx)
+{
+ _ctx_t* ctx = (_ctx_t*)_ctx;
+ if (ctx) {
+ layer_tracef("free ctx %p", ctx);
+ while (ctx->fragments) {
+ pcap_thread_ext_frag_fragments_t* frags = ctx->fragments;
+ ctx->fragments = frags->next;
+
+ while (frags->fragments) {
+ pcap_thread_ext_frag_fragment_t* frag = frags->fragments;
+ frags->fragments = frag->next;
+
+ if (frag->payload) {
+ free(frag->payload);
+ }
+ free(frag);
+ }
+
+ if (frags->payload) {
+ free(frags->payload);
+ }
+ free(frags);
+ }
+ }
+}
+
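+/*
+ * reassemble() places the new fragment in the per-packet fragment list
+ * according to the configured reassembly mode, checks for holes (and,
+ * optionally, overlaps), and once the packet is complete copies all fragments
+ * into one contiguous payload returned via the whole_* arguments.
+ */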
+static pcap_thread_packet_state_t reassemble(_ctx_t* ctx, const pcap_thread_packet_t* packet, pcap_thread_packet_t** whole_packet, const u_char** whole_payload, size_t* whole_length, pcap_thread_ext_frag_fragments_t* frags, pcap_thread_ext_frag_fragment_t* frag)
+{
+ pcap_thread_ext_frag_fragment_t *f, *f_prev;
+ int missing_frag = 0;
+ /* TODO:
+ int rfc815_seen_no_more_frags = 0;
+ */
+
+ if ((frag->offset + frag->length) > frags->length) {
+ frags->length = frag->offset + frag->length;
+ }
+
+ layer_tracef("new frag len %lu off %lu mf %d (frags len %lu)", frag->length, frag->offset, frag->flag_more_fragments, frags->length);
+
+ /* Place the fragment in the fragments list */
+ switch (ctx->conf.reassemble_mode) {
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC791:
+ for (f_prev = 0, f = frags->fragments; f; f_prev = f, f = f->next) {
+ layer_tracef("checking frag %p len %lu off %lu mf %d next %p", f, f->length, f->offset, f->flag_more_fragments, f->next);
+
+ if (f->offset > frag->offset) {
+ if (f_prev) {
+ f_prev->next = frag;
+ } else {
+ frags->fragments = frag;
+ }
+ frag->next = f;
+ f = frag;
+ break;
+ }
+ if (f_prev && (f_prev->offset + f_prev->length) < f->offset) {
+ missing_frag = 1;
+ }
+ }
+ if (!f) {
+ if (f_prev) {
+ f_prev->next = frag;
+ if ((f_prev->offset + f_prev->length) < frag->offset) {
+ missing_frag = 1;
+ }
+ } else {
+ frags->fragments = frag;
+ }
+ /* New frag is now last frag */
+ f_prev = frag;
+ } else if (!missing_frag) {
+ for (; f; f_prev = f, f = f->next) {
+ layer_tracef("checking frag %p len %lu off %lu mf %d next %p", f, f->length, f->offset, f->flag_more_fragments, f->next);
+ if (f_prev && (f_prev->offset + f_prev->length) < f->offset) {
+ missing_frag = 1;
+ break;
+ }
+ }
+ }
+ /*
+ * If the first fragment is not at offset zero or the last still has the
+ * more-fragments flag set, we are missing fragments.
+ */
+ if (!missing_frag && (frags->fragments->offset || (f_prev && f_prev->flag_more_fragments))) {
+ missing_frag = 1;
+ }
+ break;
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC815:
+ /* TODO:
+ for (f_prev = 0, f = frags->fragments; f; f_prev = f, f = f->next) {
+ layer_tracef("checking frag %p len %lu off %lu mf %d next %p", f, f->length, f->offset, f->flag_more_fragments, f->next);
+
+ if (!f->flag_more_fragments) {
+ rfc815_seen_no_more_frags = 1;
+ }
+ }
+ */
+ free(frag->payload);
+ free(frag);
+ return PCAP_THREAD_EINVAL;
+ break;
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_BSD:
+ for (f_prev = 0, f = frags->fragments; f; f_prev = f, f = f->next) {
+ layer_tracef("checking frag %p len %lu off %lu mf %d next %p", f, f->length, f->offset, f->flag_more_fragments, f->next);
+
+ if (f->offset > frag->offset) {
+ if (f_prev) {
+ f_prev->next = frag;
+ } else {
+ frags->fragments = frag;
+ }
+ frag->next = f;
+ f = frag;
+ break;
+ }
+ if (f_prev && (f->offset + f->length) < f_prev->offset) {
+ missing_frag = 1;
+ }
+ }
+ if (!f) {
+ if (f_prev) {
+ f_prev->next = frag;
+ if ((frag->offset + frag->length) < f_prev->offset) {
+ missing_frag = 1;
+ }
+ } else {
+ frags->fragments = frag;
+ }
+ } else if (!missing_frag) {
+ for (; f; f_prev = f, f = f->next) {
+ layer_tracef("checking frag %p len %lu off %lu mf %d next %p", f, f->length, f->offset, f->flag_more_fragments, f->next);
+ if (f_prev && (f->offset + f->length) < f_prev->offset) {
+ missing_frag = 1;
+ break;
+ }
+ }
+ }
+ /*
+ * If the first fragment (last on the list) is not at offset zero or the
+ * last (first on the list) still has the more-fragments flag set, we are
+ * missing fragments.
+ */
+ if (!missing_frag && ((f_prev && f_prev->offset) || frags->fragments->flag_more_fragments)) {
+ missing_frag = 1;
+ }
+ break;
+ }
+ frags->num_fragments++;
+
+ if (missing_frag) {
+ layer_trace("need more frags");
+ return PCAP_THREAD_PACKET_OK;
+ }
+
+ if (!frags->length) {
+ layer_trace("frags complete but no size");
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+
+ if (ctx->conf.reject_overlap) {
+ switch (ctx->conf.reassemble_mode) {
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC791:
+ for (f_prev = 0, f = frags->fragments; f; f_prev = f, f = f->next) {
+ layer_tracef("checking frag %p len %lu off %lu mf %d next %p", f, f->length, f->offset, f->flag_more_fragments, f->next);
+ if (f_prev && (f_prev->offset + f_prev->length) > f->offset) {
+ layer_trace("overlapping fragment");
+ if (ctx->conf.overlap_callback)
+ ctx->conf.overlap_callback(packet, frag->payload, frag->length, frags);
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+ }
+ break;
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC815:
+ /* TODO:
+ */
+ break;
+ case PCAP_THREAD_EXT_FRAG_REASSEMBLE_BSD:
+ for (f_prev = 0, f = frags->fragments; f; f_prev = f, f = f->next) {
+ layer_tracef("checking frag %p len %lu off %lu mf %d next %p", f, f->length, f->offset, f->flag_more_fragments, f->next);
+ if (f_prev && (f->offset + f->length) > f_prev->offset) {
+ layer_trace("overlapping fragment");
+ if (ctx->conf.overlap_callback)
+ ctx->conf.overlap_callback(packet, frag->payload, frag->length, frags);
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+ }
+ break;
+ }
+ }
+
+ /*
+ * Reassemble packet
+ */
+ if (!(frags->payload = calloc(1, frags->length))) {
+ layer_trace("nomem frags payload");
+ return PCAP_THREAD_PACKET_ENOMEM;
+ }
+ for (f = frags->fragments; f; f = f->next) {
+ memcpy(frags->payload + f->offset, f->payload, f->length);
+ }
+
+ frags->packet.name = packet->name;
+ frags->packet.dlt = packet->dlt;
+ frags->packet.pkthdr = packet->pkthdr;
+ /*
+ * Add the total reassembled payload length, minus the current fragment's
+ * length which is already included, to the pkthdr lengths so that the
+ * returned packet reports the correct total length (header + payload).
+ */
+ frags->packet.pkthdr.len += frags->length - frag->length;
+ frags->packet.pkthdr.caplen += frags->length - frag->length;
+ frags->packet.have_pkthdr = packet->have_pkthdr;
+
+ *whole_packet = &(frags->packet);
+ *whole_payload = frags->payload;
+ *whole_length = frags->length;
+
+ return PCAP_THREAD_PACKET_OK;
+}
+
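+/*
+ * IPv4 fragments are matched on (ip_id, protocol, source, destination); each
+ * context keeps a list of in-progress packets, most recently used first.
+ */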
+static pcap_thread_packet_state_t reassemble_ipv4(_ctx_t* ctx, const pcap_thread_packet_t* packet, const u_char* payload, size_t length, pcap_thread_packet_t** whole_packet, const u_char** whole_payload, size_t* whole_length)
+{
+ pcap_thread_ext_frag_fragments_t *frags, *frags_prev;
+ pcap_thread_ext_frag_fragment_t* frag;
+
+ if (!packet->have_pkthdr) {
+ layer_trace("no pkthdr");
+ return PCAP_THREAD_PACKET_INVALID;
+ }
+
+ layer_tracef("ipv4 ctx %p", ctx);
+
+ /* Find packet fragments */
+ for (frags_prev = 0, frags = ctx->fragments; frags; frags_prev = frags, frags = frags->next) {
+ if (frags->packet.have_iphdr
+ && packet->iphdr.ip_id == frags->packet.iphdr.ip_id
+ && packet->iphdr.ip_p == frags->packet.iphdr.ip_p
+ && packet->iphdr.ip_src.s_addr == frags->packet.iphdr.ip_src.s_addr
+ && packet->iphdr.ip_dst.s_addr == frags->packet.iphdr.ip_dst.s_addr) {
+
+ layer_tracef("frags %d found", packet->iphdr.ip_id);
+
+ /* Found it, remove from list */
+ if (frags_prev) {
+ frags_prev->next = frags->next;
+ }
+ if (ctx->fragments == frags) {
+ ctx->fragments = frags->next;
+ }
+ frags->next = 0;
+ break;
+ }
+ }
+
+ /* Check if frags has timed out */
+ if (ctx->conf.check_timeout && frags) {
+ struct timeval ts;
+
+ ts = frags->packet.pkthdr.ts;
+ ts.tv_sec += ctx->conf.timeout.tv_sec;
+ ts.tv_usec += ctx->conf.timeout.tv_usec;
+ ts.tv_usec %= 1000000;
+ if (packet->pkthdr.ts.tv_sec > ts.tv_sec
+ || (packet->pkthdr.ts.tv_sec == ts.tv_sec
+ && packet->pkthdr.ts.tv_usec > ts.tv_usec)) {
+
+ pcap_thread_ext_frag_fragment_t* f;
+
+ layer_tracef("frags timed out (last: %lu.%lu, this: %lu.%lu)",
+ frags->packet.pkthdr.ts.tv_sec, frags->packet.pkthdr.ts.tv_usec,
+ packet->pkthdr.ts.tv_sec, packet->pkthdr.ts.tv_usec);
+
+ if (ctx->conf.timeout_callback)
+ ctx->conf.timeout_callback(packet, payload, length, frags);
+
+ for (f = frags->fragments; f;) {
+ frag = f;
+ f = f->next;
+ if (frag->payload) {
+ free(frag->payload);
+ }
+ free(frag);
+ }
+
+ if (frags->payload) {
+ free(frags->payload);
+ }
+ free(frags);
+ frags = 0;
+ } else {
+ frags->packet.pkthdr.ts = packet->pkthdr.ts;
+ }
+ }
+
+ /* No fragments found, create new */
+ if (!frags) {
+ if (ctx->num_fragments >= ctx->conf.fragments) {
+ layer_trace("too many frags");
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+
+ if (!(frags = calloc(1, sizeof(pcap_thread_ext_frag_fragments_t)))) {
+ layer_trace("nomem frags");
+ return PCAP_THREAD_PACKET_ENOMEM;
+ }
+
+ layer_tracef("new frags %d", packet->iphdr.ip_id);
+
+ // TODO: How to handle prevpkt
+ memcpy(&(frags->packet.iphdr), &(packet->iphdr), sizeof(struct ip));
+ frags->packet.have_iphdr = 1;
+ frags->packet.pkthdr.ts = packet->pkthdr.ts;
+
+ ctx->num_fragments++;
+ }
+ /* Put the fragments first on the list */
+ frags->next = ctx->fragments;
+ ctx->fragments = frags;
+
+ if (frags->payload) {
+ layer_trace("already reassembled");
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+
+ if (frags->num_fragments >= ctx->conf.per_packet) {
+ layer_trace("too many frags frag");
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+
+ /* Allocate for the new fragment */
+ if (!(frag = calloc(1, sizeof(pcap_thread_ext_frag_fragment_t)))) {
+ layer_trace("nomem frag");
+ return PCAP_THREAD_PACKET_ENOMEM;
+ }
+ if (!(frag->payload = calloc(1, length))) {
+ free(frag);
+ layer_trace("nomem frag");
+ return PCAP_THREAD_PACKET_ENOMEM;
+ }
+ memcpy(frag->payload, payload, length);
+ frag->length = length;
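+ /*
+ * IPv4: the low 13 bits of ip_off are the fragment offset in 8-octet units
+ * and 0x2000 is the more-fragments flag; this assumes ip_off has already
+ * been converted to host byte order by the IP layer parser.
+ */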
+ frag->offset = (packet->iphdr.ip_off & 0x1fff) * 8;
+ frag->flag_more_fragments = packet->iphdr.ip_off & 0x2000 ? 1 : 0;
+
+ return reassemble(ctx, packet, whole_packet, whole_payload, whole_length, frags, frag);
+}
+
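+/*
+ * IPv6 fragments are matched on the fragment header identification plus the
+ * source and destination addresses; if a routing header was seen, its final
+ * destination (ip6rtdst) is used instead of the IPv6 header destination.
+ */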
+static pcap_thread_packet_state_t reassemble_ipv6(_ctx_t* ctx, const pcap_thread_packet_t* packet, const u_char* payload, size_t length, pcap_thread_packet_t** whole_packet, const u_char** whole_payload, size_t* whole_length)
+{
+ pcap_thread_ext_frag_fragments_t *frags, *frags_prev;
+ pcap_thread_ext_frag_fragment_t* frag;
+
+ layer_tracef("ipv6 ctx %p", ctx);
+
+ /* Find packet fragments */
+ for (frags_prev = 0, frags = ctx->fragments; frags; frags_prev = frags, frags = frags->next) {
+ if (frags->packet.have_ip6hdr
+ && packet->ip6frag.ip6f_ident == frags->packet.ip6frag.ip6f_ident
+ && !memcmp(&(packet->ip6hdr.ip6_src), &(frags->packet.ip6hdr.ip6_src), sizeof(struct in6_addr))
+ && ((!packet->have_ip6rtdst && !memcmp(&(packet->ip6hdr.ip6_dst), &(frags->packet.ip6hdr.ip6_dst), sizeof(struct in6_addr)))
+ || (packet->have_ip6rtdst && !memcmp(&(packet->ip6rtdst), &(frags->packet.ip6hdr.ip6_dst), sizeof(struct in6_addr))))) {
+
+ layer_tracef("frags %x found", packet->ip6frag.ip6f_ident);
+
+ /* Found it, remove from list */
+ if (frags_prev) {
+ frags_prev->next = frags->next;
+ }
+ if (ctx->fragments == frags) {
+ ctx->fragments = frags->next;
+ }
+ frags->next = 0;
+ break;
+ }
+ }
+
+ /* Check if frags has timed out */
+ if (ctx->conf.check_timeout && frags) {
+ struct timeval ts;
+
+ ts = frags->packet.pkthdr.ts;
+ ts.tv_sec += ctx->conf.timeout.tv_sec;
+ ts.tv_usec += ctx->conf.timeout.tv_usec;
+ ts.tv_usec %= 1000000;
+ if (packet->pkthdr.ts.tv_sec > ts.tv_sec
+ || (packet->pkthdr.ts.tv_sec == ts.tv_sec
+ && packet->pkthdr.ts.tv_usec > ts.tv_usec)) {
+
+ pcap_thread_ext_frag_fragment_t* f;
+
+ layer_tracef("frags timed out (last: %lu.%lu, this: %lu.%lu)",
+ frags->packet.pkthdr.ts.tv_sec, frags->packet.pkthdr.ts.tv_usec,
+ packet->pkthdr.ts.tv_sec, packet->pkthdr.ts.tv_usec);
+
+ if (ctx->conf.timeout_callback)
+ ctx->conf.timeout_callback(packet, payload, length, frags);
+
+ for (f = frags->fragments; f;) {
+ frag = f;
+ f = f->next;
+ if (frag->payload) {
+ free(frag->payload);
+ }
+ free(frag);
+ }
+
+ if (frags->payload) {
+ free(frags->payload);
+ }
+ free(frags);
+ frags = 0;
+ } else {
+ frags->packet.pkthdr.ts = packet->pkthdr.ts;
+ }
+ }
+
+ /* No fragments found, create new */
+ if (!frags) {
+ if (ctx->num_fragments >= ctx->conf.fragments) {
+ layer_trace("too many frags");
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+
+ if (!(frags = calloc(1, sizeof(pcap_thread_ext_frag_fragments_t)))) {
+ layer_trace("nomem frags");
+ return PCAP_THREAD_PACKET_ENOMEM;
+ }
+
+ layer_tracef("new frags %x", packet->ip6frag.ip6f_ident);
+
+ // TODO: How to handle prevpkt
+ memcpy(&(frags->packet.ip6hdr), &(packet->ip6hdr), sizeof(struct ip6_hdr));
+ frags->packet.have_ip6hdr = 1;
+ memcpy(&(frags->packet.ip6frag), &(packet->ip6frag), sizeof(struct ip6_frag));
+ frags->packet.have_ip6frag = 1;
+ frags->packet.ip6frag_payload = packet->ip6frag_payload;
+ if (packet->have_ip6rtdst) {
+ frags->packet.ip6hdr.ip6_dst = packet->ip6rtdst;
+ }
+ frags->packet.pkthdr.ts = packet->pkthdr.ts;
+
+ ctx->num_fragments++;
+ } else {
+ if (frags->packet.ip6frag_payload != packet->ip6frag_payload) {
+ layer_trace("wrong payload");
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+ }
+ /* Put the fragments first on the list */
+ frags->next = ctx->fragments;
+ ctx->fragments = frags;
+
+ if (frags->payload) {
+ layer_trace("already reassembled");
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+
+ if (frags->num_fragments >= ctx->conf.per_packet) {
+ layer_trace("too many frags frag");
+ return PCAP_THREAD_PACKET_INVALID_FRAGMENT;
+ }
+
+ /* Allocate for the new fragment */
+ if (!(frag = calloc(1, sizeof(pcap_thread_ext_frag_fragment_t)))) {
+ layer_trace("nomem frag");
+ return PCAP_THREAD_PACKET_ENOMEM;
+ }
+ if (!(frag->payload = calloc(1, length))) {
+ free(frag);
+ layer_trace("nomem frag");
+ return PCAP_THREAD_PACKET_ENOMEM;
+ }
+ memcpy(frag->payload, payload, length);
+ frag->length = length;
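+ /*
+ * IPv6: ip6f_offlg carries the fragment offset in its upper 13 bits (mask
+ * 0xfff8, in 8-octet units) and the more-fragments flag in bit 0x0001; this
+ * assumes the field has already been converted to host byte order.
+ */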
+ frag->offset = ((packet->ip6frag.ip6f_offlg & 0xfff8) >> 3) * 8;
+ frag->flag_more_fragments = packet->ip6frag.ip6f_offlg & 0x1 ? 1 : 0;
+
+ return reassemble(ctx, packet, whole_packet, whole_payload, whole_length, frags, frag);
+}
+
+#ifdef HAVE_PTHREAD /* _release() is only used when mutex functions fails */
+static void _release(_ctx_t* ctx, const pcap_thread_packet_t* packet)
+{
+ pcap_thread_ext_frag_fragments_t *frags, *frags_prev;
+
+ layer_tracef("release ctx %p", ctx);
+
+ /* Find packet fragments */
+ for (frags_prev = 0, frags = ctx->fragments; frags; frags_prev = frags, frags = frags->next) {
+ if (frags->packet.have_iphdr
+ && packet->iphdr.ip_id == frags->packet.iphdr.ip_id
+ && packet->iphdr.ip_p == frags->packet.iphdr.ip_p
+ && packet->iphdr.ip_src.s_addr == frags->packet.iphdr.ip_src.s_addr
+ && packet->iphdr.ip_dst.s_addr == frags->packet.iphdr.ip_dst.s_addr) {
+
+ layer_tracef("release frags %d", packet->iphdr.ip_id);
+ break;
+ } else if (frags->packet.have_ip6hdr
+ && packet->ip6frag.ip6f_ident == frags->packet.ip6frag.ip6f_ident
+ && !memcmp(&(packet->ip6hdr.ip6_src), &(frags->packet.ip6hdr.ip6_src), sizeof(struct in6_addr))
+ && ((!packet->have_ip6rtdst && !memcmp(&(packet->ip6hdr.ip6_dst), &(frags->packet.ip6hdr.ip6_dst), sizeof(struct in6_addr)))
+ || (packet->have_ip6rtdst && !memcmp(&(packet->ip6rtdst), &(frags->packet.ip6hdr.ip6_dst), sizeof(struct in6_addr))))) {
+
+ layer_tracef("release frags %x", packet->ip6frag.ip6f_ident);
+ break;
+ }
+ }
+
+ if (frags) {
+ pcap_thread_ext_frag_fragment_t *frag, *f;
+
+ /* Found it, remove from list */
+ if (frags_prev) {
+ frags_prev->next = frags->next;
+ }
+ if (ctx->fragments == frags) {
+ ctx->fragments = frags->next;
+ }
+ frags->next = 0;
+ ctx->num_fragments--;
+
+ for (f = frags->fragments; f;) {
+ frag = f;
+ f = f->next;
+ if (frag->payload) {
+ free(frag->payload);
+ }
+ free(frag);
+ }
+
+ if (frags->payload) {
+ free(frags->payload);
+ }
+ free(frags);
+ }
+}
+#endif
+
+static pcap_thread_packet_state_t pcap_thread_layer_callback_frag_reassemble(void* _ctx, const pcap_thread_packet_t* packet, const u_char* payload, size_t length, pcap_thread_packet_t** whole_packet, const u_char** whole_payload, size_t* whole_length)
+{
+ _ctx_t* ctx = (_ctx_t*)_ctx;
+ pcap_thread_packet_state_t state = PCAP_THREAD_PACKET_INVALID;
+
+ if (!ctx) {
+ return PCAP_THREAD_PACKET_INVALID;
+ }
+ if (!packet) {
+ return PCAP_THREAD_PACKET_INVALID;
+ }
+ if (!payload) {
+ return PCAP_THREAD_PACKET_INVALID;
+ }
+ if (!length) {
+ return PCAP_THREAD_PACKET_INVALID;
+ }
+ if (!whole_packet) {
+ return PCAP_THREAD_PACKET_INVALID;
+ }
+ if (!whole_payload) {
+ return PCAP_THREAD_PACKET_INVALID;
+ }
+ if (!whole_length) {
+ return PCAP_THREAD_PACKET_INVALID;
+ }
+
+ if (ctx && packet && payload && length
+ && whole_packet && whole_payload && whole_length) {
+ if (packet->have_iphdr) {
+#ifdef HAVE_PTHREAD
+ if (pthread_mutex_lock(&(ctx->mutex))) {
+ return PCAP_THREAD_PACKET_EMUTEX;
+ }
+#endif
+ state = reassemble_ipv4(ctx, packet, payload, length, whole_packet, whole_payload, whole_length);
+#ifdef HAVE_PTHREAD
+ if (pthread_mutex_unlock(&(ctx->mutex))) {
+ if (state == PCAP_THREAD_PACKET_OK && *whole_packet && *whole_payload && *whole_length) {
+ _release(ctx, *whole_packet);
+ }
+ return PCAP_THREAD_PACKET_EMUTEX;
+ }
+#endif
+ } else if (packet->have_ip6hdr && packet->have_ip6frag) {
+#ifdef HAVE_PTHREAD
+ if (pthread_mutex_lock(&(ctx->mutex))) {
+ return PCAP_THREAD_PACKET_EMUTEX;
+ }
+#endif
+ state = reassemble_ipv6(ctx, packet, payload, length, whole_packet, whole_payload, whole_length);
+#ifdef HAVE_PTHREAD
+ if (pthread_mutex_unlock(&(ctx->mutex))) {
+ if (state == PCAP_THREAD_PACKET_OK && *whole_packet && *whole_payload && *whole_length) {
+ _release(ctx, *whole_packet);
+ }
+ return PCAP_THREAD_PACKET_EMUTEX;
+ }
+#endif
+ }
+ }
+
+ return state;
+}
+
+static void pcap_thread_layer_callback_frag_release(void* _ctx, const pcap_thread_packet_t* packet, const u_char* payload, size_t length)
+{
+ _ctx_t* ctx = (_ctx_t*)_ctx;
+ pcap_thread_ext_frag_fragments_t *frags, *frags_prev;
+
+ if (!ctx) {
+ return;
+ }
+ if (!packet) {
+ return;
+ }
+ if (packet->have_ip6hdr) {
+ if (!packet->have_ip6frag) {
+ return;
+ }
+ } else if (!packet->have_iphdr) {
+ return;
+ }
+
+#ifdef HAVE_PTHREAD
+ if (pthread_mutex_lock(&(ctx->mutex))) {
+ return;
+ }
+#endif
+
+ /* Find packet fragments */
+ for (frags_prev = 0, frags = ctx->fragments; frags; frags_prev = frags, frags = frags->next) {
+ if ((frags->packet.have_iphdr
+ && packet->iphdr.ip_id == frags->packet.iphdr.ip_id
+ && packet->iphdr.ip_p == frags->packet.iphdr.ip_p
+ && packet->iphdr.ip_src.s_addr == frags->packet.iphdr.ip_src.s_addr
+ && packet->iphdr.ip_dst.s_addr == frags->packet.iphdr.ip_dst.s_addr)
+ || (frags->packet.have_ip6hdr
+ && packet->ip6frag.ip6f_ident == frags->packet.ip6frag.ip6f_ident
+ && !memcmp(&(packet->ip6hdr.ip6_src), &(frags->packet.ip6hdr.ip6_src), sizeof(struct in6_addr))
+ && ((!packet->have_ip6rtdst && !memcmp(&(packet->ip6hdr.ip6_dst), &(frags->packet.ip6hdr.ip6_dst), sizeof(struct in6_addr)))
+ || (packet->have_ip6rtdst && !memcmp(&(packet->ip6rtdst), &(frags->packet.ip6hdr.ip6_dst), sizeof(struct in6_addr)))))) {
+
+ /* Found it, remove from list */
+ if (frags_prev) {
+ frags_prev->next = frags->next;
+ }
+ if (ctx->fragments == frags) {
+ ctx->fragments = frags->next;
+ }
+ frags->next = 0;
+ ctx->num_fragments--;
+ break;
+ }
+ }
+
+#ifdef HAVE_PTHREAD
+ pthread_mutex_unlock(&(ctx->mutex));
+#endif
+
+ if (frags) {
+ pcap_thread_ext_frag_fragment_t *frag, *f;
+
+ for (f = frags->fragments; f;) {
+ frag = f;
+ f = f->next;
+ if (frag->payload) {
+ free(frag->payload);
+ }
+ free(frag);
+ }
+
+ if (frags->payload) {
+ free(frags->payload);
+ }
+ free(frags);
+ }
+}
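
The two release paths above (_release() and pcap_thread_layer_callback_frag_release()) locate a fragment list with the same compound comparison: IPv4 entries are keyed on (ip_id, ip_p, ip_src, ip_dst) and IPv6 entries on (ip6f_ident, ip6_src, and either ip6_dst or the routing-header final destination when one was seen). The helper below is an illustrative restatement of that match, not part of the upstream diff; it uses only packet and fragment-list fields that already appear in the code above.

#include <string.h>
#include <netinet/in.h>

#include "pcap_thread.h"
#include "pcap_thread_ext_frag.h"

/* Illustrative sketch: does `packet` belong to the fragment list `frags`? */
static int frags_match(const pcap_thread_packet_t* packet, const pcap_thread_ext_frag_fragments_t* frags)
{
    if (frags->packet.have_iphdr && packet->have_iphdr) {
        /* IPv4: identification, protocol and both addresses must match */
        return packet->iphdr.ip_id == frags->packet.iphdr.ip_id
               && packet->iphdr.ip_p == frags->packet.iphdr.ip_p
               && packet->iphdr.ip_src.s_addr == frags->packet.iphdr.ip_src.s_addr
               && packet->iphdr.ip_dst.s_addr == frags->packet.iphdr.ip_dst.s_addr;
    }
    if (frags->packet.have_ip6hdr && packet->have_ip6hdr && packet->have_ip6frag) {
        /* IPv6: fragment identification and source must match; the destination
           is taken from the routing header if one was seen, otherwise from the
           IPv6 header itself */
        const void* dst = packet->have_ip6rtdst ? (const void*)&packet->ip6rtdst : (const void*)&packet->ip6hdr.ip6_dst;

        return packet->ip6frag.ip6f_ident == frags->packet.ip6frag.ip6f_ident
               && !memcmp(&packet->ip6hdr.ip6_src, &frags->packet.ip6hdr.ip6_src, sizeof(struct in6_addr))
               && !memcmp(dst, &frags->packet.ip6hdr.ip6_dst, sizeof(struct in6_addr));
    }
    return 0;
}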
diff --git a/src/pcap-thread/pcap_thread_ext_frag.h b/src/pcap-thread/pcap_thread_ext_frag.h
new file mode 100644
index 0000000..dfa151a
--- /dev/null
+++ b/src/pcap-thread/pcap_thread_ext_frag.h
@@ -0,0 +1,131 @@
+/*
+ * Author Jerry Lundström <jerry@dns-oarc.net>
+ * Copyright (c) 2016-2017, OARC, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pcap_thread.h"
+
+#ifndef __pcap_thread_ext_frag_h
+#define __pcap_thread_ext_frag_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * RFC791 - Handle fragments in ascending offset order, allow fragments to overlap
+ * RFC815 - Handle fragments in the order they are received, allow fragments to overlap
+ * BSD    - Handle fragments in descending offset order, allow fragments to overlap
+ *
+ * See the illustrative sketch after this enum for selecting a mode.
+ */
+typedef enum pcap_thread_ext_frag_reassemble_mode pcap_thread_ext_frag_reassemble_mode_t;
+enum pcap_thread_ext_frag_reassemble_mode {
+ PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC791 = 0,
+ PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC815,
+ PCAP_THREAD_EXT_FRAG_REASSEMBLE_BSD
+};
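
As a concrete illustration of the three policies (not part of the upstream diff): for fragments arriving at offsets 1480, 0 and 2960, RFC791 mode considers them in ascending offset order (0, 1480, 2960), RFC815 mode in arrival order (1480, 0, 2960), and BSD mode in descending offset order (2960, 1480, 0). A minimal sketch for picking a mode, using the configuration functions declared later in this header and assuming a non-zero setter return value means failure:

#include "pcap_thread_ext_frag.h"

/* Sketch only: create a configuration that reassembles in descending
   offset order (BSD policy). */
static pcap_thread_ext_frag_conf_t* new_bsd_order_conf(void)
{
    pcap_thread_ext_frag_conf_t* conf = pcap_thread_ext_frag_conf_new();

    if (conf && pcap_thread_ext_frag_conf_set_reassemble_mode(conf, PCAP_THREAD_EXT_FRAG_REASSEMBLE_BSD)) {
        pcap_thread_ext_frag_conf_free(conf);
        return 0;
    }
    return conf;
}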
+
+typedef struct pcap_thread_ext_frag_fragment pcap_thread_ext_frag_fragment_t;
+struct pcap_thread_ext_frag_fragment {
+ pcap_thread_ext_frag_fragment_t* next;
+
+ unsigned short flag_more_fragments : 1;
+
+ u_char* payload;
+ size_t length;
+ size_t offset;
+};
+
+typedef struct pcap_thread_ext_frag_fragments pcap_thread_ext_frag_fragments_t;
+struct pcap_thread_ext_frag_fragments {
+ pcap_thread_ext_frag_fragments_t* next;
+
+ pcap_thread_packet_t packet;
+ pcap_thread_ext_frag_fragment_t* fragments;
+ size_t num_fragments;
+ u_char* payload;
+ size_t length;
+};
+
+typedef void (*pcap_thread_ext_frag_callback_t)(const pcap_thread_packet_t* packet, const u_char* payload, size_t length, const pcap_thread_ext_frag_fragments_t* fragments);
+
+/* clang-format off */
+#define PCAP_THREAD_EXT_FRAG_CONF_T_INIT { \
+ 0, 0, /* reject_overlap, check_timeout */ \
+ PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC791, /* reassemble_mode */ \
+ 100, 10, { 30, 0 }, /* fragments, per_packet, timeout (seconds, microseconds) */ \
+ 0, 0 /* overlap_callback, timeout_callback */ \
+}
+/* clang-format on */
+
+typedef struct pcap_thread_ext_frag_conf pcap_thread_ext_frag_conf_t;
+struct pcap_thread_ext_frag_conf {
+ unsigned short reject_overlap : 1;
+ unsigned short check_timeout : 1;
+
+ pcap_thread_ext_frag_reassemble_mode_t reassemble_mode;
+
+ size_t fragments;
+ size_t per_packet;
+ struct timeval timeout;
+
+ pcap_thread_ext_frag_callback_t overlap_callback;
+ pcap_thread_ext_frag_callback_t timeout_callback;
+};
+
+pcap_thread_ext_frag_conf_t* pcap_thread_ext_frag_conf_new(void);
+void pcap_thread_ext_frag_conf_free(pcap_thread_ext_frag_conf_t* conf);
+
+int pcap_thread_ext_frag_conf_reject_overlap(const pcap_thread_ext_frag_conf_t* conf);
+int pcap_thread_ext_frag_conf_set_reject_overlap(pcap_thread_ext_frag_conf_t* conf, const int reject_overlap);
+int pcap_thread_ext_frag_conf_check_timeout(const pcap_thread_ext_frag_conf_t* conf);
+int pcap_thread_ext_frag_conf_set_check_timeout(pcap_thread_ext_frag_conf_t* conf, const int check_timeout);
+pcap_thread_ext_frag_reassemble_mode_t pcap_thread_ext_frag_conf_reassemble_mode(const pcap_thread_ext_frag_conf_t* conf);
+int pcap_thread_ext_frag_conf_set_reassemble_mode(pcap_thread_ext_frag_conf_t* conf, const pcap_thread_ext_frag_reassemble_mode_t reassemble_mode);
+size_t pcap_thread_ext_frag_conf_fragments(const pcap_thread_ext_frag_conf_t* conf);
+int pcap_thread_ext_frag_conf_set_fragments(pcap_thread_ext_frag_conf_t* conf, const size_t fragments);
+size_t pcap_thread_ext_frag_conf_per_packet(const pcap_thread_ext_frag_conf_t* conf);
+int pcap_thread_ext_frag_conf_set_per_packet(pcap_thread_ext_frag_conf_t* conf, const size_t per_packet);
+struct timeval pcap_thread_ext_frag_conf_timeout(const pcap_thread_ext_frag_conf_t* conf);
+int pcap_thread_ext_frag_conf_set_timeout(pcap_thread_ext_frag_conf_t* conf, const struct timeval timeout);
+pcap_thread_ext_frag_callback_t pcap_thread_ext_frag_conf_overlap_callback(const pcap_thread_ext_frag_conf_t* conf);
+int pcap_thread_ext_frag_conf_set_overlap_callback(pcap_thread_ext_frag_conf_t* conf, pcap_thread_ext_frag_callback_t overlap_callback);
+pcap_thread_ext_frag_callback_t pcap_thread_ext_frag_conf_timeout_callback(const pcap_thread_ext_frag_conf_t* conf);
+int pcap_thread_ext_frag_conf_set_timeout_callback(pcap_thread_ext_frag_conf_t* conf, pcap_thread_ext_frag_callback_t timeout_callback);
+
+pcap_thread_layer_callback_frag_t pcap_thread_ext_frag_layer_callback(pcap_thread_ext_frag_conf_t* conf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __pcap_thread_ext_frag_h */
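
For context, a minimal usage sketch, not part of this diff: a configuration object is created and tuned with the setters declared above, and pcap_thread_ext_frag_layer_callback() turns it into the fragment-handling layer callback. The pcap_thread_set_callback_ipv4_frag() attach point and the pcap_thread_t type are assumed to come from pcap_thread.h, which is not shown in this hunk, so treat those names as assumptions; IPv6 would be hooked up the same way through a corresponding _ipv6_frag setter.

#include "pcap_thread.h"
#include "pcap_thread_ext_frag.h"

/* Illustrative sketch only: build a fragment-reassembly configuration and
   attach it to a pcap_thread instance. */
static int setup_ipv4_reassembly(pcap_thread_t* pt, pcap_thread_ext_frag_conf_t** out_conf)
{
    struct timeval timeout = { 30, 0 };
    pcap_thread_ext_frag_conf_t* conf = pcap_thread_ext_frag_conf_new();

    if (!conf) {
        return -1;
    }

    /* Track at most 200 fragmented packets with 20 fragments each, and
       expire them after 30 seconds (a non-zero return from any setter is
       treated as an error here). */
    if (pcap_thread_ext_frag_conf_set_fragments(conf, 200)
        || pcap_thread_ext_frag_conf_set_per_packet(conf, 20)
        || pcap_thread_ext_frag_conf_set_check_timeout(conf, 1)
        || pcap_thread_ext_frag_conf_set_timeout(conf, timeout)
        || pcap_thread_ext_frag_conf_set_reassemble_mode(conf, PCAP_THREAD_EXT_FRAG_REASSEMBLE_RFC791)
        || pcap_thread_set_callback_ipv4_frag(pt, pcap_thread_ext_frag_layer_callback(conf))) {
        pcap_thread_ext_frag_conf_free(conf);
        return -1;
    }

    /* The configuration must outlive the callbacks, so it is handed back
       to the caller rather than freed here. */
    *out_conf = conf;
    return 0;
}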