author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 11:19:16 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 12:07:37 +0000
commit     b485aab7e71c1625cfc27e0f92c9509f42378458 (patch)
tree       ae9abe108601079d1679194de237c9a435ae5b55 /src/fluent-bit/tests/lib
parent     Adding upstream version 1.44.3. (diff)
download   netdata-b485aab7e71c1625cfc27e0f92c9509f42378458.tar.xz
           netdata-b485aab7e71c1625cfc27e0f92c9509f42378458.zip
Adding upstream version 1.45.3+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/fluent-bit/tests/lib')
-rw-r--r--  src/fluent-bit/tests/lib/acutest/README.md | 4
-rw-r--r--  src/fluent-bit/tests/lib/acutest/acutest.h | 1839
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/.gitignore | 3
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/.travis.yml | 29
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/CODE_OF_CONDUCT.md | 46
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/LICENSE | 201
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/README.md | 636
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/CHANGES-2.1.md | 261
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.0.txt | 104
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.1.txt | 88
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.2.txt | 83
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.3.txt | 84
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.4.txt | 100
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.5.txt | 128
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.6.txt | 112
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.7.md | 66
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.8.md | 56
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/TODO.txt | 13
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/contributors.md | 15
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/doc/design_doc.txt | 34
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/examples/equality_test.sh | 9
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/examples/lineno_test.sh | 15
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/examples/math.inc | 17
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/examples/math_test.sh | 25
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/examples/mkdir_test.sh | 79
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/examples/mock_file.sh | 80
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/examples/mock_file_test.sh | 33
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/examples/party_test.sh | 16
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/examples/suite_test.sh | 32
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/lib/shflags | 1222
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/lib/versions | 273
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/shunit2 | 1343
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/shunit2_args_test.sh | 59
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/shunit2_asserts_test.sh | 258
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/shunit2_failures_test.sh | 85
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/shunit2_macros_test.sh | 265
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/shunit2_misc_test.sh | 315
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/shunit2_standalone_test.sh | 38
-rw-r--r--  src/fluent-bit/tests/lib/shunit2/shunit2_test_helpers | 234
-rwxr-xr-x  src/fluent-bit/tests/lib/shunit2/test_runner | 171
40 files changed, 8471 insertions(+), 0 deletions(-)
diff --git a/src/fluent-bit/tests/lib/acutest/README.md b/src/fluent-bit/tests/lib/acutest/README.md
new file mode 100644
index 000000000..e7f75b652
--- /dev/null
+++ b/src/fluent-bit/tests/lib/acutest/README.md
@@ -0,0 +1,4 @@
+Taken from https://github.com/mity/acutest
+
+MIT License
+
diff --git a/src/fluent-bit/tests/lib/acutest/acutest.h b/src/fluent-bit/tests/lib/acutest/acutest.h
new file mode 100644
index 000000000..20500ae55
--- /dev/null
+++ b/src/fluent-bit/tests/lib/acutest/acutest.h
@@ -0,0 +1,1839 @@
+/*
+ * Acutest -- Another C/C++ Unit Test facility
+ * <https://github.com/mity/acutest>
+ *
+ * Copyright 2013-2020 Martin Mitas
+ * Copyright 2019 Garrett D'Amore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef ACUTEST_H
+#define ACUTEST_H
+
+
+/************************
+ *** Public interface ***
+ ************************/
+
+/* By default, "acutest.h" provides the main program entry point (function
+ * main()). However, if the test suite is composed of multiple source files
+ * which include "acutest.h", then this causes a problem of multiple main()
+ * definitions. To avoid this problem, #define macro TEST_NO_MAIN in all
+ * compilation units but one.
+ */
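+
+/* For illustration only (the file names are hypothetical), a suite split
+ * across two source files might be organized like this:
+ *
+ *   // tests_main.c -- the one unit that provides main()
+ *   #include "acutest.h"
+ *
+ *   // tests_more.c -- every other unit defines TEST_NO_MAIN first
+ *   #define TEST_NO_MAIN
+ *   #include "acutest.h"
+ */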
+
+/* Macro to specify the list of unit tests in the suite.
+ * The unit test implementation MUST provide the list of unit tests it
+ * implements with this macro:
+ *
+ * TEST_LIST = {
+ * { "test1_name", test1_func_ptr },
+ * { "test2_name", test2_func_ptr },
+ * ...
+ * { NULL, NULL } // zeroed record marking the end of the list
+ * };
+ *
+ * The list specifies the name of each test (which must be unique) and a
+ * pointer to a function implementing it. The function takes no arguments
+ * and returns no value, i.e. every test function has to be compatible
+ * with this prototype:
+ *
+ * void test_func(void);
+ *
+ * Note the list has to end with a zeroed record.
+ */
+#define TEST_LIST const struct acutest_test_ acutest_list_[]
+
+
+/* Macros for testing whether a unit test succeeds or fails. These macros
+ * can be used arbitrarily in functions implementing the unit tests.
+ *
+ * If any condition fails throughout execution of a test, the test fails.
+ *
+ * TEST_CHECK takes only one argument (the condition); TEST_CHECK_ also
+ * allows you to specify an error message to print out if the condition fails
+ * (it expects a printf-like format string and its parameters). The macros
+ * return non-zero (condition passes) or 0 (condition fails).
+ *
+ * That is useful when further conditions should be checked only if some
+ * preceding condition passes, as illustrated in this code snippet:
+ *
+ * SomeStruct* ptr = allocate_some_struct();
+ * if(TEST_CHECK(ptr != NULL)) {
+ * TEST_CHECK(ptr->member1 < 100);
+ * TEST_CHECK(ptr->member2 > 200);
+ * }
+ */
+#define TEST_CHECK_(cond,...) acutest_check_((cond), __FILE__, __LINE__, __VA_ARGS__)
+#define TEST_CHECK(cond) acutest_check_((cond), __FILE__, __LINE__, "%s", #cond)
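+
+/* For illustration only (do_something() and the variable name are
+ * hypothetical), the formatted variant can report the offending value:
+ *
+ *   int ret = do_something();
+ *   TEST_CHECK_(ret == 0, "do_something() returned %d, expected 0", ret);
+ */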
+
+
+/* These macros are the same as TEST_CHECK_ and TEST_CHECK except that if the
+ * condition fails, the currently executed unit test is immediately aborted.
+ *
+ * That is done either by calling abort() if the unit test is executed as a
+ * child process; or via longjmp() if the unit test is executed within the
+ * main Acutest process.
+ *
+ * As a side effect of such an abort, your unit tests may leak memory, leave
+ * file descriptors unflushed, and exhibit other artifacts of the abrupt end.
+ *
+ * Therefore you should not use these as a general replacement for TEST_CHECK.
+ * Use them with some caution, especially if your test has side effects on
+ * the outside world (e.g. communicating with some server, inserting into
+ * a database, etc.).
+ */
+#define TEST_ASSERT_(cond,...) \
+ do { \
+ if(!acutest_check_((cond), __FILE__, __LINE__, __VA_ARGS__)) \
+ acutest_abort_(); \
+ } while(0)
+#define TEST_ASSERT(cond) \
+ do { \
+ if(!acutest_check_((cond), __FILE__, __LINE__, "%s", #cond)) \
+ acutest_abort_(); \
+ } while(0)
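+
+/* A minimal usage sketch (the file name and variable are hypothetical):
+ * abort the test early so later checks never touch a missing resource:
+ *
+ *   FILE* f = fopen("fixture.dat", "rb");
+ *   TEST_ASSERT_(f != NULL, "cannot open fixture.dat");
+ *   TEST_CHECK(fgetc(f) == 0x7f);   // safe: f is known to be non-NULL
+ */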
+
+
+#ifdef __cplusplus
+/* Macros to verify that the code (the 1st argument) throws an exception of
+ * the given type (the 2nd argument). (Note these macros are only available in C++.)
+ *
+ * TEST_EXCEPTION_ is like TEST_EXCEPTION but accepts custom printf-like
+ * message.
+ *
+ * For example:
+ *
+ * TEST_EXCEPTION(function_that_throw(), ExpectedExceptionType);
+ *
+ * If the function_that_throw() throws ExpectedExceptionType, the check passes.
+ * If the function throws anything incompatible with ExpectedExceptionType
+ * (or if it does not throw an exception at all), the check fails.
+ */
+#define TEST_EXCEPTION(code, exctype) \
+ do { \
+ bool exc_ok_ = false; \
+ const char *msg_ = NULL; \
+ try { \
+ code; \
+ msg_ = "No exception thrown."; \
+ } catch(exctype const&) { \
+ exc_ok_= true; \
+ } catch(...) { \
+ msg_ = "Unexpected exception thrown."; \
+ } \
+ acutest_check_(exc_ok_, __FILE__, __LINE__, #code " throws " #exctype);\
+ if(msg_ != NULL) \
+ acutest_message_("%s", msg_); \
+ } while(0)
+#define TEST_EXCEPTION_(code, exctype, ...) \
+ do { \
+ bool exc_ok_ = false; \
+ const char *msg_ = NULL; \
+ try { \
+ code; \
+ msg_ = "No exception thrown."; \
+ } catch(exctype const&) { \
+ exc_ok_= true; \
+ } catch(...) { \
+ msg_ = "Unexpected exception thrown."; \
+ } \
+ acutest_check_(exc_ok_, __FILE__, __LINE__, __VA_ARGS__); \
+ if(msg_ != NULL) \
+ acutest_message_("%s", msg_); \
+ } while(0)
+#endif /* #ifdef __cplusplus */
+
+
+/* Sometimes it is useful to split the execution of more complex unit tests
+ * into smaller parts and associate those parts with names.
+ *
+ * This is especially handy if the given unit test is implemented as a loop
+ * over some vector of multiple testing inputs. Using these macros lets you
+ * attach a sort of subtitle to each iteration of the loop (e.g. outputting
+ * the input itself or a name associated with it), so that if any TEST_CHECK
+ * condition fails in the loop, it is easy to see which iteration triggered
+ * the failure, without manually outputting the iteration-specific data in
+ * every single TEST_CHECK inside the loop body.
+ *
+ * TEST_CASE accepts only a single string as the name of the case; TEST_CASE_
+ * provides the full power of printf-like string formatting.
+ *
+ * Note that test cases cannot be nested. Starting a new test case implicitly
+ * ends the previous one. To end a test case explicitly (e.g. to end the last
+ * test case after exiting the loop), you may use TEST_CASE(NULL).
+ */
+#define TEST_CASE_(...) acutest_case_(__VA_ARGS__)
+#define TEST_CASE(name) acutest_case_("%s", name)
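+
+/* For illustration only (test_vectors[], its fields and encode() are
+ * hypothetical), a data-driven test might name each loop iteration:
+ *
+ *   for(i = 0; i < n_test_vectors; i++) {
+ *       TEST_CASE(test_vectors[i].name);
+ *       TEST_CHECK(encode(test_vectors[i].input) == test_vectors[i].expected);
+ *   }
+ *   TEST_CASE(NULL);   // explicitly end the last case
+ */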
+
+
+/* Maximum output per TEST_CASE call. Longer messages are truncated.
+ * You may define another limit prior to including "acutest.h".
+ */
+#ifndef TEST_CASE_MAXSIZE
+ #define TEST_CASE_MAXSIZE 64
+#endif
+
+
+/* printf-like macro for outputting extra information about a failure.
+ *
+ * Its intended use is to output the computed value versus the expected value,
+ * e.g. like this:
+ *
+ * if(!TEST_CHECK(produced == expected)) {
+ * TEST_MSG("Expected: %d", expected);
+ * TEST_MSG("Produced: %d", produced);
+ * }
+ *
+ * Note the message is only written down if the most recent use of any checking
+ * macro (like e.g. TEST_CHECK or TEST_EXCEPTION) in the current test failed.
+ * This means the above is equivalent to just this:
+ *
+ * TEST_CHECK(produced == expected);
+ * TEST_MSG("Expected: %d", expected);
+ * TEST_MSG("Produced: %d", produced);
+ *
+ * The macro can deal with multi-line output fairly well. It also automatically
+ * adds a final new-line if there is none present.
+ */
+#define TEST_MSG(...) acutest_message_(__VA_ARGS__)
+
+
+/* Maximum output per TEST_MSG call. Longer messages are truncated.
+ * You may define another limit prior to including "acutest.h".
+ */
+#ifndef TEST_MSG_MAXSIZE
+ #define TEST_MSG_MAXSIZE 1024
+#endif
+
+
+/* Macro for dumping a block of memory.
+ *
+ * Its intended use is very similar to that of TEST_MSG, but instead of
+ * generating a printf-like message, this is for dumping a raw block of
+ * memory in hexadecimal form:
+ *
+ * TEST_CHECK(size_produced == size_expected &&
+ * memcmp(addr_produced, addr_expected, size_produced) == 0);
+ * TEST_DUMP("Expected:", addr_expected, size_expected);
+ * TEST_DUMP("Produced:", addr_produced, size_produced);
+ */
+#define TEST_DUMP(title, addr, size) acutest_dump_(title, addr, size)
+
+/* Maximum output per TEST_DUMP call (in bytes to dump). Longer blocks are truncated.
+ * You may define another limit prior to including "acutest.h".
+ */
+#ifndef TEST_DUMP_MAXSIZE
+ #define TEST_DUMP_MAXSIZE 1024
+#endif
+
+
+/* Common test initialization/clean-up
+ *
+ * In some test suites, it may be necessary to perform the same sort of
+ * initialization and/or clean-up in all the tests.
+ *
+ * Such test suites may define the macros TEST_INIT and/or TEST_FINI prior to
+ * including this header. The expansion of the macro is then used as the body
+ * of a helper function called just before every single unit test starts
+ * (TEST_INIT) or just after it ends (TEST_FINI).
+ *
+ * Examples of various ways to use the macro TEST_INIT:
+ *
+ * #define TEST_INIT my_init_func();
+ * #define TEST_INIT my_init_func() // Works even without the semicolon
+ * #define TEST_INIT setlocale(LC_ALL, NULL);
+ * #define TEST_INIT { setlocale(LC_ALL, NULL); my_init_func(); }
+ *
+ * TEST_FINI is to be used in the same way.
+ */
+
+
+/**********************
+ *** Implementation ***
+ **********************/
+
+/* The unit test files should not rely on anything below. */
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <setjmp.h>
+
+#if defined(unix) || defined(__unix__) || defined(__unix) || defined(__APPLE__)
+ #define ACUTEST_UNIX_ 1
+ #include <errno.h>
+ #include <libgen.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+ #include <sys/wait.h>
+ #include <signal.h>
+ #include <time.h>
+
+ #if defined CLOCK_PROCESS_CPUTIME_ID && defined CLOCK_MONOTONIC
+ #define ACUTEST_HAS_POSIX_TIMER_ 1
+ #endif
+#endif
+
+#if defined(_gnu_linux_) || defined(__linux__)
+ #define ACUTEST_LINUX_ 1
+ #include <fcntl.h>
+ #include <sys/stat.h>
+#endif
+
+#if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__)
+ #define ACUTEST_WIN_ 1
+ #include <windows.h>
+ #include <io.h>
+#endif
+
+#if defined(__APPLE__)
+ #define ACUTEST_MACOS_
+ #include <assert.h>
+ #include <stdbool.h>
+ #include <sys/types.h>
+ #include <unistd.h>
+ #include <sys/sysctl.h>
+#endif
+
+#ifdef __cplusplus
+ #include <exception>
+#endif
+
+#ifdef __has_include
+ #if __has_include(<valgrind.h>)
+ #include <valgrind.h>
+ #endif
+#endif
+
+/* Enable the use of the non-standard keyword __attribute__ to silence warnings under some compilers */
+#if defined(__GNUC__) || defined(__clang__)
+ #define ACUTEST_ATTRIBUTE_(attr) __attribute__((attr))
+#else
+ #define ACUTEST_ATTRIBUTE_(attr)
+#endif
+
+/* Note our global private identifiers end with '_' to mitigate the risk of
+ * a clash with the unit test implementation. */
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#ifdef _MSC_VER
+ /* In multi-platform code like ours, we cannot use the non-standard "safe"
+ * functions from the Microsoft C library (e.g. sprintf_s()) instead of
+ * standard sprintf(). Hence, let's disable warning C4996. */
+ #pragma warning(push)
+ #pragma warning(disable: 4996)
+#endif
+
+
+struct acutest_test_ {
+ const char* name;
+ void (*func)(void);
+};
+
+struct acutest_test_data_ {
+ unsigned char flags;
+ double duration;
+};
+
+enum {
+ ACUTEST_FLAG_RUN_ = 1 << 0,
+ ACUTEST_FLAG_SUCCESS_ = 1 << 1,
+ ACUTEST_FLAG_FAILURE_ = 1 << 2,
+};
+
+extern const struct acutest_test_ acutest_list_[];
+
+int acutest_check_(int cond, const char* file, int line, const char* fmt, ...);
+void acutest_case_(const char* fmt, ...);
+void acutest_message_(const char* fmt, ...);
+void acutest_dump_(const char* title, const void* addr, size_t size);
+void acutest_abort_(void) ACUTEST_ATTRIBUTE_(noreturn);
+
+
+#ifndef TEST_NO_MAIN
+
+static char* acutest_argv0_ = NULL;
+static size_t acutest_list_size_ = 0;
+static struct acutest_test_data_* acutest_test_data_ = NULL;
+static size_t acutest_count_ = 0;
+static int acutest_no_exec_ = -1;
+static int acutest_no_summary_ = 0;
+static int acutest_tap_ = 0;
+static int acutest_skip_mode_ = 0;
+static int acutest_worker_ = 0;
+static int acutest_worker_index_ = 0;
+static int acutest_cond_failed_ = 0;
+static int acutest_was_aborted_ = 0;
+static FILE *acutest_xml_output_ = NULL;
+
+static int acutest_stat_failed_units_ = 0;
+static int acutest_stat_run_units_ = 0;
+
+static const struct acutest_test_* acutest_current_test_ = NULL;
+static int acutest_current_index_ = 0;
+static char acutest_case_name_[TEST_CASE_MAXSIZE] = "";
+static int acutest_test_already_logged_ = 0;
+static int acutest_case_already_logged_ = 0;
+static int acutest_verbose_level_ = 2;
+static int acutest_test_failures_ = 0;
+static int acutest_colorize_ = 0;
+static int acutest_timer_ = 0;
+
+static int acutest_abort_has_jmp_buf_ = 0;
+static jmp_buf acutest_abort_jmp_buf_;
+
+
+static void
+acutest_cleanup_(void)
+{
+ free((void*) acutest_test_data_);
+}
+
+static void ACUTEST_ATTRIBUTE_(noreturn)
+acutest_exit_(int exit_code)
+{
+ acutest_cleanup_();
+ exit(exit_code);
+}
+
+#if defined ACUTEST_WIN_
+ typedef LARGE_INTEGER acutest_timer_type_;
+ static LARGE_INTEGER acutest_timer_freq_;
+ static acutest_timer_type_ acutest_timer_start_;
+ static acutest_timer_type_ acutest_timer_end_;
+
+ static void
+ acutest_timer_init_(void)
+ {
+ QueryPerformanceFrequency(&acutest_timer_freq_);
+ }
+
+ static void
+ acutest_timer_get_time_(LARGE_INTEGER* ts)
+ {
+ QueryPerformanceCounter(ts);
+ }
+
+ static double
+ acutest_timer_diff_(LARGE_INTEGER start, LARGE_INTEGER end)
+ {
+ double duration = (double)(end.QuadPart - start.QuadPart);
+ duration /= (double)acutest_timer_freq_.QuadPart;
+ return duration;
+ }
+
+ static void
+ acutest_timer_print_diff_(void)
+ {
+ printf("%.6lf secs", acutest_timer_diff_(acutest_timer_start_, acutest_timer_end_));
+ }
+#elif defined ACUTEST_HAS_POSIX_TIMER_
+ static clockid_t acutest_timer_id_;
+ typedef struct timespec acutest_timer_type_;
+ static acutest_timer_type_ acutest_timer_start_;
+ static acutest_timer_type_ acutest_timer_end_;
+
+ static void
+ acutest_timer_init_(void)
+ {
+ if(acutest_timer_ == 1)
+ acutest_timer_id_ = CLOCK_MONOTONIC;
+ else if(acutest_timer_ == 2)
+ acutest_timer_id_ = CLOCK_PROCESS_CPUTIME_ID;
+ }
+
+ static void
+ acutest_timer_get_time_(struct timespec* ts)
+ {
+ clock_gettime(acutest_timer_id_, ts);
+ }
+
+ static double
+ acutest_timer_diff_(struct timespec start, struct timespec end)
+ {
+ double endns;
+ double startns;
+
+ endns = end.tv_sec;
+ endns *= 1e9;
+ endns += end.tv_nsec;
+
+ startns = start.tv_sec;
+ startns *= 1e9;
+ startns += start.tv_nsec;
+
+ return ((endns - startns)/ 1e9);
+ }
+
+ static void
+ acutest_timer_print_diff_(void)
+ {
+ printf("%.6lf secs",
+ acutest_timer_diff_(acutest_timer_start_, acutest_timer_end_));
+ }
+#else
+ typedef int acutest_timer_type_;
+ static acutest_timer_type_ acutest_timer_start_;
+ static acutest_timer_type_ acutest_timer_end_;
+
+ void
+ acutest_timer_init_(void)
+ {}
+
+ static void
+ acutest_timer_get_time_(int* ts)
+ {
+ (void) ts;
+ }
+
+ static double
+ acutest_timer_diff_(int start, int end)
+ {
+ (void) start;
+ (void) end;
+ return 0.0;
+ }
+
+ static void
+ acutest_timer_print_diff_(void)
+ {}
+#endif
+
+#define ACUTEST_COLOR_DEFAULT_ 0
+#define ACUTEST_COLOR_GREEN_ 1
+#define ACUTEST_COLOR_RED_ 2
+#define ACUTEST_COLOR_DEFAULT_INTENSIVE_ 3
+#define ACUTEST_COLOR_GREEN_INTENSIVE_ 4
+#define ACUTEST_COLOR_RED_INTENSIVE_ 5
+
+static int ACUTEST_ATTRIBUTE_(format (printf, 2, 3))
+acutest_colored_printf_(int color, const char* fmt, ...)
+{
+ va_list args;
+ char buffer[256];
+ int n;
+
+ va_start(args, fmt);
+ vsnprintf(buffer, sizeof(buffer), fmt, args);
+ va_end(args);
+ buffer[sizeof(buffer)-1] = '\0';
+
+ if(!acutest_colorize_) {
+ return printf("%s", buffer);
+ }
+
+#if defined ACUTEST_UNIX_
+ {
+ const char* col_str;
+ switch(color) {
+ case ACUTEST_COLOR_GREEN_: col_str = "\033[0;32m"; break;
+ case ACUTEST_COLOR_RED_: col_str = "\033[0;31m"; break;
+ case ACUTEST_COLOR_GREEN_INTENSIVE_: col_str = "\033[1;32m"; break;
+ case ACUTEST_COLOR_RED_INTENSIVE_: col_str = "\033[1;31m"; break;
+ case ACUTEST_COLOR_DEFAULT_INTENSIVE_: col_str = "\033[1m"; break;
+ default: col_str = "\033[0m"; break;
+ }
+ printf("%s", col_str);
+ n = printf("%s", buffer);
+ printf("\033[0m");
+ return n;
+ }
+#elif defined ACUTEST_WIN_
+ {
+ HANDLE h;
+ CONSOLE_SCREEN_BUFFER_INFO info;
+ WORD attr;
+
+ h = GetStdHandle(STD_OUTPUT_HANDLE);
+ GetConsoleScreenBufferInfo(h, &info);
+
+ switch(color) {
+ case ACUTEST_COLOR_GREEN_: attr = FOREGROUND_GREEN; break;
+ case ACUTEST_COLOR_RED_: attr = FOREGROUND_RED; break;
+ case ACUTEST_COLOR_GREEN_INTENSIVE_: attr = FOREGROUND_GREEN | FOREGROUND_INTENSITY; break;
+ case ACUTEST_COLOR_RED_INTENSIVE_: attr = FOREGROUND_RED | FOREGROUND_INTENSITY; break;
+ case ACUTEST_COLOR_DEFAULT_INTENSIVE_: attr = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_INTENSITY; break;
+ default: attr = 0; break;
+ }
+ if(attr != 0)
+ SetConsoleTextAttribute(h, attr);
+ n = printf("%s", buffer);
+ SetConsoleTextAttribute(h, info.wAttributes);
+ return n;
+ }
+#else
+ n = printf("%s", buffer);
+ return n;
+#endif
+}
+
+static void
+acutest_begin_test_line_(const struct acutest_test_* test)
+{
+ if(!acutest_tap_) {
+ if(acutest_verbose_level_ >= 3) {
+ acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Test %s:\n", test->name);
+ acutest_test_already_logged_++;
+ } else if(acutest_verbose_level_ >= 1) {
+ int n;
+ char spaces[48];
+
+ n = acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Test %s... ", test->name);
+ memset(spaces, ' ', sizeof(spaces));
+ if(n < (int) sizeof(spaces))
+ printf("%.*s", (int) sizeof(spaces) - n, spaces);
+ } else {
+ acutest_test_already_logged_ = 1;
+ }
+ }
+}
+
+static void
+acutest_finish_test_line_(int result)
+{
+ if(acutest_tap_) {
+ const char* str = (result == 0) ? "ok" : "not ok";
+
+ printf("%s %d - %s\n", str, acutest_current_index_ + 1, acutest_current_test_->name);
+
+ if(result == 0 && acutest_timer_) {
+ printf("# Duration: ");
+ acutest_timer_print_diff_();
+ printf("\n");
+ }
+ } else {
+ int color = (result == 0) ? ACUTEST_COLOR_GREEN_INTENSIVE_ : ACUTEST_COLOR_RED_INTENSIVE_;
+ const char* str = (result == 0) ? "OK" : "FAILED";
+ printf("[ ");
+ acutest_colored_printf_(color, "%s", str);
+ printf(" ]");
+
+ if(result == 0 && acutest_timer_) {
+ printf(" ");
+ acutest_timer_print_diff_();
+ }
+
+ printf("\n");
+ }
+}
+
+static void
+acutest_line_indent_(int level)
+{
+ static const char spaces[] = " ";
+ int n = level * 2;
+
+ if(acutest_tap_ && n > 0) {
+ n--;
+ printf("#");
+ }
+
+ while(n > 16) {
+ printf("%s", spaces);
+ n -= 16;
+ }
+ printf("%.*s", n, spaces);
+}
+
+int ACUTEST_ATTRIBUTE_(format (printf, 4, 5))
+acutest_check_(int cond, const char* file, int line, const char* fmt, ...)
+{
+ const char *result_str;
+ int result_color;
+ int verbose_level;
+
+ if(cond) {
+ result_str = "ok";
+ result_color = ACUTEST_COLOR_GREEN_;
+ verbose_level = 3;
+ } else {
+ if(!acutest_test_already_logged_ && acutest_current_test_ != NULL)
+ acutest_finish_test_line_(-1);
+
+ result_str = "failed";
+ result_color = ACUTEST_COLOR_RED_;
+ verbose_level = 2;
+ acutest_test_failures_++;
+ acutest_test_already_logged_++;
+ }
+
+ if(acutest_verbose_level_ >= verbose_level) {
+ va_list args;
+
+ if(!acutest_case_already_logged_ && acutest_case_name_[0]) {
+ acutest_line_indent_(1);
+ acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Case %s:\n", acutest_case_name_);
+ acutest_test_already_logged_++;
+ acutest_case_already_logged_++;
+ }
+
+ acutest_line_indent_(acutest_case_name_[0] ? 2 : 1);
+ if(file != NULL) {
+#ifdef ACUTEST_WIN_
+ const char* lastsep1 = strrchr(file, '\\');
+ const char* lastsep2 = strrchr(file, '/');
+ if(lastsep1 == NULL)
+ lastsep1 = file-1;
+ if(lastsep2 == NULL)
+ lastsep2 = file-1;
+ file = (lastsep1 > lastsep2 ? lastsep1 : lastsep2) + 1;
+#else
+ const char* lastsep = strrchr(file, '/');
+ if(lastsep != NULL)
+ file = lastsep+1;
+#endif
+ printf("%s:%d: Check ", file, line);
+ }
+
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+
+ printf("... ");
+ acutest_colored_printf_(result_color, "%s", result_str);
+ printf("\n");
+ acutest_test_already_logged_++;
+ }
+
+ acutest_cond_failed_ = (cond == 0);
+ return !acutest_cond_failed_;
+}
+
+void ACUTEST_ATTRIBUTE_(format (printf, 1, 2))
+acutest_case_(const char* fmt, ...)
+{
+ va_list args;
+
+ if(acutest_verbose_level_ < 2)
+ return;
+
+ if(acutest_case_name_[0]) {
+ acutest_case_already_logged_ = 0;
+ acutest_case_name_[0] = '\0';
+ }
+
+ if(fmt == NULL)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(acutest_case_name_, sizeof(acutest_case_name_) - 1, fmt, args);
+ va_end(args);
+ acutest_case_name_[sizeof(acutest_case_name_) - 1] = '\0';
+
+ if(acutest_verbose_level_ >= 3) {
+ acutest_line_indent_(1);
+ acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Case %s:\n", acutest_case_name_);
+ acutest_test_already_logged_++;
+ acutest_case_already_logged_++;
+ }
+}
+
+void ACUTEST_ATTRIBUTE_(format (printf, 1, 2))
+acutest_message_(const char* fmt, ...)
+{
+ char buffer[TEST_MSG_MAXSIZE];
+ char* line_beg;
+ char* line_end;
+ va_list args;
+
+ if(acutest_verbose_level_ < 2)
+ return;
+
+ /* We allow extra message only when something is already wrong in the
+ * current test. */
+ if(acutest_current_test_ == NULL || !acutest_cond_failed_)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(buffer, TEST_MSG_MAXSIZE, fmt, args);
+ va_end(args);
+ buffer[TEST_MSG_MAXSIZE-1] = '\0';
+
+ line_beg = buffer;
+ while(1) {
+ line_end = strchr(line_beg, '\n');
+ if(line_end == NULL)
+ break;
+ acutest_line_indent_(acutest_case_name_[0] ? 3 : 2);
+ printf("%.*s\n", (int)(line_end - line_beg), line_beg);
+ line_beg = line_end + 1;
+ }
+ if(line_beg[0] != '\0') {
+ acutest_line_indent_(acutest_case_name_[0] ? 3 : 2);
+ printf("%s\n", line_beg);
+ }
+}
+
+void
+acutest_dump_(const char* title, const void* addr, size_t size)
+{
+ static const size_t BYTES_PER_LINE = 16;
+ size_t line_beg;
+ size_t truncate = 0;
+
+ if(acutest_verbose_level_ < 2)
+ return;
+
+ /* We allow extra message only when something is already wrong in the
+ * current test. */
+ if(acutest_current_test_ == NULL || !acutest_cond_failed_)
+ return;
+
+ if(size > TEST_DUMP_MAXSIZE) {
+ truncate = size - TEST_DUMP_MAXSIZE;
+ size = TEST_DUMP_MAXSIZE;
+ }
+
+ acutest_line_indent_(acutest_case_name_[0] ? 3 : 2);
+ printf((title[strlen(title)-1] == ':') ? "%s\n" : "%s:\n", title);
+
+ for(line_beg = 0; line_beg < size; line_beg += BYTES_PER_LINE) {
+ size_t line_end = line_beg + BYTES_PER_LINE;
+ size_t off;
+
+ acutest_line_indent_(acutest_case_name_[0] ? 4 : 3);
+ printf("%08lx: ", (unsigned long)line_beg);
+ for(off = line_beg; off < line_end; off++) {
+ if(off < size)
+ printf(" %02x", ((const unsigned char*)addr)[off]);
+ else
+ printf(" ");
+ }
+
+ printf(" ");
+ for(off = line_beg; off < line_end; off++) {
+ unsigned char byte = ((const unsigned char*)addr)[off];
+ if(off < size)
+ printf("%c", (iscntrl(byte) ? '.' : byte));
+ else
+ break;
+ }
+
+ printf("\n");
+ }
+
+ if(truncate > 0) {
+ acutest_line_indent_(acutest_case_name_[0] ? 4 : 3);
+ printf(" ... (and more %u bytes)\n", (unsigned) truncate);
+ }
+}
+
+/* This is called just before each test */
+static void
+acutest_init_(const char *test_name)
+{
+#ifdef TEST_INIT
+ TEST_INIT
+ ; /* Allow for a single unterminated function call */
+#endif
+
+ /* Suppress any warnings about unused variable. */
+ (void) test_name;
+}
+
+/* This is called after each test */
+static void
+acutest_fini_(const char *test_name)
+{
+#ifdef TEST_FINI
+ TEST_FINI
+ ; /* Allow for a single unterminated function call */
+#endif
+
+ /* Suppress any warnings about unused variable. */
+ (void) test_name;
+}
+
+void
+acutest_abort_(void)
+{
+ if(acutest_abort_has_jmp_buf_) {
+ longjmp(acutest_abort_jmp_buf_, 1);
+ } else {
+ if(acutest_current_test_ != NULL)
+ acutest_fini_(acutest_current_test_->name);
+ abort();
+ }
+}
+
+static void
+acutest_list_names_(void)
+{
+ const struct acutest_test_* test;
+
+ printf("Unit tests:\n");
+ for(test = &acutest_list_[0]; test->func != NULL; test++)
+ printf(" %s\n", test->name);
+}
+
+static void
+acutest_remember_(int i)
+{
+ if(acutest_test_data_[i].flags & ACUTEST_FLAG_RUN_)
+ return;
+
+ acutest_test_data_[i].flags |= ACUTEST_FLAG_RUN_;
+ acutest_count_++;
+}
+
+static void
+acutest_set_success_(int i, int success)
+{
+ acutest_test_data_[i].flags |= success ? ACUTEST_FLAG_SUCCESS_ : ACUTEST_FLAG_FAILURE_;
+}
+
+static void
+acutest_set_duration_(int i, double duration)
+{
+ acutest_test_data_[i].duration = duration;
+}
+
+static int
+acutest_name_contains_word_(const char* name, const char* pattern)
+{
+ static const char word_delim[] = " \t-_/.,:;";
+ const char* substr;
+ size_t pattern_len;
+
+ pattern_len = strlen(pattern);
+
+ substr = strstr(name, pattern);
+ while(substr != NULL) {
+ int starts_on_word_boundary = (substr == name || strchr(word_delim, substr[-1]) != NULL);
+ int ends_on_word_boundary = (substr[pattern_len] == '\0' || strchr(word_delim, substr[pattern_len]) != NULL);
+
+ if(starts_on_word_boundary && ends_on_word_boundary)
+ return 1;
+
+ substr = strstr(substr+1, pattern);
+ }
+
+ return 0;
+}
+
+static int
+acutest_lookup_(const char* pattern)
+{
+ int i;
+ int n = 0;
+
+ /* Try exact match. */
+ for(i = 0; i < (int) acutest_list_size_; i++) {
+ if(strcmp(acutest_list_[i].name, pattern) == 0) {
+ acutest_remember_(i);
+ n++;
+ break;
+ }
+ }
+ if(n > 0)
+ return n;
+
+ /* Try word match. */
+ for(i = 0; i < (int) acutest_list_size_; i++) {
+ if(acutest_name_contains_word_(acutest_list_[i].name, pattern)) {
+ acutest_remember_(i);
+ n++;
+ }
+ }
+ if(n > 0)
+ return n;
+
+ /* Try relaxed match. */
+ for(i = 0; i < (int) acutest_list_size_; i++) {
+ if(strstr(acutest_list_[i].name, pattern) != NULL) {
+ acutest_remember_(i);
+ n++;
+ }
+ }
+
+ return n;
+}
+
+
+/* Called if anything goes bad in Acutest, or if the unit test ends in some
+ * way other than by returning normally from its function (e.g. an exception
+ * or abnormal child process termination). */
+static void ACUTEST_ATTRIBUTE_(format (printf, 1, 2))
+acutest_error_(const char* fmt, ...)
+{
+ if(acutest_verbose_level_ == 0)
+ return;
+
+ if(acutest_verbose_level_ >= 2) {
+ va_list args;
+
+ acutest_line_indent_(1);
+ if(acutest_verbose_level_ >= 3)
+ acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "ERROR: ");
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+ printf("\n");
+ }
+
+ if(acutest_verbose_level_ >= 3) {
+ printf("\n");
+ }
+}
+
+/* Call the given test unit function directly. */
+static int
+acutest_do_run_(const struct acutest_test_* test, int index)
+{
+ int status = -1;
+
+ acutest_was_aborted_ = 0;
+ acutest_current_test_ = test;
+ acutest_current_index_ = index;
+ acutest_test_failures_ = 0;
+ acutest_test_already_logged_ = 0;
+ acutest_cond_failed_ = 0;
+
+#ifdef __cplusplus
+ try {
+#endif
+ acutest_init_(test->name);
+ acutest_begin_test_line_(test);
+
+ /* This is good to do in case the test unit crashes. */
+ fflush(stdout);
+ fflush(stderr);
+
+ if(!acutest_worker_) {
+ acutest_abort_has_jmp_buf_ = 1;
+ if(setjmp(acutest_abort_jmp_buf_) != 0) {
+ acutest_was_aborted_ = 1;
+ goto aborted;
+ }
+ }
+
+ acutest_timer_get_time_(&acutest_timer_start_);
+ test->func();
+aborted:
+ acutest_abort_has_jmp_buf_ = 0;
+ acutest_timer_get_time_(&acutest_timer_end_);
+
+ if(acutest_verbose_level_ >= 3) {
+ acutest_line_indent_(1);
+ if(acutest_test_failures_ == 0) {
+ acutest_colored_printf_(ACUTEST_COLOR_GREEN_INTENSIVE_, "SUCCESS: ");
+ printf("All conditions have passed.\n");
+
+ if(acutest_timer_) {
+ acutest_line_indent_(1);
+ printf("Duration: ");
+ acutest_timer_print_diff_();
+ printf("\n");
+ }
+ } else {
+ acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: ");
+ if(!acutest_was_aborted_) {
+ printf("%d condition%s %s failed.\n",
+ acutest_test_failures_,
+ (acutest_test_failures_ == 1) ? "" : "s",
+ (acutest_test_failures_ == 1) ? "has" : "have");
+ } else {
+ printf("Aborted.\n");
+ }
+ }
+ printf("\n");
+ } else if(acutest_verbose_level_ >= 1 && acutest_test_failures_ == 0) {
+ acutest_finish_test_line_(0);
+ }
+
+ status = (acutest_test_failures_ == 0) ? 0 : -1;
+
+#ifdef __cplusplus
+ } catch(std::exception& e) {
+ const char* what = e.what();
+ acutest_check_(0, NULL, 0, "Threw std::exception");
+ if(what != NULL)
+ acutest_message_("std::exception::what(): %s", what);
+
+ if(acutest_verbose_level_ >= 3) {
+ acutest_line_indent_(1);
+ acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: ");
+ printf("C++ exception.\n\n");
+ }
+ } catch(...) {
+ acutest_check_(0, NULL, 0, "Threw an exception");
+
+ if(acutest_verbose_level_ >= 3) {
+ acutest_line_indent_(1);
+ acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: ");
+ printf("C++ exception.\n\n");
+ }
+ }
+#endif
+
+ acutest_fini_(test->name);
+ acutest_case_(NULL);
+ acutest_current_test_ = NULL;
+
+ return status;
+}
+
+/* Trigger the unit test. If possible (and not suppressed) it starts a child
+ * process which calls acutest_do_run_(); otherwise it calls acutest_do_run_()
+ * directly. */
+static void
+acutest_run_(const struct acutest_test_* test, int index, int master_index)
+{
+ int failed = 1;
+ acutest_timer_type_ start, end;
+
+ acutest_current_test_ = test;
+ acutest_test_already_logged_ = 0;
+ acutest_timer_get_time_(&start);
+
+ if(!acutest_no_exec_) {
+
+#if defined(ACUTEST_UNIX_)
+
+ pid_t pid;
+ int exit_code;
+
+ /* Make sure the child starts with empty I/O buffers. */
+ fflush(stdout);
+ fflush(stderr);
+
+ pid = fork();
+ if(pid == (pid_t)-1) {
+ acutest_error_("Cannot fork. %s [%d]", strerror(errno), errno);
+ failed = 1;
+ } else if(pid == 0) {
+ /* Child: Do the test. */
+ acutest_worker_ = 1;
+ failed = (acutest_do_run_(test, index) != 0);
+ acutest_exit_(failed ? 1 : 0);
+ } else {
+ /* Parent: Wait until child terminates and analyze its exit code. */
+ waitpid(pid, &exit_code, 0);
+ if(WIFEXITED(exit_code)) {
+ switch(WEXITSTATUS(exit_code)) {
+ case 0: failed = 0; break; /* test has passed. */
+ case 1: /* noop */ break; /* "normal" failure. */
+ default: acutest_error_("Unexpected exit code [%d]", WEXITSTATUS(exit_code));
+ }
+ } else if(WIFSIGNALED(exit_code)) {
+ char tmp[32];
+ const char* signame;
+ switch(WTERMSIG(exit_code)) {
+ case SIGINT: signame = "SIGINT"; break;
+ case SIGHUP: signame = "SIGHUP"; break;
+ case SIGQUIT: signame = "SIGQUIT"; break;
+ case SIGABRT: signame = "SIGABRT"; break;
+ case SIGKILL: signame = "SIGKILL"; break;
+ case SIGSEGV: signame = "SIGSEGV"; break;
+ case SIGILL: signame = "SIGILL"; break;
+ case SIGTERM: signame = "SIGTERM"; break;
+ default: sprintf(tmp, "signal %d", WTERMSIG(exit_code)); signame = tmp; break;
+ }
+ acutest_error_("Test interrupted by %s.", signame);
+ } else {
+ acutest_error_("Test ended in an unexpected way [%d].", exit_code);
+ }
+ }
+
+#elif defined(ACUTEST_WIN_)
+
+ char buffer[512] = {0};
+ STARTUPINFOA startupInfo;
+ PROCESS_INFORMATION processInfo;
+ DWORD exitCode;
+
+ /* Windows has no fork(). So we propagate all info into the child
+ * through command line arguments. */
+ _snprintf(buffer, sizeof(buffer)-1,
+ "%s --worker=%d %s --no-exec --no-summary %s --verbose=%d --color=%s -- \"%s\"",
+ acutest_argv0_, index, acutest_timer_ ? "--time" : "",
+ acutest_tap_ ? "--tap" : "", acutest_verbose_level_,
+ acutest_colorize_ ? "always" : "never",
+ test->name);
+ memset(&startupInfo, 0, sizeof(startupInfo));
+ startupInfo.cb = sizeof(STARTUPINFO);
+ if(CreateProcessA(NULL, buffer, NULL, NULL, FALSE, 0, NULL, NULL, &startupInfo, &processInfo)) {
+ WaitForSingleObject(processInfo.hProcess, INFINITE);
+ GetExitCodeProcess(processInfo.hProcess, &exitCode);
+ CloseHandle(processInfo.hThread);
+ CloseHandle(processInfo.hProcess);
+ failed = (exitCode != 0);
+ if(exitCode > 1) {
+ switch(exitCode) {
+ case 3: acutest_error_("Aborted."); break;
+ case 0xC0000005: acutest_error_("Access violation."); break;
+ default: acutest_error_("Test ended in an unexpected way [%lu].", exitCode); break;
+ }
+ }
+ } else {
+ acutest_error_("Cannot create unit test subprocess [%ld].", GetLastError());
+ failed = 1;
+ }
+
+#else
+
+ /* A platform where we don't know how to run child process. */
+ failed = (acutest_do_run_(test, index) != 0);
+
+#endif
+
+ } else {
+ /* Child processes suppressed through --no-exec. */
+ failed = (acutest_do_run_(test, index) != 0);
+ }
+ acutest_timer_get_time_(&end);
+
+ acutest_current_test_ = NULL;
+
+ acutest_stat_run_units_++;
+ if(failed)
+ acutest_stat_failed_units_++;
+
+ acutest_set_success_(master_index, !failed);
+ acutest_set_duration_(master_index, acutest_timer_diff_(start, end));
+}
+
+#if defined(ACUTEST_WIN_)
+/* Callback for SEH events. */
+static LONG CALLBACK
+acutest_seh_exception_filter_(EXCEPTION_POINTERS *ptrs)
+{
+ acutest_check_(0, NULL, 0, "Unhandled SEH exception");
+ acutest_message_("Exception code: 0x%08lx", ptrs->ExceptionRecord->ExceptionCode);
+ acutest_message_("Exception address: 0x%p", ptrs->ExceptionRecord->ExceptionAddress);
+
+ fflush(stdout);
+ fflush(stderr);
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+#endif
+
+
+#define ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ 0x0001
+#define ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_ 0x0002
+
+#define ACUTEST_CMDLINE_OPTID_NONE_ 0
+#define ACUTEST_CMDLINE_OPTID_UNKNOWN_ (-0x7fffffff + 0)
+#define ACUTEST_CMDLINE_OPTID_MISSINGARG_ (-0x7fffffff + 1)
+#define ACUTEST_CMDLINE_OPTID_BOGUSARG_ (-0x7fffffff + 2)
+
+typedef struct acutest_test_CMDLINE_OPTION_ {
+ char shortname;
+ const char* longname;
+ int id;
+ unsigned flags;
+} ACUTEST_CMDLINE_OPTION_;
+
+static int
+acutest_cmdline_handle_short_opt_group_(const ACUTEST_CMDLINE_OPTION_* options,
+ const char* arggroup,
+ int (*callback)(int /*optval*/, const char* /*arg*/))
+{
+ const ACUTEST_CMDLINE_OPTION_* opt;
+ int i;
+ int ret = 0;
+
+ for(i = 0; arggroup[i] != '\0'; i++) {
+ for(opt = options; opt->id != 0; opt++) {
+ if(arggroup[i] == opt->shortname)
+ break;
+ }
+
+ if(opt->id != 0 && !(opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_)) {
+ ret = callback(opt->id, NULL);
+ } else {
+ /* Unknown option. */
+ char badoptname[3];
+ badoptname[0] = '-';
+ badoptname[1] = arggroup[i];
+ badoptname[2] = '\0';
+ ret = callback((opt->id != 0 ? ACUTEST_CMDLINE_OPTID_MISSINGARG_ : ACUTEST_CMDLINE_OPTID_UNKNOWN_),
+ badoptname);
+ }
+
+ if(ret != 0)
+ break;
+ }
+
+ return ret;
+}
+
+#define ACUTEST_CMDLINE_AUXBUF_SIZE_ 32
+
+static int
+acutest_cmdline_read_(const ACUTEST_CMDLINE_OPTION_* options, int argc, char** argv,
+ int (*callback)(int /*optval*/, const char* /*arg*/))
+{
+
+ const ACUTEST_CMDLINE_OPTION_* opt;
+ char auxbuf[ACUTEST_CMDLINE_AUXBUF_SIZE_+1];
+ int after_doubledash = 0;
+ int i = 1;
+ int ret = 0;
+
+ auxbuf[ACUTEST_CMDLINE_AUXBUF_SIZE_] = '\0';
+
+ while(i < argc) {
+ if(after_doubledash || strcmp(argv[i], "-") == 0) {
+ /* Non-option argument. */
+ ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]);
+ } else if(strcmp(argv[i], "--") == 0) {
+ /* End of options. All the remaining members are non-option arguments. */
+ after_doubledash = 1;
+ } else if(argv[i][0] != '-') {
+ /* Non-option argument. */
+ ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]);
+ } else {
+ for(opt = options; opt->id != 0; opt++) {
+ if(opt->longname != NULL && strncmp(argv[i], "--", 2) == 0) {
+ size_t len = strlen(opt->longname);
+ if(strncmp(argv[i]+2, opt->longname, len) == 0) {
+ /* Regular long option. */
+ if(argv[i][2+len] == '\0') {
+ /* with no argument provided. */
+ if(!(opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_))
+ ret = callback(opt->id, NULL);
+ else
+ ret = callback(ACUTEST_CMDLINE_OPTID_MISSINGARG_, argv[i]);
+ break;
+ } else if(argv[i][2+len] == '=') {
+ /* with an argument provided. */
+ if(opt->flags & (ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ | ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_)) {
+ ret = callback(opt->id, argv[i]+2+len+1);
+ } else {
+ sprintf(auxbuf, "--%s", opt->longname);
+ ret = callback(ACUTEST_CMDLINE_OPTID_BOGUSARG_, auxbuf);
+ }
+ break;
+ } else {
+ continue;
+ }
+ }
+ } else if(opt->shortname != '\0' && argv[i][0] == '-') {
+ if(argv[i][1] == opt->shortname) {
+ /* Regular short option. */
+ if(opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_) {
+ if(argv[i][2] != '\0')
+ ret = callback(opt->id, argv[i]+2);
+ else if(i+1 < argc)
+ ret = callback(opt->id, argv[++i]);
+ else
+ ret = callback(ACUTEST_CMDLINE_OPTID_MISSINGARG_, argv[i]);
+ break;
+ } else {
+ ret = callback(opt->id, NULL);
+
+ /* There might be more (argument-less) short options
+ * grouped together. */
+ if(ret == 0 && argv[i][2] != '\0')
+ ret = acutest_cmdline_handle_short_opt_group_(options, argv[i]+2, callback);
+ break;
+ }
+ }
+ }
+ }
+
+ if(opt->id == 0) { /* still not handled? */
+ if(argv[i][0] != '-') {
+ /* Non-option argument. */
+ ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]);
+ } else {
+ /* Unknown option. */
+ char* badoptname = argv[i];
+
+ if(strncmp(badoptname, "--", 2) == 0) {
+ /* Strip any argument from the long option. */
+ char* assignment = strchr(badoptname, '=');
+ if(assignment != NULL) {
+ size_t len = assignment - badoptname;
+ if(len > ACUTEST_CMDLINE_AUXBUF_SIZE_)
+ len = ACUTEST_CMDLINE_AUXBUF_SIZE_;
+ strncpy(auxbuf, badoptname, len);
+ auxbuf[len] = '\0';
+ badoptname = auxbuf;
+ }
+ }
+
+ ret = callback(ACUTEST_CMDLINE_OPTID_UNKNOWN_, badoptname);
+ }
+ }
+ }
+
+ if(ret != 0)
+ return ret;
+ i++;
+ }
+
+ return ret;
+}
+
+static void
+acutest_help_(void)
+{
+ printf("Usage: %s [options] [test...]\n", acutest_argv0_);
+ printf("\n");
+ printf("Run the specified unit tests; or if the option '--skip' is used, run all\n");
+ printf("tests in the suite but those listed. By default, if no tests are specified\n");
+ printf("on the command line, all unit tests in the suite are run.\n");
+ printf("\n");
+ printf("Options:\n");
+ printf(" -s, --skip Execute all unit tests but the listed ones\n");
+ printf(" --exec[=WHEN] If supported, execute unit tests as child processes\n");
+ printf(" (WHEN is one of 'auto', 'always', 'never')\n");
+ printf(" -E, --no-exec Same as --exec=never\n");
+#if defined ACUTEST_WIN_
+ printf(" -t, --time Measure test duration\n");
+#elif defined ACUTEST_HAS_POSIX_TIMER_
+ printf(" -t, --time Measure test duration (real time)\n");
+ printf(" --time=TIMER Measure test duration, using given timer\n");
+ printf(" (TIMER is one of 'real', 'cpu')\n");
+#endif
+ printf(" --no-summary Suppress printing of test results summary\n");
+ printf(" --tap Produce TAP-compliant output\n");
+ printf(" (See https://testanything.org/)\n");
+ printf(" -x, --xml-output=FILE Enable XUnit output to the given file\n");
+ printf(" -l, --list List unit tests in the suite and exit\n");
+ printf(" -v, --verbose Make output more verbose\n");
+ printf(" --verbose=LEVEL Set verbose level to LEVEL:\n");
+ printf(" 0 ... Be silent\n");
+ printf(" 1 ... Output one line per test (and summary)\n");
+ printf(" 2 ... As 1 and failed conditions (this is default)\n");
+ printf(" 3 ... As 1 and all conditions (and extended summary)\n");
+ printf(" -q, --quiet Same as --verbose=0\n");
+ printf(" --color[=WHEN] Enable colorized output\n");
+ printf(" (WHEN is one of 'auto', 'always', 'never')\n");
+ printf(" --no-color Same as --color=never\n");
+ printf(" -h, --help Display this help and exit\n");
+
+ if(acutest_list_size_ < 16) {
+ printf("\n");
+ acutest_list_names_();
+ }
+}
+
+static const ACUTEST_CMDLINE_OPTION_ acutest_cmdline_options_[] = {
+ { 's', "skip", 's', 0 },
+ { 0, "exec", 'e', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ },
+ { 'E', "no-exec", 'E', 0 },
+#if defined ACUTEST_WIN_
+ { 't', "time", 't', 0 },
+ { 0, "timer", 't', 0 }, /* kept for compatibility */
+#elif defined ACUTEST_HAS_POSIX_TIMER_
+ { 't', "time", 't', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ },
+ { 0, "timer", 't', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ }, /* kept for compatibility */
+#endif
+ { 0, "no-summary", 'S', 0 },
+ { 0, "tap", 'T', 0 },
+ { 'l', "list", 'l', 0 },
+ { 'v', "verbose", 'v', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ },
+ { 'q', "quiet", 'q', 0 },
+ { 0, "color", 'c', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ },
+ { 0, "no-color", 'C', 0 },
+ { 'h', "help", 'h', 0 },
+ { 0, "worker", 'w', ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_ }, /* internal */
+ { 'x', "xml-output", 'x', ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_ },
+ { 0, NULL, 0, 0 }
+};
+
+static int
+acutest_cmdline_callback_(int id, const char* arg)
+{
+ switch(id) {
+ case 's':
+ acutest_skip_mode_ = 1;
+ break;
+
+ case 'e':
+ if(arg == NULL || strcmp(arg, "always") == 0) {
+ acutest_no_exec_ = 0;
+ } else if(strcmp(arg, "never") == 0) {
+ acutest_no_exec_ = 1;
+ } else if(strcmp(arg, "auto") == 0) {
+ /*noop*/
+ } else {
+ fprintf(stderr, "%s: Unrecognized argument '%s' for option --exec.\n", acutest_argv0_, arg);
+ fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_);
+ acutest_exit_(2);
+ }
+ break;
+
+ case 'E':
+ acutest_no_exec_ = 1;
+ break;
+
+ case 't':
+#if defined ACUTEST_WIN_ || defined ACUTEST_HAS_POSIX_TIMER_
+ if(arg == NULL || strcmp(arg, "real") == 0) {
+ acutest_timer_ = 1;
+ #ifndef ACUTEST_WIN_
+ } else if(strcmp(arg, "cpu") == 0) {
+ acutest_timer_ = 2;
+ #endif
+ } else {
+ fprintf(stderr, "%s: Unrecognized argument '%s' for option --time.\n", acutest_argv0_, arg);
+ fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_);
+ acutest_exit_(2);
+ }
+#endif
+ break;
+
+ case 'S':
+ acutest_no_summary_ = 1;
+ break;
+
+ case 'T':
+ acutest_tap_ = 1;
+ break;
+
+ case 'l':
+ acutest_list_names_();
+ acutest_exit_(0);
+ break;
+
+ case 'v':
+ acutest_verbose_level_ = (arg != NULL ? atoi(arg) : acutest_verbose_level_+1);
+ break;
+
+ case 'q':
+ acutest_verbose_level_ = 0;
+ break;
+
+ case 'c':
+ if(arg == NULL || strcmp(arg, "always") == 0) {
+ acutest_colorize_ = 1;
+ } else if(strcmp(arg, "never") == 0) {
+ acutest_colorize_ = 0;
+ } else if(strcmp(arg, "auto") == 0) {
+ /*noop*/
+ } else {
+ fprintf(stderr, "%s: Unrecognized argument '%s' for option --color.\n", acutest_argv0_, arg);
+ fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_);
+ acutest_exit_(2);
+ }
+ break;
+
+ case 'C':
+ acutest_colorize_ = 0;
+ break;
+
+ case 'h':
+ acutest_help_();
+ acutest_exit_(0);
+ break;
+
+ case 'w':
+ acutest_worker_ = 1;
+ acutest_worker_index_ = atoi(arg);
+ break;
+ case 'x':
+ acutest_xml_output_ = fopen(arg, "w");
+ if (!acutest_xml_output_) {
+ fprintf(stderr, "Unable to open '%s': %s\n", arg, strerror(errno));
+ acutest_exit_(2);
+ }
+ break;
+
+ case 0:
+ if(acutest_lookup_(arg) == 0) {
+ fprintf(stderr, "%s: Unrecognized unit test '%s'\n", acutest_argv0_, arg);
+ fprintf(stderr, "Try '%s --list' for list of unit tests.\n", acutest_argv0_);
+ acutest_exit_(2);
+ }
+ break;
+
+ case ACUTEST_CMDLINE_OPTID_UNKNOWN_:
+ fprintf(stderr, "Unrecognized command line option '%s'.\n", arg);
+ fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_);
+ acutest_exit_(2);
+ break;
+
+ case ACUTEST_CMDLINE_OPTID_MISSINGARG_:
+ fprintf(stderr, "The command line option '%s' requires an argument.\n", arg);
+ fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_);
+ acutest_exit_(2);
+ break;
+
+ case ACUTEST_CMDLINE_OPTID_BOGUSARG_:
+ fprintf(stderr, "The command line option '%s' does not expect an argument.\n", arg);
+ fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_);
+ acutest_exit_(2);
+ break;
+ }
+
+ return 0;
+}
+
+
+#ifdef ACUTEST_LINUX_
+static int
+acutest_is_tracer_present_(void)
+{
+ /* Must be large enough for the line 'TracerPid: ${PID}' to fit in. */
+ static const int OVERLAP = 32;
+
+ char buf[512];
+ int tracer_present = 0;
+ int fd;
+ size_t n_read = 0;
+
+ fd = open("/proc/self/status", O_RDONLY);
+ if(fd == -1)
+ return 0;
+
+ while(1) {
+ static const char pattern[] = "TracerPid:";
+ const char* field;
+
+ while(n_read < sizeof(buf) - 1) {
+ ssize_t n;
+
+ n = read(fd, buf + n_read, sizeof(buf) - 1 - n_read);
+ if(n <= 0)
+ break;
+ n_read += n;
+ }
+ buf[n_read] = '\0';
+
+ field = strstr(buf, pattern);
+ if(field != NULL && field < buf + sizeof(buf) - OVERLAP) {
+ pid_t tracer_pid = (pid_t) atoi(field + sizeof(pattern) - 1);
+ tracer_present = (tracer_pid != 0);
+ break;
+ }
+
+ if(n_read == sizeof(buf) - 1) {
+ /* Move the tail with the potentially incomplete line we're looking
+ * for to the beginning of the buffer. */
+ memmove(buf, buf + sizeof(buf) - 1 - OVERLAP, OVERLAP);
+ n_read = OVERLAP;
+ } else {
+ break;
+ }
+ }
+
+ close(fd);
+ return tracer_present;
+}
+#endif
+
+#ifdef ACUTEST_MACOS_
+static bool
+acutest_AmIBeingDebugged(void)
+{
+ int junk;
+ int mib[4];
+ struct kinfo_proc info;
+ size_t size;
+
+ // Initialize the flags so that, if sysctl fails for some bizarre
+ // reason, we get a predictable result.
+ info.kp_proc.p_flag = 0;
+
+ // Initialize mib, which tells sysctl the info we want, in this case
+ // we're looking for information about a specific process ID.
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PID;
+ mib[3] = getpid();
+
+ // Call sysctl.
+ size = sizeof(info);
+ junk = sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, NULL, 0);
+ assert(junk == 0);
+
+ // We're being debugged if the P_TRACED flag is set.
+ return ( (info.kp_proc.p_flag & P_TRACED) != 0 );
+}
+#endif
+
+int
+main(int argc, char** argv)
+{
+ int i;
+
+ acutest_argv0_ = argv[0];
+
+#if defined ACUTEST_UNIX_
+ acutest_colorize_ = isatty(STDOUT_FILENO);
+#elif defined ACUTEST_WIN_
+ #if defined _BORLANDC_
+ acutest_colorize_ = isatty(_fileno(stdout));
+ #else
+ acutest_colorize_ = _isatty(_fileno(stdout));
+ #endif
+#else
+ acutest_colorize_ = 0;
+#endif
+
+ /* Count all test units */
+ acutest_list_size_ = 0;
+ for(i = 0; acutest_list_[i].func != NULL; i++)
+ acutest_list_size_++;
+
+ acutest_test_data_ = (struct acutest_test_data_*)calloc(acutest_list_size_, sizeof(struct acutest_test_data_));
+ if(acutest_test_data_ == NULL) {
+ fprintf(stderr, "Out of memory.\n");
+ acutest_exit_(2);
+ }
+
+ /* Parse options */
+ acutest_cmdline_read_(acutest_cmdline_options_, argc, argv, acutest_cmdline_callback_);
+
+ /* Initialize the proper timer. */
+ acutest_timer_init_();
+
+#if defined(ACUTEST_WIN_)
+ SetUnhandledExceptionFilter(acutest_seh_exception_filter_);
+#ifdef _MSC_VER
+ _set_abort_behavior(0, _WRITE_ABORT_MSG);
+#endif
+#endif
+
+ /* By default, we want to run all tests. */
+ if(acutest_count_ == 0) {
+ for(i = 0; acutest_list_[i].func != NULL; i++)
+ acutest_remember_(i);
+ }
+
+ /* Guess whether we want to run unit tests as child processes. */
+ if(acutest_no_exec_ < 0) {
+ acutest_no_exec_ = 0;
+
+ if(acutest_count_ <= 1) {
+ acutest_no_exec_ = 1;
+ } else {
+#ifdef ACUTEST_WIN_
+ if(IsDebuggerPresent())
+ acutest_no_exec_ = 1;
+#endif
+#ifdef ACUTEST_LINUX_
+ if(acutest_is_tracer_present_())
+ acutest_no_exec_ = 1;
+#endif
+#ifdef ACUTEST_MACOS_
+ if(acutest_AmIBeingDebugged())
+ acutest_no_exec_ = 1;
+#endif
+#ifdef RUNNING_ON_VALGRIND
+ /* RUNNING_ON_VALGRIND is provided by optionally included <valgrind.h> */
+ if(RUNNING_ON_VALGRIND)
+ acutest_no_exec_ = 1;
+#endif
+ }
+ }
+
+ if(acutest_tap_) {
+ /* TAP requires that we know the test result ("ok", "not ok") before we
+ * output anything about the test, and this gets problematic at higher
+ * verbose levels. */
+ if(acutest_verbose_level_ > 2)
+ acutest_verbose_level_ = 2;
+
+ /* TAP harness should provide some summary. */
+ acutest_no_summary_ = 1;
+
+ if(!acutest_worker_)
+ printf("1..%d\n", (int) acutest_count_);
+ }
+
+ int index = acutest_worker_index_;
+ for(i = 0; acutest_list_[i].func != NULL; i++) {
+ int run = (acutest_test_data_[i].flags & ACUTEST_FLAG_RUN_);
+ if (acutest_skip_mode_) /* Run all tests except those listed. */
+ run = !run;
+ if(run)
+ acutest_run_(&acutest_list_[i], index++, i);
+ }
+
+ /* Write a summary */
+ if(!acutest_no_summary_ && acutest_verbose_level_ >= 1) {
+ if(acutest_verbose_level_ >= 3) {
+ acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Summary:\n");
+
+ printf(" Count of all unit tests: %4d\n", (int) acutest_list_size_);
+ printf(" Count of run unit tests: %4d\n", acutest_stat_run_units_);
+ printf(" Count of failed unit tests: %4d\n", acutest_stat_failed_units_);
+ printf(" Count of skipped unit tests: %4d\n", (int) acutest_list_size_ - acutest_stat_run_units_);
+ }
+
+ if(acutest_stat_failed_units_ == 0) {
+ acutest_colored_printf_(ACUTEST_COLOR_GREEN_INTENSIVE_, "SUCCESS:");
+ printf(" All unit tests have passed.\n");
+ } else {
+ acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED:");
+ printf(" %d of %d unit tests %s failed.\n",
+ acutest_stat_failed_units_, acutest_stat_run_units_,
+ (acutest_stat_failed_units_ == 1) ? "has" : "have");
+ }
+
+ if(acutest_verbose_level_ >= 3)
+ printf("\n");
+ }
+
+ if (acutest_xml_output_) {
+#if defined ACUTEST_UNIX_
+ char *suite_name = basename(argv[0]);
+#elif defined ACUTEST_WIN_
+ char suite_name[_MAX_FNAME];
+ _splitpath(argv[0], NULL, NULL, suite_name, NULL);
+#else
+ const char *suite_name = argv[0];
+#endif
+ fprintf(acutest_xml_output_, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+ fprintf(acutest_xml_output_, "<testsuite name=\"%s\" tests=\"%d\" errors=\"%d\" failures=\"%d\" skip=\"%d\">\n",
+ suite_name, (int)acutest_list_size_, acutest_stat_failed_units_, acutest_stat_failed_units_,
+ (int)acutest_list_size_ - acutest_stat_run_units_);
+ for(i = 0; acutest_list_[i].func != NULL; i++) {
+ struct acutest_test_data_ *details = &acutest_test_data_[i];
+ fprintf(acutest_xml_output_, " <testcase name=\"%s\" time=\"%.2f\">\n", acutest_list_[i].name, details->duration);
+ if (details->flags & ACUTEST_FLAG_FAILURE_)
+ fprintf(acutest_xml_output_, " <failure />\n");
+ if (!(details->flags & ACUTEST_FLAG_FAILURE_) && !(details->flags & ACUTEST_FLAG_SUCCESS_))
+ fprintf(acutest_xml_output_, " <skipped />\n");
+ fprintf(acutest_xml_output_, " </testcase>\n");
+ }
+ fprintf(acutest_xml_output_, "</testsuite>\n");
+ fclose(acutest_xml_output_);
+ }
+
+ acutest_cleanup_();
+
+ return (acutest_stat_failed_units_ == 0) ? 0 : 1;
+}
+
+
+#endif /* #ifndef TEST_NO_MAIN */
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+#ifdef __cplusplus
+ } /* extern "C" */
+#endif
+
+#endif /* #ifndef ACUTEST_H */
diff --git a/src/fluent-bit/tests/lib/shunit2/.gitignore b/src/fluent-bit/tests/lib/shunit2/.gitignore
new file mode 100644
index 000000000..4832c04f1
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/.gitignore
@@ -0,0 +1,3 @@
+# Hidden files generated by macOS.
+.DS_Store
+._*
diff --git a/src/fluent-bit/tests/lib/shunit2/.travis.yml b/src/fluent-bit/tests/lib/shunit2/.travis.yml
new file mode 100644
index 000000000..55aeda473
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/.travis.yml
@@ -0,0 +1,29 @@
+language: bash
+
+env:
+ - SHUNIT_COLOR='always'
+
+script:
+ # Execute the unit tests.
+ - ./test_runner
+
+os:
+ - linux
+ - osx
+
+addons:
+ apt:
+ packages:
+ - ksh
+ - zsh
+
+matrix:
+ include:
+ - os: linux
+ script:
+ # Run the source through ShellCheck (http://www.shellcheck.net).
+ - shellcheck shunit2 *_test.sh
+ - shellcheck -s sh shunit2_test_helpers
+ - os: linux
+ # Support Ubuntu Trusty through Apr 2019.
+ dist: trusty
diff --git a/src/fluent-bit/tests/lib/shunit2/CODE_OF_CONDUCT.md b/src/fluent-bit/tests/lib/shunit2/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..dc906ab01
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at kate.ward@forestent.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/src/fluent-bit/tests/lib/shunit2/LICENSE b/src/fluent-bit/tests/lib/shunit2/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/fluent-bit/tests/lib/shunit2/README.md b/src/fluent-bit/tests/lib/shunit2/README.md
new file mode 100644
index 000000000..dd6bba420
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/README.md
@@ -0,0 +1,636 @@
+# shUnit2
+
+shUnit2 is an [xUnit](http://en.wikipedia.org/wiki/XUnit) unit test framework for
+Bourne-based shell scripts, and it is designed to work in a similar manner to
+[JUnit](http://www.junit.org), [PyUnit](http://pyunit.sourceforge.net), etc. If
+you have ever had the desire to write a unit test for a shell script, shUnit2
+can do the job.
+
+[![Travis CI](https://img.shields.io/travis/kward/shunit2.svg)](https://travis-ci.org/kward/shunit2)
+
+## Table of Contents
+
+* [Introduction](#introduction)
+ * [Credits / Contributors](#credits-contributors)
+ * [Feedback](#feedback)
+* [Quickstart](#quickstart)
+* [Function Reference](#function-reference)
+ * [General Info](#general-info)
+ * [Asserts](#asserts)
+ * [Failures](#failures)
+ * [Setup/Teardown](#setup-teardown)
+ * [Skipping](#skipping)
+ * [Suites](#suites)
+* [Advanced Usage](#advanced-usage)
+ * [Some constants you can use](#some-constants-you-can-use)
+ * [Error Handling](#error-handling)
+ * [Including Line Numbers in Asserts (Macros)](#including-line-numbers-in-asserts-macros)
+ * [Test Skipping](#test-skipping)
+ * [Running specific tests from the command line](#cmd-line-args)
+* [Appendix](#appendix)
+ * [Getting help](#getting-help)
+ * [Zsh](#zsh)
+
+---
+
+## <a name="introduction"></a> Introduction
+
+shUnit2 was originally developed to provide a consistent testing solution for
+[log4sh][log4sh], a shell based logging framework similar to
+[log4j](http://logging.apache.org). During the development of that product, a
+repeated problem of having things work just fine under one shell (`/bin/bash` on
+Linux to be specific), and then not working under another shell (`/bin/sh` on
+Solaris) kept coming up. Although several simple tests were run, they were not
+adequate and did not catch some corner cases. The decision was finally made to
+write a proper unit test framework after multiple brown-bag releases were made.
+_Research was done to look for an existing product that met the testing
+requirements, but no adequate product was found._
+
+### Tested software
+
+**Tested Operating Systems** (varies over time)
+
+OS | Support | Verified
+----------------------------------- | --------- | --------
+Ubuntu Linux (14.04.05 LTS) | Travis CI | continuous
+macOS High Sierra (10.13.3) | Travis CI | continuous
+FreeBSD | user | unknown
+Solaris 8, 9, 10 (inc. OpenSolaris) | user | unknown
+Cygwin | user | unknown
+
+**Tested Shells**
+
+* Bourne Shell (__sh__)
+* BASH - GNU Bourne Again SHell (__bash__)
+* DASH (__dash__)
+* Korn Shell (__ksh__)
+* pdksh - Public Domain Korn Shell (__pdksh__)
+* zsh - Zsh (__zsh__) (since 2.1.2) _please see the Zsh shell errata for more information_
+
+See the appropriate Release Notes for this release
+(`doc/RELEASE_NOTES-X.X.X.txt`) for the list of actual versions tested.
+
+### <a name="credits-contributors"></a> Credits / Contributors
+
+A list of contributors to shUnit2 can be found in `doc/contributors.md`. Many
+thanks go out to all those who have contributed to make this a better tool.
+
+shUnit2 is the original product of many hours of work by Kate Ward, the primary
+author of the code. For related software, check out https://github.com/kward.
+
+### <a name="feedback"></a> Feedback
+
+Feedback is most certainly welcome for this document. Send your questions,
+comments, and criticisms via the
+[shunit2-users](https://groups.google.com/a/forestent.com/forum/#!forum/shunit2-users/new)
+forum (created 2018-12-09), or file an issue via
+https://github.com/kward/shunit2/issues.
+
+---
+
+## <a name="quickstart"></a> Quickstart
+
+This section will give a very quick start to running unit tests with shUnit2.
+More information is located in later sections.
+
+Here is a quick sample script to show how easy it is to write a unit test in
+shell. _Note: the script as it stands expects that you are running it from the
+"examples" directory._
+
+```sh
+#! /bin/sh
+# file: examples/equality_test.sh
+
+testEquality() {
+ assertEquals 1 1
+}
+
+# Load shUnit2.
+. ./shunit2
+```
+
+Running the unit test should give results similar to the following.
+
+```console
+$ cd examples
+$ ./equality_test.sh
+testEquality
+
+Ran 1 test.
+
+OK
+```
+
+W00t! You've just run your first successful unit test. So, what just happened?
+Quite a bit really, and it all happened simply by sourcing the `shunit2`
+library. The basic functionality for the script above goes like this:
+
+* When shUnit2 is sourced, it will walk through any functions defined whose name
+ starts with the string `test`, and add those to an internal list of tests to
+ execute. Once a list of test functions to be run has been determined, shunit2
+ will go to work.
+* Before any tests are executed, shUnit2 again looks for a function, this time
+ one named `oneTimeSetUp()`. If it exists, it will be run. This function is
+  normally used to set up the environment for all tests to be run. Things like
+  creating directories for output or setting environment variables are good to
+  place here. Just so you know, you can also declare a corresponding function
+  named `oneTimeTearDown()` that does the same thing, but after all the
+  tests have been completed. It is good for removing temporary directories, etc.
+* shUnit2 is now ready to run tests. Before doing so though, it again looks for
+ another function that might be declared, one named `setUp()`. If the function
+ exists, it will be run before each test. It is good for resetting the
+ environment so that each test starts with a clean slate. **At this stage, the
+ first test is finally run.** The success of the test is recorded for a report
+ that will be generated later. After the test is run, shUnit2 looks for a final
+ function that might be declared, one named `tearDown()`. If it exists, it will
+ be run after each test. It is a good place for cleaning up after each test,
+ maybe doing things like removing files that were created, or removing
+ directories. This set of steps, `setUp() > test() > tearDown()`, is repeated
+ for all of the available tests.
+* Once all the work is done, shUnit2 will generate the nice report you saw
+ above. A summary of all the successes and failures will be given so that you
+ know how well your code is doing.
+
+We should now try adding a test that fails. Change your unit test to look like
+this.
+
+```sh
+#! /bin/sh
+# file: examples/party_test.sh
+
+testEquality() {
+ assertEquals 1 1
+}
+
+testPartyLikeItIs1999() {
+ year=`date '+%Y'`
+ assertEquals "It's not 1999 :-(" '1999' "${year}"
+}
+
+# Load shUnit2.
+. ./shunit2
+```
+
+So, what did you get? I guess it told you that this isn't 1999. Bummer, eh?
+Hopefully, you noticed a couple of things that were different about the second
+test. First, we added an optional message that the user will see if the assert
+fails. Second, we did comparisons of strings instead of integers as in the first
+test. It doesn't matter whether you are testing for equality of strings or
+integers. Both work equally well with shUnit2.
+
+Hopefully, this is enough to get you started with unit testing. If you want a
+ton more examples, take a look at the tests provided with [log4sh][log4sh] or
+[shFlags][shflags]. Both provide excellent examples of more advanced usage.
+shUnit2 was after all written to meet the unit testing need that
+[log4sh][log4sh] had.
+
+---
+
+## <a name="function-reference"></a> Function Reference
+
+### <a name="general-info"></a> General Info
+
+Any string values passed should be properly quoted -- they must be
+surrounded by single-quote (`'`) or double-quote (`"`) characters -- so that the
+shell will properly parse them.
+
+### <a name="asserts"></a> Asserts
+
+`assertEquals [message] expected actual`
+
+Asserts that _expected_ and _actual_ are equal to one another. The _expected_
+and _actual_ values can be either strings or integer values as both will be
+treated as strings. The _message_ is optional, and must be quoted.
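+
+For illustration, a minimal sketch of the call form described above; the
+function name and values are invented for this example.
+
+```sh
+testUppercasing() {
+  actual=`echo 'foo' | tr 'a-z' 'A-Z'`
+  # Optional message first, then expected, then actual.
+  assertEquals 'uppercasing failed' 'FOO' "${actual}"
+}
+```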
+
+`assertNotEquals [message] unexpected actual`
+
+Asserts that _unexpected_ and _actual_ are not equal to one another. The
+_unexpected_ and _actual_ values can be either strings or integer values as both
+will be treated as strings. The _message_ is optional, and must be quoted.
+
+`assertSame [message] expected actual`
+
+This function is functionally equivalent to `assertEquals`.
+
+`assertNotSame [message] unexpected actual`
+
+This function is functionally equivalent to `assertNotEquals`.
+
+`assertContains [message] container content`
+
+Asserts that _container_ contains _content_. The _container_ and _content_
+values can be either strings or integer values as both will be treated as
+strings. The _message_ is optional, and must be quoted.
+
+`assertNotContains [message] container content`
+
+Asserts that _container_ does not contain _content_. The _container_ and
+_content_ values can be either strings or integer values as both will be treated
+as strings. The _message_ is optional, and must be quoted.
+
+`assertNull [message] value`
+
+Asserts that _value_ is _null_, or in shell terms, a zero-length string. The
+_value_ must be a string as an integer value does not translate into a
+zero-length string. The _message_ is optional, and must be quoted.
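+
+A small illustrative sketch; the command under test here (`true`, which prints
+nothing) is chosen only to demonstrate the zero-length check.
+
+```sh
+testProducesNoOutput() {
+  output=`true`
+  # Passes because ${output} is a zero-length string.
+  assertNull 'expected no output' "${output}"
+}
+```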
+
+`assertNotNull [message] value`
+
+Asserts that _value_ is _not null_, or in shell terms, a non-empty string. The
+_value_ may be a string or an integer as the latter will be parsed as a non-empty
+string value. The _message_ is optional, and must be quoted.
+
+`assertTrue [message] condition`
+
+Asserts that a given shell test _condition_ is _true_. The condition can be as
+simple as a shell _true_ value (the value `0` -- equivalent to
+`${SHUNIT_TRUE}`), or a more sophisticated shell conditional expression. The
+_message_ is optional, and must be quoted.
+
+A sophisticated shell conditional expression is equivalent to what the __if__ or
+__while__ shell built-ins would use (more specifically, what the __test__
+command would use). Testing for example whether some value is greater than
+another value can be done this way.
+
+`assertTrue "[ 34 -gt 23 ]"`
+
+Testing for the ability to read a file can also be done. This particular test
+will fail.
+
+`assertTrue 'test failed' '[ -r /some/non-existent/file ]'`
+
+As the expressions are standard shell __test__ expressions, it is possible to
+string multiple expressions together with `-a` and `-o` in the standard fashion.
+This test will succeed as the entire expression evaluates to _true_.
+
+`assertTrue 'test failed' '[ 1 -eq 1 -a 2 -eq 2 ]'`
+
+<i>One word of warning: be very careful with your quoting as shell is not the
+most forgiving of bad quoting, and things will fail in strange ways.</i>
+
+`assertFalse [message] condition`
+
+Asserts that a given shell test _condition_ is _false_. The condition can be as
+simple as a shell _false_ value (the value `1` -- equivalent to
+`${SHUNIT_FALSE}`), or a more sophisticated shell conditional expression. The
+_message_ is optional, and must be quoted.
+
+_For examples of more sophisticated expressions, see `assertTrue`._
+
+### <a name="failures"></a> Failures
+
+Just to clarify, failures __do not__ test the various arguments against one
+another. Failures simply fail, optionally with a message, and that is all they
+do. If you need to test arguments against one another, use asserts.
+
+If all failures do is fail, why might one use them? There are times when you may
+have some very complicated logic that you need to test, and the simple asserts
+provided are simply not adequate. You can do your own validation of the code,
+use an `assertTrue ${SHUNIT_TRUE}` if your own tests succeeded, and use a
+failure to record a failure.
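+
+As a sketch of that pattern, the following hypothetical test does its own
+validation and only uses `fail` to record the problem; `my_complicated_command`
+is an invented name standing in for whatever is being tested.
+
+```sh
+testComplexValidation() {
+  result=`my_complicated_command 2>&1`  # hypothetical command under test
+
+  # Hand-rolled check that the simple asserts cannot express directly.
+  if echo "${result}" | grep -q 'expected marker'; then
+    assertTrue "${SHUNIT_TRUE}"
+  else
+    fail "marker not found in output: ${result}"
+  fi
+}
+```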
+
+`fail [message]`
+
+Fails the test immediately. The _message_ is optional, and must be quoted.
+
+`failNotEquals [message] unexpected actual`
+
+Fails the test immediately, reporting that the _unexpected_ and _actual_ values
+are not equal to one another. The _message_ is optional, and must be quoted.
+
+_Note: no actual comparison of unexpected and actual is done._
+
+`failSame [message] expected actual`
+
+Fails the test immediately, reporting that the _expected_ and _actual_ values
+are the same. The _message_ is optional, and must be quoted.
+
+_Note: no actual comparison of expected and actual is done._
+
+`failNotSame [message] expected actual`
+
+Fails the test immediately, reporting that the _expected_ and _actual_ values
+are not the same. The _message_ is optional, and must be quoted.
+
+_Note: no actual comparison of expected and actual is done._
+
+`failFound [message] content`
+
+Fails the test immediately, reporting that the _content_ was found. The
+_message_ is optional, and must be quoted.
+
+_Note: no actual search of content is done._
+
+`failNotFound [message] content`
+
+Fails the test immediately, reporting that the _content_ was not found. The
+_message_ is optional, and must be quoted.
+
+_Note: no actual search of content is done._
+
+### <a name="setup-teardown"></a> Setup/Teardown
+
+`oneTimeSetUp`
+
+This function can be optionally overridden by the user in their test suite.
+
+If this function exists, it will be called once before any tests are run. It is
+useful to prepare a common environment for all tests.
+
+`oneTimeTearDown`
+
+This function can be optionally overridden by the user in their test suite.
+
+If this function exists, it will be called once after all tests are completed.
+It is useful to clean up the environment after all tests.
+
+`setUp`
+
+This function can be optionally overridden by the user in their test suite.
+
+If this function exists, it will be called before each test is run. It is useful
+to reset the environment before each test.
+
+`tearDown`
+
+This function can be optionally overridden by the user in their test suite.
+
+If this function exists, it will be called after each test completes. It is
+useful to clean up the environment after each test.
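+
+A minimal sketch showing how these hooks are commonly combined. The directory
+and file names are invented for the example, and `SHUNIT_TMPDIR` is the
+automatically cleaned temporary directory described under Advanced Usage.
+
+```sh
+oneTimeSetUp() {
+  # Created once for the whole suite.
+  outputDir="${SHUNIT_TMPDIR}/output"
+  mkdir "${outputDir}"
+}
+
+setUp() {
+  # Give every test a fresh, empty working file.
+  workFile="${outputDir}/work.txt"
+  : >"${workFile}"
+}
+
+tearDown() {
+  rm -f "${workFile}"
+}
+```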
+
+### <a name="skipping"></a> Skipping
+
+`startSkipping`
+
+This function forces the remaining _assert_ and _fail_ functions to be
+"skipped", i.e. they will have no effect. Each function skipped will be recorded
+so that the total of asserts and fails will not be altered.
+
+`endSkipping`
+
+This function returns calls to the _assert_ and _fail_ functions to their
+default behavior, i.e. they will be called.
+
+`isSkipping`
+
+This function returns the current state of skipping. It can be compared against
+`${SHUNIT_TRUE}` or `${SHUNIT_FALSE}` if desired.
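+
+A brief sketch of the skipping functions in use; the operating-system check is
+only an example condition.
+
+```sh
+testLinuxOnlyChecks() {
+  # Skip the next assert when not running on Linux.
+  [ "`uname -s`" = 'Linux' ] || startSkipping
+
+  assertTrue 'missing /proc' '[ -d /proc ]'
+
+  # Restore normal assert behavior for the remaining checks.
+  endSkipping
+
+  assertEquals 'portable check' 2 2
+}
+```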
+
+### <a name="suites"></a> Suites
+
+The default behavior of shUnit2 is that all tests will be found dynamically. If
+you have a specific set of tests you want to run, or you don't want to use the
+standard naming scheme of prefixing your tests with `test`, these functions are
+for you. Most users will never use them though.
+
+`suite`
+
+This function can be optionally overridden by the user in their test suite.
+
+If this function exists, it will be called when `shunit2` is sourced. If it does
+not exist, shUnit2 will search the parent script for all functions beginning
+with the word `test`, and they will be added dynamically to the test suite.
+
+`suite_addTest name`
+
+This function adds a function named _name_ to the list of tests scheduled for
+execution as part of this test suite. This function should only be called from
+within the `suite()` function.
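+
+An illustrative sketch of an explicit suite; the function names are examples
+only.
+
+```sh
+#! /bin/sh
+
+testSomething() {
+  assertEquals 1 1
+}
+
+# Not collected automatically because its name does not start with 'test'.
+checkSomethingElse() {
+  assertNotNull "${HOME}"
+}
+
+suite() {
+  suite_addTest testSomething
+  suite_addTest checkSomethingElse
+}
+
+# Load shUnit2.
+. ./shunit2
+```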
+
+---
+
+## <a name="advanced-usage"></a> Advanced Usage
+
+### <a name="some-constants-you-can-use"></a> Some constants you can use
+
+There are several constants provided by shUnit2 as variables that might be of
+use to you.
+
+*Predefined*
+
+| Constant | Value |
+| --------------- | ----- |
+| SHUNIT\_TRUE | Standard shell `true` value (the integer value 0). |
+| SHUNIT\_FALSE | Standard shell `false` value (the integer value 1). |
+| SHUNIT\_ERROR | The integer value 2. |
+| SHUNIT\_TMPDIR | Path to temporary directory that will be automatically cleaned up upon exit of shUnit2. |
+| SHUNIT\_VERSION | The version of shUnit2 you are running. |
+
+*User defined*
+
+| Constant | Value |
+| ----------------- | ----- |
+| SHUNIT\_CMD\_EXPR | Override which `expr` command is used. By default `expr` is used, except on BSD systems where `gexpr` is used. |
+| SHUNIT\_COLOR | Enable colorized output. Options are 'auto', 'always', or 'none', with 'auto' being the default. |
+| SHUNIT\_PARENT | The filename of the shell script containing the tests. This is needed specifically for Zsh support. |
+| SHUNIT\_TEST\_PREFIX | Define this variable to add a prefix in front of each test name that is output in the test report. |
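+
+For example, a short sketch that uses two of these constants; the test content
+is invented, and `SHUNIT_COLOR` is set before `shunit2` is sourced so that it
+takes effect.
+
+```sh
+#! /bin/sh
+
+SHUNIT_COLOR='none'  # disable colorized output for this run
+
+testWritesScratchFile() {
+  scratch="${SHUNIT_TMPDIR}/scratch.txt"
+  echo 'data' >"${scratch}"
+  # SHUNIT_TMPDIR is removed automatically when shUnit2 exits.
+  assertTrue 'scratch file missing' "[ -f '${scratch}' ]"
+}
+
+# Load shUnit2.
+. ./shunit2
+```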
+
+### <a name="error-handling"></a> Error Handling
+
+The constant values `SHUNIT_TRUE`, `SHUNIT_FALSE`, and `SHUNIT_ERROR` are
+returned from nearly every function to indicate the success or failure of the
+function. Additionally, the variable `flags_error` is filled with a detailed
+error message if any function returns with a `SHUNIT_ERROR` value.
+
+### <a name="including-line-numbers-in-asserts-macros"></a> Including Line Numbers in Asserts (Macros)
+
+If you include lots of assert statements in an individual test function, it can
+become difficult to determine exactly which assert was thrown unless your
+messages are unique. To help somewhat, line numbers can be included in the
+assert messages. To enable this, a special shell "macro" must be used rather
+than the standard assert calls. _Shell doesn't actually have macros; the name is
+used here as the operation is similar to a standard macro._
+
+For example, to include line numbers for an `assertEquals()` function call,
+replace the `assertEquals()` with `${_ASSERT_EQUALS_}`.
+
+_**Example** -- Asserts with and without line numbers_
+
+```sh
+#! /bin/sh
+# file: examples/lineno_test.sh
+
+testLineNo() {
+ # This assert will have line numbers included (e.g. "ASSERT:[123] ...").
+ echo "ae: ${_ASSERT_EQUALS_}"
+ ${_ASSERT_EQUALS_} 'not equal' 1 2
+
+ # This assert will not have line numbers included (e.g. "ASSERT: ...").
+ assertEquals 'not equal' 1 2
+}
+
+# Load shUnit2.
+. ./shunit2
+```
+
+Notes:
+
+1. Due to how shell parses command-line arguments, all strings used with macros
+ should be quoted twice. Namely, single-quotes must be converted to single-
+   double-quotes, and vice-versa. If the string being passed is guaranteed to be
+   non-empty, the extra quoting is not necessary.<br/>
+ <br/>
+ Normal `assertEquals` call.<br/>
+ `assertEquals 'some message' 'x' ''`<br/>
+ <br/>
+ Macro `_ASSERT_EQUALS_` call. Note the extra quoting around the _message_ and
+ the _null_ value.<br/>
+ `_ASSERT_EQUALS_ '"some message"' 'x' '""'`
+
+1. Line numbers are not supported in all shells. If a shell does not support
+ them, no errors will be thrown. Supported shells include: __bash__ (>=3.0),
+ __ksh__, __pdksh__, and __zsh__.
+
+### <a name="test-skipping"></a> Test Skipping
+
+There are times where the test code you have written is just not applicable to
+the system you are running on. This section describes how to skip these tests
+but maintain the total test count.
+
+Probably the easiest example would be shell code that is meant to run under the
+__bash__ shell, but the unit test is running under the Bourne shell. There are
+things that just won't work. The following test code demonstrates two sample
+functions, one that will be run under any shell, and another that will run
+only under the __bash__ shell.
+
+_**Example** -- math include_
+```sh
+# file: examples/math.inc.
+
+add_generic() {
+ num_a=$1
+ num_b=$2
+
+ expr $1 + $2
+}
+
+add_bash() {
+ num_a=$1
+ num_b=$2
+
+ echo $(($1 + $2))
+}
+```
+
+And here is a corresponding unit test that correctly skips the `add_bash()` function when the unit test is not running under the __bash__ shell.
+
+_**Example** -- math unit test_
+```sh
+#! /bin/sh
+# file: examples/math_test.sh
+
+testAdding() {
+ result=`add_generic 1 2`
+ assertEquals \
+ "the result of '${result}' was wrong" \
+ 3 "${result}"
+
+ # Disable non-generic tests.
+ [ -z "${BASH_VERSION:-}" ] && startSkipping
+
+ result=`add_bash 1 2`
+ assertEquals \
+ "the result of '${result}' was wrong" \
+ 3 "${result}"
+}
+
+oneTimeSetUp() {
+ # Load include to test.
+ . ./math.inc
+}
+
+# Load and run shUnit2.
+. ./shunit2
+```
+
+Running the above test under the __bash__ shell will result in the following
+output.
+
+```console
+$ /bin/bash math_test.sh
+testAdding
+
+Ran 1 test.
+
+OK
+```
+
+But, running the test under any other Unix shell will result in the following
+output.
+
+```console
+$ /bin/ksh math_test.sh
+testAdding
+
+Ran 1 test.
+
+OK (skipped=1)
+```
+
+As you can see, the total number of tests has not changed, but the report
+indicates that some tests were skipped.
+
+Skipping can be controlled with the following functions: `startSkipping()`,
+`endSkipping()`, and `isSkipping()`. Once skipping is enabled, it will remain
+enabled until the end of the current test function call, after which skipping is
+disabled.
+
+### <a name="cmd-line-args"></a> Running specific tests from the command line
+
+When running a test script, you may override the default set of tests, or the suite-specified set of tests, by providing additional arguments on the command line. Each additional argument after the `--` marker is assumed to be the name of a test function to be run, in the order specified, e.g.
+
+```console
+test-script.sh -- testOne testTwo otherFunction
+```
+
+or
+
+```console
+shunit2 test-script.sh testOne testTwo otherFunction
+```
+
+In either case, three functions will be run as tests, `testOne`, `testTwo`, and `otherFunction`. Note that the function `otherFunction` would not normally be run by `shunit2` as part of the implicit collection of tests, as its function name does not match the test function name pattern `test*`.
+
+If a specified test function does not exist, `shunit2` will still attempt to run that function and thereby cause a failure which `shunit2` will catch and mark as a failed test. All other tests will run normally.
+
+The specification of tests does not affect how `shunit2` looks for and executes the setup and tear down functions, which will still run as expected.
+
+---
+
+## <a name="appendix"></a> Appendix
+
+### <a name="getting-help"></a> Getting Help
+
+For help, please send requests to either the shunit2-users@forestent.com mailing
+list (archives available on the web at
+https://groups.google.com/a/forestent.com/forum/#!forum/shunit2-users) or
+directly to Kate Ward <kate dot ward at forestent dot com>.
+
+### <a name="zsh"></a> Zsh
+
+For compatibility with Zsh, there is one requirement that must be met -- the
+`shwordsplit` option must be set. There are three ways to accomplish this.
+
+1. In the unit-test script, add the following shell code snippet before sourcing
+ the `shunit2` library.
+
+ ```sh
+ setopt shwordsplit
+ ```
+
+2. When invoking __zsh__ from either the command-line or as a script with `#!`,
+ add the `-y` parameter.
+
+ ```sh
+ #! /bin/zsh -y
+ ```
+
+3. When invoking __zsh__ from the command-line, add `-o shwordsplit --` as
+ parameters before the script name.
+
+ ```console
+ $ zsh -o shwordsplit -- some_script
+ ```
+
+[log4sh]: https://github.com/kward/log4sh
+[shflags]: https://github.com/kward/shflags
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/CHANGES-2.1.md b/src/fluent-bit/tests/lib/shunit2/doc/CHANGES-2.1.md
new file mode 100644
index 000000000..776ff5a9a
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/CHANGES-2.1.md
@@ -0,0 +1,261 @@
+# shUnit2 2.1.x Changes
+
+## Changes with 2.1.8
+
+### New
+
+Issue #29. Add support for user defined prefix for test names. A prefix can be
+added by defining the `SHUNIT_TEST_PREFIX` variable.
+
+### Improvements
+
+Issue #78. Added an example for using suite tests.
+
+Run continuous integration additionally against Ubuntu Trusty.
+
+### Fixed
+
+Issue #94. Removed the `gen_test_report.sh` script as the Travis CI output can
+be used instead. Reports were used before Travis CI was used.
+
+Issue #84. Treat syntax errors in functions as test failures.
+
+Issue #77. Fail tests when the environment functions (e.g. `setUp()` or
+`tearDown()`) fail.
+
+
+## Changes with 2.1.7
+
+### Bug fixes
+
+Issue #69. shUnit2 should not exit with 0 when it has (syntax) errors.
+
+### Enhancements
+
+Issue #54. Shell commands prefixed with '\' so that they can be stubbed in
+tests.
+
+Issue #68. Ran all code through [ShellCheck](http://www.shellcheck.net/).
+
+Issue #60. Continuous integration tests now run with
+[Travis CI](https://travis-ci.org/kward/shunit2).
+
+Issue #56. Added color support. Color is enabled automatically when supported,
+but can be disabled by defining the SHUNIT_COLOR environment variable before
+sourcing shunit2. Accepted values are `always`, `auto` (the default), and
+`none`.
+
+Issue #35. Add colored output.
+
+### Other
+
+Moved code to GitHub (https://github.com/kward/shunit2), and restructured to
+be more GitHub like.
+
+Changed to the Apache 2.0 license.
+
+
+## Changes with 2.1.6
+
+Removed all references to the DocBook documentation.
+
+Simplified the 'src' structure.
+
+Fixed error message in fail() that stated wrong number of required arguments.
+
+Updated lib/versions.
+
+Fixed bug in `_shunit_mktempDir()` where a failure occurred when the 'od'
+command was not present in `/usr/bin`.
+
+Renamed `shunit_tmpDir` variable to `SHUNIT_TMPDIR` to closer match the standard
+`TMPDIR` variable.
+
+Added support for calling shunit2 as an executable, in addition to the existing
+method of sourcing it in as a library. This allows users to keep tests working
+despite the location of the shunit2 executable being different for each OS
+distribution.
+
+Issue #14: Improved handling of some strange chars (e.g. single and double
+quotes) in messages.
+
+Issue #27: Fixed error message for `assertSame()`.
+
+Issue #25: Added check and error message to user when phantom functions are
+written to a partition mounted with `noexec`.
+
+Issue #11: Added support for defining functions like `function someFunction()`.
+
+
+## Changes with 2.1.5
+
+Issue #1: Fixed bug pointed out by R Bernstein in the trap code where certain
+types of exit conditions did not generate the ending report.
+
+Issue #2: Added `assertNotEquals()` assert.
+
+Issue #3: Moved check for unset variables out of shUnit2 into the unit tests.
+Testing poorly written software blows up if this check is in, but it is only
+interesting for shUnit2 itself. Added `shunit_test_output.sh` unit test for
+this. Some shells still do not catch such errors properly (e.g. Bourne shell and
+BASH 2.x).
+
+Added new custom assert in test_helpers to check for output to STDOUT, and none
+to STDERR.
+
+Replaced fatal message in the temp directory creation with a `_shunit_fatal()`
+function call.
+
+Fixed test_output unit test so it works now that the 'set -u' stuff was removed
+for Issue #3.
+
+Flushed out the coding standards in the `README.txt` a bit more, and brought the
+shunit2 code up to par with the documented standards.
+
+Issue #4: Completely changed the reporting output to be a closer match for
+JUnit and PyUnit. As a result, tests are counted separately from assertions.
+
+Provide public `shunit_tmpDir` variable that can be used by unit test scripts
+that need automated and guaranteed cleanup.
+
+Issue #7: Fixed duplicated printing of messages passed to asserts.
+
+Per code review, fixed wording of `failSame()` and `failNotSame()` messages.
+
+Replaced `version_info.sh` with versions library and made appropriate changes in
+other scripts to use it.
+
+Added `gen_test_results.sh` to make releases easier.
+
+Fixed bugs in `shlib_relToAbsPath()` in shlib.
+
+Converted DocBook documentation to reStructuredText for easier maintenance. The
+DocBook documentation is now considered obsolete, and will be removed in a
+future release.
+
+Issue #5: Fixed the documentation around the usage of failures.
+
+Issue #9: Added unit tests and updated documentation to demonstrate the
+requirement of quoting values twice when macros are used. This is due to how
+shell parses arguments.
+
+When an invalid number of arguments is passed to a function, the invalid number
+is returned to the user so they are more aware of what the cause might be.
+
+
+## Changes with 2.1.4
+
+Removed the `_shunit_functionExists()` function as it was dead code.
+
+Fixed zsh version number check in version_info.
+
+Fixed bug in last resort temporary directory creation.
+
+Fixed off-by-one in exit value for scripts caught by the trap handler.
+
+Added argument count error checking to all functions.
+
+Added mkdir_test.sh example.
+
+Moved src/test into src/shell to better match structure used with shFlags.
+
+Fixed problem where null values were not handled properly under ksh.
+
+Added support for outputting line numbers as part of assert messages.
+
+Started documenting the coding standards, and changed some variable names as a
+result.
+
+Improved zsh version and option checks.
+
+Renamed the `__SHUNIT_VERSION` variable to `SHUNIT_VERSION`.
+
+
+## Changes with 2.1.3
+
+Added some explicit variable defaults, even though the variables are set, as
+they sometimes behave strange when the script is canceled.
+
+Additional workarounds for zsh compatibility.
+
+shUnit2 now exits with a non-zero exit code if any of the tests failed. This was
+done for automated testing frameworks. Tests that were skipped are not
+considered failures, and do not affect the exit code.
+
+Changed detection of STDERR output in unit tests.
+
+
+## Changes with 2.1.2
+
+Unset additional variables that were missed.
+
+Added checks and workarounds to improve zsh compatibility.
+
+Added some argument count checks `assertEquals()`, `assertNull()`, and
+`assertSame()`.
+
+
+## Changes with 2.1.1
+
+Fixed bug where `fail()` was not honoring skipping.
+
+Fixed problem with `docs-docbook-prep` target that prevented it from working.
+(Thanks to Bryan Larsen for pointing this out.)
+
+Changed the test in `assertFalse()` so that any non-zero value registers as
+false. (Credits to Bryan Larsen)
+
+Major fiddling to bring more in line with [JUnit](http://junit.org/). Asserts
+give better output when no message is given, and failures now just fail.
+
+It was pointed out that the simple 'failed' message for a failed assert was not
+only insufficient, it was nonstandard (when compared to JUnit) and didn't
+provide the user with an expected vs actual result. The code was revised
+somewhat to bring closer into alignment with JUnit (v4.3.1 specifically) so
+that it feels more "normal". (Credits to Richard Jensen)
+
+As part of the JUnit realignment, it was noticed that `fail*()` functions in
+JUnit don't actually do any comparisons themselves. They only generate a
+failure message. Updated the code to match.
+
+Added self-testing unit tests. Kinda horkey, but they did find bugs during the
+JUnit realignment.
+
+Fixed the code for returning from asserts as the return was being called before
+the unsetting of variables occurred. (Credits to Mathias Goldau)
+
+The assert(True|False)() functions now accept an integer value for a
+conditional test. A value of '0' is considered 'true', while any non-zero value
+is considered 'false'.
+
+All public functions now use default values to work properly with the '-x'
+shell debugging flag.
+
+Fixed the method of percent calculation for the report to achieve better
+accuracy.
+
+
+## Changes with 2.1.0 (since 2.0.1)
+
+This release is a branch of the 2.0.1 release.
+
+Moving to [reStructured Text](http://docutils.sourceforge.net/rst.html) for
+the documentation.
+
+Fixed problem with `fail()`. The failure message was not properly printed.
+
+Fixed the `Makefile` so that the DocBook XML and XSLT files would be
+downloaded before parsing can continue.
+
+Renamed the internal `__SHUNIT_TRUE` and `__SHUNIT_FALSE` variables to
+`SHUNIT_TRUE` and `SHUNIT_FALSE` so that unit tests can "use" them.
+
+Added support for test "skipping". If skipping is turned on with the
+`startSkip()` function, `assert` and `fail` functions will return immediately,
+and the skip will be recorded.
+
+The report output format was changed to include the percentage for each test
+result, rather than just those successful.
+
+
+[travis_ci]: https://travis-ci.org/kward/shunit2
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.0.txt b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.0.txt
new file mode 100644
index 000000000..c99f74633
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.0.txt
@@ -0,0 +1,104 @@
+Release Notes for shUnit2 2.1.0
+===============================
+
+This release was branched from shUnit2 2.0.1. It mostly adds new functionality,
+but there are a couple of bugs fixed from the previous release.
+
+See the ``CHANGES-2.1.rst`` file for a full list of changes.
+
+
+Tested Platforms
+----------------
+
+This list of platforms comes from the latest version of log4sh as shUnit2 is
+used in the testing of log4sh on each of these platforms.
+
+Cygwin
+
+- bash 3.2.9(10)
+- pdksh 5.2.14
+
+Linux
+
+- bash 3.1.17(1), 3.2.10(1)
+- dash 0.5.3
+- ksh 1993-12-28
+- pdksh 5.2.14
+- zsh 4.3.2 (does not work)
+
+Mac OS X 10.4.8 (Darwin 8.8)
+
+- bash 2.05b.0(1)
+- ksh 1993-12-28
+
+Solaris 8 U3 (x86)
+
+- /bin/sh
+- bash 2.03.0(1)
+- ksh M-11/16/88i
+
+Solaris 10 U2 (sparc)
+
+- /bin/sh
+- bash 3.00.16(1)
+- ksh M-11/16/88i
+
+Solaris 10 U2 (x86)
+
+- /bin/sh
+- bash 3.00.16(1)
+- ksh M-11/16/88i
+
+
+New Features
+------------
+
+Test skipping
+
+ Support added for test "skipping". A skip mode can be enabled so that
+ subsequent ``assert`` and ``fail`` functions that are called will be recorded
+ as "skipped" rather than as "passed" or "failed". This functionality can be
+ used such that when a set of tests makes sense on one platform but not on
+ another, they can be effectively disabled without altering the total number
+ of tests.
+
+ One example might be when something is supported under ``bash``, but not
+ under a standard Bourne shell.
+
+ New functions: ``startSkipping()``, ``endSkipping``, ``isSkipping``
+
+
+Changes and Enhancements
+------------------------
+
+Moving to the use of `reStructured Text
+<http://docutils.sourceforge.net/rst.html>`_ for documentation. It is easy to
+read and edit in textual form, but converts nicely to HTML.
+
+The report format has changed. Rather than including a simple "success"
+percentage at the end, a percentage is given for each type of test.
+
+
+Bug Fixes
+---------
+
+The ``fail()`` function did not output the optional failure message.
+
+Fixed the ``Makefile`` so that the DocBook XML and XSLT files would be
+downloaded before documentation parsing will continue.
+
+
+Deprecated Features
+-------------------
+
+None.
+
+
+Known Bugs and Issues
+---------------------
+
+None.
+
+
+.. $Revision$
+.. vim:fileencoding=latin1:spell:syntax=rst:textwidth=80
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.1.txt b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.1.txt
new file mode 100644
index 000000000..4c6100511
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.1.txt
@@ -0,0 +1,88 @@
+Release Notes for shUnit2 2.1.1
+===============================
+
+This is mainly a bug fix release, but it also incorporates a realignment with
+the JUnit 4 code. Asserts now provide better failure messages, and the failure
+functions no longer perform tests.
+
+See the ``CHANGES-2.1.txt`` file for a full list of changes.
+
+
+Tested Platforms
+----------------
+
+This list of platforms comes from the latest version of log4sh as shUnit2 is
+used in the testing of log4sh on each of these platforms.
+
+Cygwin
+
+- bash 3.2.15(13)
+- pdksh 5.2.14
+
+Linux
+
+- bash 3.1.17(1), 3.2.10(1)
+- dash 0.5.3
+- ksh 1993-12-28
+- pdksh 5.2.14
+- zsh 4.3.2 (does not work)
+
+Mac OS X 10.4.9 (Darwin 8.9.1)
+
+- bash 2.05b.0(1)
+- ksh 1993-12-28
+
+Solaris 8 U3 (x86)
+
+- /bin/sh
+- bash 2.03.0(1)
+- ksh M-11/16/88i
+
+Solaris 10 U2 (sparc, x86)
+
+- /bin/sh
+- bash 3.00.16(1)
+- ksh M-11/16/88i
+
+
+New Features
+------------
+
+None.
+
+
+Changes and Enhancements
+------------------------
+
+The internal test in ``assertFalse()`` now accepts any non-zero value as false.
+
+The ``assertTrue()`` and ``assertFalse()`` functions now accept an integer value
+for a conditional test. A value of '0' is considered 'true', while any non-zero
+value is considered 'false'.
+
+Self-testing unit tests were added.
+
+
+Bug Fixes
+---------
+
+The ``fail()`` assert now honors skipping.
+
+The ``docs-docbook-prep`` target now works properly.
+
+All asserts now properly unset their variables.
+
+
+Deprecated Features
+-------------------
+
+None.
+
+
+Known Bugs and Issues
+---------------------
+
+Functions do not properly test for an invalid number of arguments.
+
+
+.. vim:fileencoding=latin1:ft=rst:spell:textwidth=80
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.2.txt b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.2.txt
new file mode 100644
index 000000000..549298421
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.2.txt
@@ -0,0 +1,83 @@
+Release Notes for shUnit2 2.1.2
+===============================
+
+This release adds initial support for the zsh shell. Due to some differences
+with this shell as compared with others, some special checks have been added,
+and there are some extra requirements necessary when this shell is to be used.
+
+To use zsh with shUnit2, the following two requirements must be met:
+* The ``shwordsplit`` option must be set.
+* The ``function_argzero`` option must be unset.
+
+Please read the Shell Errata section of the documentation for guidance on how
+to meet these requirements.
+
+
+See the ``CHANGES-2.1.txt`` file for a full list of changes.
+
+
+Tested Platforms
+----------------
+
+This list of platforms comes from the latest version of log4sh as shUnit2 is
+used in the testing of log4sh on each of these platforms.
+
+Linux
+
+- bash 3.1.17(1), 3.2.25(1)
+- dash 0.5.4
+- ksh 1993-12-28
+- pdksh 5.2.14
+- zsh 4.2.5, 4.3.4
+
+Mac OS X 10.4.11 (Darwin 8.11.1)
+
+- bash 2.05b.0(1)
+- ksh 1993-12-28
+- zsh 4.2.3
+
+Solaris 10 U3 (x86)
+
+- /bin/sh
+- bash 3.00.16(1)
+- ksh M-11/16/88i
+- zsh 4.2.1
+
+
+New Features
+------------
+
+Support for the zsh shell.
+
+
+Changes and Enhancements
+------------------------
+
+Added some argument count checks.
+
+
+Bug Fixes
+---------
+
+None.
+
+
+Deprecated Features
+-------------------
+
+None.
+
+
+Known Bugs and Issues
+---------------------
+
+Functions do not properly test for an invalid number of arguments.
+
+ksh and pdksh do not pass null arguments (i.e. empty strings as '') properly,
+and as such checks do not work properly.
+
+zsh requires the ``shwordsplit`` option to be set, and the ``function_argzero``
+option to be unset for proper operation.
+
+
+.. vim:fileencoding=latin1:ft=rst:spell:textwidth=80
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.3.txt b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.3.txt
new file mode 100644
index 000000000..7d1c9f652
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.3.txt
@@ -0,0 +1,84 @@
+Release Notes for shUnit2 2.1.3
+===============================
+
+This release is a minor feature release. It improves support for zsh (although it
+still isn't what it could be) and adds automated testing framework support by
+returning a non-zero exit when tests fail.
+
+To use zsh with shUnit2, the following two requirements must be met:
+* The ``shwordsplit`` option must be set.
+* The ``function_argzero`` option must be unset.
+
+Please read the Shell Errata section of the documentation for guidance on how
+to meet these requirements.
+
+See the ``CHANGES-2.1.txt`` file for a full list of changes.
+
+
+Tested Platforms
+----------------
+
+Cygwin
+
+- bash 3.2.33(18)
+- pdksh 5.2.14
+
+Linux
+
+- bash 3.2.33(1)
+- dash 0.5.4
+- ksh 1993-12-28
+- pdksh 5.2.14
+- zsh 4.3.4
+
+Mac OS X 10.5.2 (Darwin 9.2.2)
+
+- bash 3.2.17(1)
+- ksh 1993-12-28
+- zsh 4.3.4
+
+Solaris 11 x86 (Nevada 77)
+
+- /bin/sh
+- bash 3.2.25(1)
+- ksh M-11/16/88i
+- zsh 4.3.4
+
+
+New Features
+------------
+
+None.
+
+
+Changes and Enhancements
+------------------------
+
+Support for automated testing frameworks.
+
+
+Bug Fixes
+---------
+
+Fixed some issues with zsh support.
+
+
+Deprecated Features
+-------------------
+
+None.
+
+
+Known Bugs and Issues
+---------------------
+
+Functions do not properly test for an invalid number of arguments.
+
+ksh and pdksh do not pass null arguments (i.e. empty strings as '') properly,
+and as such checks do not work properly.
+
+zsh requires the ``shwordsplit`` option to be set, and the ``function_argzero``
+option to be unset for proper operation.
+
+
+.. vim:fileencoding=latin1:ft=rst:spell:textwidth=80
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.4.txt b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.4.txt
new file mode 100644
index 000000000..007b5c3a7
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.4.txt
@@ -0,0 +1,100 @@
+Release Notes for shUnit2 2.1.4
+===============================
+
+This release contains lots of bug fixes and changes. Mostly, it fixes zsh
+support in zsh 3.0, and the handling of null values in ksh.
+
+To use zsh with shUnit2, the following requirement must be met:
+
+- The ``shwordsplit`` option must be set.
+
+Please read the Shell Errata section of the documentation for guidance on how
+to meet these requirements.
+
+See the ``CHANGES-2.1.txt`` file for a full list of changes.
+
+
+Tested Platforms
+----------------
+
+Cygwin
+
+- bash 3.2.39(19)
+- pdksh 5.2.14
+- zsh 4.3.4
+
+Linux (Ubuntu Dapper 6.06)
+
+- bash 3.1.17(1)
+- pdksh 5.2.14
+- zsh 4.2.5
+
+Linux (Ubuntu Hardy 8.04)
+
+- bash 3.2.39(1)
+- dash 0.5.4
+- ksh 1993-12-28
+- pdksh 5.2.14
+- zsh 4.3.4
+
+Mac OS X 10.5.4 (Darwin 9.4.0)
+
+- bash 3.2.17(1)
+- ksh 1993-12-28
+- zsh 4.3.4
+
+Solaris 9 U6 x86
+
+- /bin/sh
+- bash 2.05.0(1)
+- ksh M-11/16/88i
+- zsh 3.0.8
+
+Solaris 11 x86 (Nevada 77)
+
+- /bin/sh
+- bash 3.2.25(1)
+- ksh M-11/16/88i
+- zsh 4.3.4
+
+
+New Features
+------------
+
+Support added to output assert source line number as part of assert messages.
+
+
+Changes and Enhancements
+------------------------
+
+Support for automated testing frameworks.
+
+Added argument count error checking to all functions.
+
+
+Bug Fixes
+---------
+
+Fixed some issues with ksh and zsh support.
+
+Fixed off-by-one of exit value in trap handler.
+
+Fixed handling of null values under ksh.
+
+Fixed bug in last resort temporary directory creation.
+
+
+Deprecated Features
+-------------------
+
+None.
+
+
+Known Bugs and Issues
+---------------------
+
+zsh requires the ``shwordsplit`` option to be set.
+
+Line numbers in assert messages do not work properly with Bash 2.x.
+
+.. vim:fileencoding=latin1:ft=rst:spell:tw=80
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.5.txt b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.5.txt
new file mode 100644
index 000000000..d9f26cec8
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.5.txt
@@ -0,0 +1,128 @@
+Release Notes for shUnit2 2.1.5
+===============================
+
+This release contains several bug fixes and changes. Additionally, it includes
+a rewrite of the test output to better match JUnit and PyUnit.
+
+This version also includes a slightly expanded set of coding standards by which
+shUnit2 is coded. It should help anyone reading the code to better understand
+it.
+
+To use zsh with shUnit2, the following requirement must be met:
+
+- The ``shwordsplit`` option must be set.
+
+Please read the Shell Errata section of the documentation for guidance on how
+to meet these requirements.
+
+See the ``CHANGES-2.1.txt`` file for a full list of changes.
+
+
+Tested Platforms
+----------------
+
+Cygwin
+
+- bash 3.2.39(20)
+- ksh (sym-link to pdksh)
+- pdksh 5.2.14
+- zsh 4.3.4
+
+Linux (Ubuntu Dapper 6.06)
+
+- bash 3.1.17(1)
+- ksh M-1993-12-28
+- pdksh 5.2.14-99/07/13.2
+- zsh 4.2.5
+
+Linux (Ubuntu Hardy 8.04)
+
+- bash 3.2.39(1)
+- dash 0.5.4
+- ksh M-1993-12-28
+- pdksh 5.2.14-99/07/13.2
+- zsh 4.3.4
+
+Mac OS X 10.5.4 (Darwin 9.4.0)
+
+- bash 3.2.17(1)
+- ksh M-1993-12-28
+- zsh 4.3.4
+
+Solaris 9 U6 x86
+
+- /bin/sh
+- bash 2.05.0(1)
+- ksh M-11/16/88i
+- zsh 3.0.8
+
+Solaris 11 x86 (Nevada 77)
+
+- /bin/sh
+- bash 3.2.25(1)
+- ksh M-11/16/88i
+- zsh 4.3.4
+
+
+New Features
+------------
+
+Support added to output the assert source line number as part of assert
+messages.
+
+Issue #2: Added assertNotEquals() assert.
+
+Provided a public ``shunit_tmpDir`` variable that can be used by unit test
+scripts that need automated and guaranteed cleanup.
+
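+For example, a test script might place scratch files beneath it (the file name
+below is illustrative)::
+
+  outputF="${shunit_tmpDir}/output"
+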
+
+Changes and Enhancements
+------------------------
+
+Issue #3: Removed the check for unset variables as shUnit2 should not expect
+scripts being tested to be clean.
+
+Issue #4: Rewrote the test summary. It is now greatly simplified and much more
+script friendly.
+
+Issue #5: Fixed the documentation around the usage of failures.
+
+Issue #9: Added unit tests and improved documentation around the use of macros.
+
+Code updated to meet documented coding standards.
+
+Improved code reuse of ``_shunit_exit()`` and ``_shunit_fatal()`` functions.
+
+All output except shUnit2 error messages now goes to STDOUT.
+
+Converted DocBook documentation to reStructuredText for easier maintenance.
+
+
+Bug Fixes
+---------
+
+Issue #1: Fixed a bug in the trap code where certain types of exit conditions
+did not generate the ending report.
+
+Issue #7: Fixed duplicated printing of messages passed to asserts.
+
+Fixed bugs in ``shlib_relToAbsPath()`` in ``shlib``.
+
+
+Deprecated Features
+-------------------
+
+None.
+
+
+Known Bugs and Issues
+---------------------
+
+Zsh requires the ``shwordsplit`` option to be set. See the documentation for
+examples of how to do this.
+
+Line numbers in assert messages do not work properly with BASH 2.x.
+
+The Bourne shell of Solaris, BASH 2.x, and Zsh 3.0.x do not properly catch the
+SIGTERM signal. As such, shell interpreter failures due to such things as
+unbound variables cannot be caught. (See ``shunit_test_misc.sh``)
+
+
+.. vim:fileencoding=latin1:ft=rst:spell:tw=80
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.6.txt b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.6.txt
new file mode 100644
index 000000000..50087fe48
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.6.txt
@@ -0,0 +1,112 @@
+Release Notes for shUnit2 2.1.6
+===============================
+
+This release contains bug fixes and changes. It is also the first release to
+support running shunit2 as a standalone program.
+
+Please read the Shell Errata section of the documentation for guidance on how
+to meet shell-specific requirements.
+
+See the ``CHANGES-2.1.txt`` file for a full list of changes.
+
+New Features
+------------
+
+Support for running shUnit2 as a standalone program. This makes it possible for
+users to execute their unit tests in a manner that does not depend on where an
+OS distribution maintainer chose to place shUnit2 in the file system.
+
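+A minimal standalone invocation looks something like this (the test script
+name below is illustrative)::
+
+  /path/to/shunit2 equality_test.sh
+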
+Added support for functions defined like 'function someFunction()'.
+
+Changes and Enhancements
+------------------------
+
+Renamed the public ``shunit_tmpDir`` variable to ``SHUNIT_TMPDIR`` to be more
+consistent with the ``TMPDIR`` variable.
+
+Bug Fixes
+---------
+
+Fixed issue where shunit2 would fail on some distributions when creating a
+temporary directory because the **od** command was not present.
+
+Deprecated Features
+-------------------
+
+None.
+
+Known Bugs and Issues
+---------------------
+
+Zsh requires the ``shwordsplit`` option to be set. See the documentation for
+examples of how to do this.
+
+Line numbers in assert messages do not work properly with BASH 2.x.
+
+The Bourne shell of Solaris, BASH 2.x, and Zsh 3.0.x do not properly catch the
+SIGTERM signal. As such, shell interpreter failures due to such things as
+unbound variables cannot be caught. (See ``shunit_test_misc.sh``)
+
+Tested Platforms
+----------------
+
+Cygwin 1.7.9 (Windows XP SP2)
+
+- bash 4.1.10(4)
+- dash 0.5.6.1
+- ksh (sym-link to pdksh)
+- pdksh 5.2.14
+- zsh 4.3.11
+
+Linux (Ubuntu Dapper 6.06.2 LTS)
+
+- bash 3.1.17(1)
+- dash 0.5.3
+- ksh (sym-link to pdksh)
+- pdksh 5.2.14-99/07/13.2
+- zsh 4.2.5
+
+Linux (Ubuntu Hardy 8.04.4 LTS)
+
+- bash 3.2.39(1)
+- dash 0.5.4
+- ksh M-1993-12-28
+- pdksh 5.2.14-99/07/13.2
+- zsh 4.3.4
+
+Linux (Ubuntu Lucid 10.04.2 LTS)
+
+- bash 4.1.5(1)
+- dash 0.5.5.1
+- ksh JM-93t+-2009-05-01
+- pdksh 5.2.14-99/07/13.2
+- zsh 4.3.10
+
+Mac OS X 10.6.7
+
+- bash 3.2.48(1)
+- ksh M-1993-12-28
+- zsh 4.3.9
+
+Solaris 8 U7 x86
+
+- /bin/sh
+- bash 2.03.0(1)
+- ksh M-11/16/88i
+- zsh 3.0.6
+
+Solaris 9 U6 x86
+
+- /bin/sh
+- bash 2.05.0(1)
+- ksh M-11/16/88i
+- zsh 3.0.8
+
+OpenSolaris 2009.06(snv_111b) x86
+
+- /bin/sh
+- bash 3.2.25(1)
+- ksh 2008-11-04
+
+.. vim:fileencoding=latin1:ft=rst:spell:tw=80
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.7.md b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.7.md
new file mode 100644
index 000000000..044564d8b
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.7.md
@@ -0,0 +1,66 @@
+# shUnit2 2.1.7 Release Notes
+
+https://github.com/kward/shunit2
+
+This release contains bug fixes and enhancements. It is the first release since moving to GitHub. Users can now clone the latest version at any time.
+
+See the `CHANGES-2.1.md` file for a full list of changes.
+
+
+## New Features
+
+Colorized output, based on popular demand. shUnit2 output is now colorized based on the result of the asserts.
+
+
+## Changes and Enhancements
+
+With the move to GitHub, the shUnit2 unit tests are run on every commit using the [Travis CI][TravisCI] continuous integration framework. Additionally, all code is run through [ShellCheck](http://www.shellcheck.net/) on every commit.
+
+[TravisCI]: https://travis-ci.org/kward/shunit2
+
+Shell commands in shUnit2 are prefixed with '\' so that they can be stubbed in tests.
+
+
+## Bug Fixes
+
+shUnit2 no longer exits with an 'OK' result if there are syntax errors due to incorrect usage of the assert commands.
+
+
+## Deprecated Features
+
+None.
+
+
+## Known Bugs and Issues
+
+Zsh requires the `shwordsplit` option to be set. See the documentation for examples of how to do this.
+
+Line numbers in assert messages do not work properly with BASH 2.x.
+
+The Bourne shell of Solaris, BASH 2.x, and Zsh 3.0.x do not properly catch the
+SIGTERM signal. As such, shell interpreter failures due to such things as
+unbound variables cannot be caught. (See `shunit_test_misc.sh`)
+
+
+## Tested Platforms
+
+Continuous integration testing is provided by
+[Travis CI](https://travis-ci.org/).
+
+https://travis-ci.org/github/kward/shunit2
+
+Tested OSes:
+
+- Linux
+- macOS
+
+Tested shells:
+
+- /bin/sh
+- ash
+- bash
+- dash
+- ksh
+- pdksh
+- zsh
+
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.8.md b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.8.md
new file mode 100644
index 000000000..d09d16ae8
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/RELEASE_NOTES-2.1.8.md
@@ -0,0 +1,56 @@
+# shUnit2 2.1.8 Release Notes
+
+https://github.com/kward/shunit2
+
+This release contains bug fixes and enhancements. See the `CHANGES-2.1.md` file
+for a full list of changes.
+
+## New features
+
+Users can now define a custom prefix for test function names. The prefix can be
+configured by defining a `SHUNIT_TEST_PREFIX` variable.
+
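+A minimal sketch of how this might be used (the prefix value below is
+illustrative; see the shUnit2 documentation for the exact semantics of the
+variable):
+
+```sh
+#! /bin/sh
+SHUNIT_TEST_PREFIX='suite1: '
+
+testEquality() { assertEquals 1 1; }
+
+# Load and run shUnit2.
+. ./shunit2
+```
+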
+## Bug fixes
+
+Syntax errors in functions are now treated as test failures.
+
+Tests now fail when `setUp()` or `tearDown()` fail.
+
+## Deprecated features
+
+None.
+
+## Known bugs and issues
+
+Zsh requires the `shwordsplit` option to be set. See the documentation for examples of how to do this.
+
+Line numbers in assert messages do not work properly with BASH 2.x.
+
+The Bourne shell of Solaris, BASH 2.x, and Zsh 3.0.x do not properly catch the
+SIGTERM signal. As such, shell interpreter failures due to such things as
+unbound variables cannot be caught. (See `shunit_test_misc.sh`)
+
+shUnit2 does not work when the `-e` shell option is set (typically done with
+`set -e`).
+
+## Tested platforms
+
+Continuous integration testing is provided by
+[Travis CI](https://travis-ci.org/).
+
+https://travis-ci.org/github/kward/shunit2
+
+Tested OSes:
+
+- Linux
+- macOS
+
+Tested shells:
+
+- /bin/sh
+- ash
+- bash
+- dash
+- ksh
+- pdksh
+- zsh
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/TODO.txt b/src/fluent-bit/tests/lib/shunit2/doc/TODO.txt
new file mode 100644
index 000000000..cecc17dd5
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/TODO.txt
@@ -0,0 +1,13 @@
+Make it possible to execute a single test by passing the name of the test on
+the command line.
+
+Add support for '--randomize-order' so that the test order is randomized to
+check for dependencies (which shouldn't be there) between tests.
+
+--debug option to display point in source code (line number and such) where the
+problem showed up.
+
+assertTrue() just gives 'ASSERT:', nothing else :-(. others too?
+upd: assertNull() will give message passed, but nothing else useful :-(
+
+$Revision$
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/contributors.md b/src/fluent-bit/tests/lib/shunit2/doc/contributors.md
new file mode 100644
index 000000000..7adae2234
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/contributors.md
@@ -0,0 +1,15 @@
+The original author of shunit2 is Kate Ward. The following people have
+contributed in some way or another to shunit2.
+
+- [Alex Harvey](https://github.com/alexharv074)
+- Bryan Larsen
+- [David Acacio](https://github.com/dacacioa)
+- Kevin Van Horn
+- [Maciej Bliziński](https://github.com/automatthias)
+- Mario Sparada
+- Mathias Goldau
+- Richard Jensen
+- Rob Holland
+- Rocky Bernstein
+- [rugk](https://github.com/rugk)
+- wood4321 (of code.google.com)
diff --git a/src/fluent-bit/tests/lib/shunit2/doc/design_doc.txt b/src/fluent-bit/tests/lib/shunit2/doc/design_doc.txt
new file mode 100644
index 000000000..24d41ff19
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/doc/design_doc.txt
@@ -0,0 +1,34 @@
+Design Doc for shUnit
+
+shUnit is based upon JUnit. The initial ideas for the script came from the book
+"Pragmatic Unit Testing - In Java with JUnit" by Andrew Hunt and David Thomas.
+
+The script was written to perform unit testing for log4sh. log4sh had grown
+enough that it was becoming difficult to easily test and verify that the
+tests passed for the many different operating systems on which it was being
+used.
+
+The functions in shUnit are meant to match those in JUnit as much as possible
+where shell allows. In the initial version, there will be no concept of
+exceptions (as normal POSIX shell has no concept of them) but attempts to trap
+problems will be done.
+
+Programmatic Standards:
+
+* SHUNIT_TRUE - public global constant
+* __SHUNIT_SHELL_FLAGS - private global constant
+* __shunit_oldShellFlags - private global variable
+
+* assertEquals - public unit test function
+* shunit_publicFunc - public shUnit function; can be called from parent unit
+ test script
+* _shunit_privateFunc - private shUnit function; should not be called from
+ parent script. meant for internal use by shUnit
+
+* _su_myVar - variable inside a public function. prefixing with '_su_' to
+ reduce the chances that a variable outside of shUnit will be overridden.
+* _su__myVar - variable inside a private function. prefixing with '_su__' to
+ reduce the chances that a variable in a shUnit public function, or a variable
+ outside of shUnit will be overridden.
+
+$Revision$
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/equality_test.sh b/src/fluent-bit/tests/lib/shunit2/examples/equality_test.sh
new file mode 100755
index 000000000..7b5664033
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/equality_test.sh
@@ -0,0 +1,9 @@
+#! /bin/sh
+# file: examples/equality_test.sh
+
+testEquality() {
+ assertEquals 1 1
+}
+
+# Load and run shUnit2.
+. ../shunit2
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/lineno_test.sh b/src/fluent-bit/tests/lib/shunit2/examples/lineno_test.sh
new file mode 100755
index 000000000..11ddfc8a0
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/lineno_test.sh
@@ -0,0 +1,15 @@
+#! /bin/sh
+# file: examples/lineno_test.sh
+
+testLineNo() {
+ # This assert will have line numbers included (e.g. "ASSERT:[123] ...") if
+ # they are supported.
+ echo "_ASSERT_EQUALS_ macro value: ${_ASSERT_EQUALS_}"
+ ${_ASSERT_EQUALS_} '"not equal"' 1 2
+
+ # This assert will not have line numbers included (e.g. "ASSERT: ...").
+ assertEquals 'not equal' 1 2
+}
+
+# Load and run shUnit2.
+. ../shunit2
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/math.inc b/src/fluent-bit/tests/lib/shunit2/examples/math.inc
new file mode 100644
index 000000000..409710669
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/math.inc
@@ -0,0 +1,17 @@
+# available as examples/math.inc
+
+add_generic()
+{
+ num_a=$1
+ num_b=$2
+
+ expr $1 + $2
+}
+
+add_bash()
+{
+ num_a=$1
+ num_b=$2
+
+ echo $(($1 + $2))
+}
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/math_test.sh b/src/fluent-bit/tests/lib/shunit2/examples/math_test.sh
new file mode 100755
index 000000000..c6d00299d
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/math_test.sh
@@ -0,0 +1,25 @@
+#! /bin/sh
+# file: examples/math_test.sh
+
+testAdding() {
+ result=`add_generic 1 2`
+ assertEquals \
+ "the result of '${result}' was wrong" \
+ 3 "${result}"
+
+ # Disable non-generic tests.
+ [ -z "${BASH_VERSION:-}" ] && startSkipping
+
+ result=`add_bash 1 2`
+ assertEquals \
+ "the result of '${result}' was wrong" \
+ 3 "${result}"
+}
+
+oneTimeSetUp() {
+ # Load include to test.
+ . ./math.inc
+}
+
+# Load and run shUnit2.
+. ../shunit2
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/mkdir_test.sh b/src/fluent-bit/tests/lib/shunit2/examples/mkdir_test.sh
new file mode 100755
index 000000000..a608e3aa3
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/mkdir_test.sh
@@ -0,0 +1,79 @@
+#!/bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008-2019 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# shUnit2 -- Unit testing framework for Unix shell scripts.
+# https://github.com/kward/shunit2
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# Example unit test for the mkdir command.
+#
+# There are times when an existing shell script needs to be tested. In this
+# example, we will test several aspects of the the mkdir command, but the
+# techniques could be used for any existing shell script.
+
+testMissingDirectoryCreation() {
+ ${mkdirCmd} "${testDir}" >${stdoutF} 2>${stderrF}
+ rtrn=$?
+ th_assertTrueWithNoOutput ${rtrn} "${stdoutF}" "${stderrF}"
+
+ assertTrue 'directory missing' "[ -d '${testDir}' ]"
+}
+
+testExistingDirectoryCreationFails() {
+ # Create a directory to test against.
+ ${mkdirCmd} "${testDir}"
+
+ # Test for expected failure while trying to create directory that exists.
+ ${mkdirCmd} "${testDir}" >${stdoutF} 2>${stderrF}
+ rtrn=$?
+ assertFalse 'expecting return code of 1 (false)' ${rtrn}
+ assertNull 'unexpected output to stdout' "`cat ${stdoutF}`"
+ assertNotNull 'expected error message to stderr' "`cat ${stderrF}`"
+
+ assertTrue 'directory missing' "[ -d '${testDir}' ]"
+}
+
+testRecursiveDirectoryCreation() {
+ testDir2="${testDir}/test2"
+
+ ${mkdirCmd} -p "${testDir2}" >${stdoutF} 2>${stderrF}
+ rtrn=$?
+ th_assertTrueWithNoOutput ${rtrn} "${stdoutF}" "${stderrF}"
+
+ assertTrue 'first directory missing' "[ -d '${testDir}' ]"
+ assertTrue 'second directory missing' "[ -d '${testDir2}' ]"
+}
+
+th_assertTrueWithNoOutput() {
+ th_return_=$1
+ th_stdout_=$2
+ th_stderr_=$3
+
+ assertFalse 'unexpected output to STDOUT' "[ -s '${th_stdout_}' ]"
+ assertFalse 'unexpected output to STDERR' "[ -s '${th_stderr_}' ]"
+
+ unset th_return_ th_stdout_ th_stderr_
+}
+
+oneTimeSetUp() {
+ outputDir="${SHUNIT_TMPDIR}/output"
+ mkdir "${outputDir}"
+ stdoutF="${outputDir}/stdout"
+ stderrF="${outputDir}/stderr"
+
+ mkdirCmd='mkdir' # save command name in variable to make future changes easy
+ testDir="${SHUNIT_TMPDIR}/some_test_dir"
+}
+
+tearDown() {
+ rm -fr "${testDir}"
+}
+
+# Load and run shUnit2.
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. ../shunit2
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/mock_file.sh b/src/fluent-bit/tests/lib/shunit2/examples/mock_file.sh
new file mode 100755
index 000000000..812e44886
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/mock_file.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+#
+# shUnit2 example for mocking files.
+#
+# This example demonstrates two different mechanisms for mocking files on the
+# system. The first method is preferred for testing specific aspects of a file,
+# and the second method is preferred when multiple tests need access to the
+# same mock data.
+#
+# When mocking files, the key thing of importance is providing the code under
+# test with the correct file to read. The best practice for writing code where
+# files need to be mocked is either:
+# - Pass the filename to be tested into a function and test that function, or
+# - Provide a function that returns the name of the filename to be read.
+#
+# The first case is preferred whenever possible as it allows the unit test to
+# be explicit about what is being tested. The second case is useful when the
+# first case is not achievable.
+#
+# For the second case, there are two common methods to mock the filename
+# returned by the function:
+# - Provide a special value (e.g. a mock variable) that is only available
+# during testing, or
+# - Override something (e.g. the constant) in the test script.
+#
+# The first case is preferred as it doesn't require the unit test to alter code
+# in any way. Yes, it means that the code itself knows that it is under test,
+# and it behaves slightly differently than under normal conditions, but a
+# visual inspection of the code by the developer should be sufficient to
+# validate proper functionality of such a simple function.
+
+# Treat unset variables as an error.
+set -u
+
+PASSWD='/etc/passwd'
+
+# Read the root UID from the passwd filename provided as the first argument.
+root_uid_from_passed_filename() {
+ filename=$1
+ root_uid "${filename}"
+ unset filename
+}
+
+
+# Read the root UID from the passwd filename derived by call to the
+# passwd_filename() function.
+root_uid_from_derived_filename() {
+ root_uid "$(passwd_filename)"
+}
+
+passwd_filename() {
+ if [ -n "${MOCK_PASSWD:-}" ]; then
+ echo "${MOCK_PASSWD}" # Mock file for testing.
+ return
+ fi
+ echo "${PASSWD}"
+}
+
+
+# Extract the root UID.
+root_uid() { awk -F: 'u==$1{print $3}' u=root "$1"; }
+
+
+main() {
+ echo "root_uid_from_passed_filename:"
+ root_uid_from_passed_filename "${PASSWD}"
+
+ echo
+
+ echo "root_uid_from_derived_filename:"
+ root_uid_from_derived_filename
+}
+
+
+# Execute main() if this is run in standalone mode (i.e. not in a unit test).
+ARGV0="$(basename "$0")"
+argv0="$(echo "${ARGV0}" |sed 's/_test$//;s/_test\.sh$//')"
+if [ "${ARGV0}" = "${argv0}" ]; then
+ main "$@"
+fi
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/mock_file_test.sh b/src/fluent-bit/tests/lib/shunit2/examples/mock_file_test.sh
new file mode 100755
index 000000000..1da8dd2a3
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/mock_file_test.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# shUnit2 example for mocking files.
+
+MOCK_PASSWD='' # This will be overridden in oneTimeSetUp().
+
+test_root_uid_from_passed_filename() {
+ result="$(root_uid_from_passed_filename "${MOCK_PASSWD}")"
+ assertEquals 'unexpected root uid' '0' "${result}"
+}
+
+test_root_uid_from_derived_filename() {
+ result="$(root_uid_from_derived_filename)"
+ assertEquals 'unexpected root uid' '0' "${result}"
+}
+
+oneTimeSetUp() {
+ # Provide a mock passwd file for testing. This will be cleaned up
+ # automatically by shUnit2.
+ MOCK_PASSWD="${SHUNIT_TMPDIR}/passwd"
+ cat <<EOF >"${MOCK_PASSWD}"
+nobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false
+root:*:0:0:System Administrator:/var/root:/bin/sh
+daemon:*:1:1:System Services:/var/root:/usr/bin/false
+EOF
+
+ # Load script under test.
+ . './mock_file.sh'
+}
+
+# Load and run shUnit2.
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. ../shunit2
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/party_test.sh b/src/fluent-bit/tests/lib/shunit2/examples/party_test.sh
new file mode 100755
index 000000000..41bd124f2
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/party_test.sh
@@ -0,0 +1,16 @@
+#! /bin/sh
+# file: examples/party_test.sh
+#
+# This test is mostly for fun. Technically, it is a bad example of a unit test
+# because of the temporal requirement, namely that the year be 1999. A better
+# test would have been to pass in both a known-bad and known-good year into a
+# function, and test for the expected result.
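+#
+# A rough sketch of that better approach (the helper and test names below are
+# illustrative, not part of this example):
+#
+#   isPartyYear() { test "$1" = '1999'; }
+#   testGoodYear() { isPartyYear '1999'; assertTrue 'expected a party' $?; }
+#   testBadYear()  { isPartyYear '2038'; assertFalse 'unexpected party' $?; }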
+
+testPartyLikeItIs1999() {
+ year=`date '+%Y'`
+ assertEquals "It's not 1999 :-(" \
+ '1999' "${year}"
+}
+
+# Load and run shUnit2.
+. ../shunit2
diff --git a/src/fluent-bit/tests/lib/shunit2/examples/suite_test.sh b/src/fluent-bit/tests/lib/shunit2/examples/suite_test.sh
new file mode 100755
index 000000000..278c3a01c
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/examples/suite_test.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# file: examples/suite_test.sh
+#
+# This test demonstrates the use of suites. Note: the suite functionality is
+# deprecated as of v2.1.0, and will be removed in a future major release.
+
+# suite is a special function called by shUnit2 to setup a suite of tests. It
+# enables a developer to call a set of functions that contain tests without
+# needing to rename the functions to start with "test".
+#
+# Tests that are to be called from within `suite()` are added to the list of
+# executable tests by means of the `suite_addTest()` function.
+suite() {
+ # Add the suite_test_one() function to the list of executable tests.
+ suite_addTest suite_test_one
+
+ # Call the suite_test_two() function, but note that the test results will not
+ # be added to the global stats, and therefore not reported at the end of the
+ # unit test execution.
+ suite_test_two
+}
+
+suite_test_one() {
+ assertEquals 1 1
+}
+
+suite_test_two() {
+ assertNotEquals 1 2
+}
+
+# Load and run shUnit2.
+. ../shunit2
diff --git a/src/fluent-bit/tests/lib/shunit2/lib/shflags b/src/fluent-bit/tests/lib/shunit2/lib/shflags
new file mode 100644
index 000000000..70cdea4db
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/lib/shflags
@@ -0,0 +1,1222 @@
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008-2017 Kate Ward. All Rights Reserved.
+# Released under the Apache License 2.0 license.
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# shFlags -- Advanced command-line flag library for Unix shell scripts.
+# https://github.com/kward/shflags
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# This module implements something like the gflags library available
+# from https://github.com/gflags/gflags.
+#
+# FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags take
+# a name, default value, help-string, and optional 'short' name (one-letter
+# name). Some flags have other arguments, which are described with the flag.
+#
+# DEFINE_string: takes any input, and interprets it as a string.
+#
+# DEFINE_boolean: does not take any arguments. Say --myflag to set
+# FLAGS_myflag to true, or --nomyflag to set FLAGS_myflag to false. For short
+# flags, passing the flag on the command-line negates the default value, i.e.
+# if the default is true, passing the flag sets the value to false.
+#
+# DEFINE_float: takes an input and interprets it as a floating point number. As
+# shell does not support floats per-se, the input is merely validated as
+# being a valid floating point value.
+#
+# DEFINE_integer: takes an input and interprets it as an integer.
+#
+# SPECIAL FLAGS: There are a few flags that have special meaning:
+# --help (or -?) prints a list of all the flags in a human-readable fashion
+# --flagfile=foo read flags from foo. (not implemented yet)
+# -- as in getopt(), terminates flag-processing
+#
+# EXAMPLE USAGE:
+#
+# -- begin hello.sh --
+# #! /bin/sh
+# . ./shflags
+# DEFINE_string name 'world' "somebody's name" n
+# FLAGS "$@" || exit $?
+# eval set -- "${FLAGS_ARGV}"
+# echo "Hello, ${FLAGS_name}."
+# -- end hello.sh --
+#
+# $ ./hello.sh -n Kate
+# Hello, Kate.
+#
+# CUSTOMIZABLE BEHAVIOR:
+#
+# A script can override the default 'getopt' command by providing the path to
+# an alternate implementation by defining the FLAGS_GETOPT_CMD variable.
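+#
+# For example (the getopt path below is purely illustrative), define the
+# variable before sourcing shFlags:
+#
+#   FLAGS_GETOPT_CMD=/opt/gnu/bin/getopt
+#   . ./shflags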
+#
+# NOTES:
+#
+# * Not all systems include a getopt version that supports long flags. On these
+# systems, only short flags are recognized.
+
+#==============================================================================
+# shFlags
+#
+# Shared attributes:
+# flags_error: last error message
+# flags_output: last function output (rarely valid)
+# flags_return: last return value
+#
+# __flags_longNames: list of long names for all flags
+# __flags_shortNames: list of short names for all flags
+# __flags_boolNames: list of boolean flag names
+#
+# __flags_opts: options parsed by getopt
+#
+# Per-flag attributes:
+# FLAGS_<flag_name>: contains value of flag named 'flag_name'
+# __flags_<flag_name>_default: the default flag value
+# __flags_<flag_name>_help: the flag help string
+# __flags_<flag_name>_short: the flag short name
+# __flags_<flag_name>_type: the flag type
+#
+# Notes:
+# - lists of strings are space separated, and a null value is the '~' char.
+#
+### ShellCheck (http://www.shellcheck.net/)
+# $() are not fully portable (POSIX != portable).
+# shellcheck disable=SC2006
+# [ p -a q ] are well defined enough (vs [ p ] && [ q ]).
+# shellcheck disable=SC2166
+
+# Return if FLAGS already loaded.
+[ -n "${FLAGS_VERSION:-}" ] && return 0
+FLAGS_VERSION='1.2.3pre'
+
+# Return values that scripts can use.
+FLAGS_TRUE=0
+FLAGS_FALSE=1
+FLAGS_ERROR=2
+
+# Logging levels.
+FLAGS_LEVEL_DEBUG=0
+FLAGS_LEVEL_INFO=1
+FLAGS_LEVEL_WARN=2
+FLAGS_LEVEL_ERROR=3
+FLAGS_LEVEL_FATAL=4
+__FLAGS_LEVEL_DEFAULT=${FLAGS_LEVEL_WARN}
+
+# Determine some reasonable command defaults.
+__FLAGS_EXPR_CMD='expr --'
+__FLAGS_UNAME_S=`uname -s`
+if [ "${__FLAGS_UNAME_S}" = 'BSD' ]; then
+ __FLAGS_EXPR_CMD='gexpr --'
+else
+ _flags_output_=`${__FLAGS_EXPR_CMD} 2>&1`
+ if [ $? -eq ${FLAGS_TRUE} -a "${_flags_output_}" = '--' ]; then
+ # We are likely running inside BusyBox.
+ __FLAGS_EXPR_CMD='expr'
+ fi
+ unset _flags_output_
+fi
+
+# Commands a user can override if desired.
+FLAGS_EXPR_CMD=${FLAGS_EXPR_CMD:-${__FLAGS_EXPR_CMD}}
+FLAGS_GETOPT_CMD=${FLAGS_GETOPT_CMD:-getopt}
+
+# Specific shell checks.
+if [ -n "${ZSH_VERSION:-}" ]; then
+ setopt |grep "^shwordsplit$" >/dev/null
+ if [ $? -ne ${FLAGS_TRUE} ]; then
+ _flags_fatal 'zsh shwordsplit option is required for proper zsh operation'
+ fi
+ if [ -z "${FLAGS_PARENT:-}" ]; then
+ _flags_fatal "zsh does not pass \$0 through properly. please declare' \
+\"FLAGS_PARENT=\$0\" before calling shFlags"
+ fi
+fi
+
+# Can we use built-ins?
+( echo "${FLAGS_TRUE#0}"; ) >/dev/null 2>&1
+if [ $? -eq ${FLAGS_TRUE} ]; then
+ __FLAGS_USE_BUILTIN=${FLAGS_TRUE}
+else
+ __FLAGS_USE_BUILTIN=${FLAGS_FALSE}
+fi
+
+
+#
+# Constants.
+#
+
+# Reserved flag names.
+__FLAGS_RESERVED_LIST=' ARGC ARGV ERROR FALSE GETOPT_CMD HELP PARENT TRUE '
+__FLAGS_RESERVED_LIST="${__FLAGS_RESERVED_LIST} VERSION "
+
+# Determined getopt version (standard or enhanced).
+__FLAGS_GETOPT_VERS_STD=0
+__FLAGS_GETOPT_VERS_ENH=1
+
+# shellcheck disable=SC2120
+_flags_getopt_vers() {
+ _flags_getopt_cmd_=${1:-${FLAGS_GETOPT_CMD}}
+ case "`${_flags_getopt_cmd_} -lfoo '' --foo 2>&1`" in
+ ' -- --foo') echo ${__FLAGS_GETOPT_VERS_STD} ;;
+ ' --foo --') echo ${__FLAGS_GETOPT_VERS_ENH} ;;
+ # Unrecognized output. Assuming standard getopt version.
+ *) echo ${__FLAGS_GETOPT_VERS_STD} ;;
+ esac
+ unset _flags_getopt_cmd_
+}
+# shellcheck disable=SC2119
+__FLAGS_GETOPT_VERS=`_flags_getopt_vers`
+
+# getopt optstring lengths
+__FLAGS_OPTSTR_SHORT=0
+__FLAGS_OPTSTR_LONG=1
+
+__FLAGS_NULL='~'
+
+# Flag info strings.
+__FLAGS_INFO_DEFAULT='default'
+__FLAGS_INFO_HELP='help'
+__FLAGS_INFO_SHORT='short'
+__FLAGS_INFO_TYPE='type'
+
+# Flag lengths.
+__FLAGS_LEN_SHORT=0
+__FLAGS_LEN_LONG=1
+
+# Flag types.
+__FLAGS_TYPE_NONE=0
+__FLAGS_TYPE_BOOLEAN=1
+__FLAGS_TYPE_FLOAT=2
+__FLAGS_TYPE_INTEGER=3
+__FLAGS_TYPE_STRING=4
+
+# Set the constants readonly.
+__flags_constants=`set |awk -F= '/^FLAGS_/ || /^__FLAGS_/ {print $1}'`
+for __flags_const in ${__flags_constants}; do
+ # Skip certain flags.
+ case ${__flags_const} in
+ FLAGS_HELP) continue ;;
+ FLAGS_PARENT) continue ;;
+ esac
+ # Set flag readonly.
+ if [ -z "${ZSH_VERSION:-}" ]; then
+ readonly "${__flags_const}"
+ continue
+ fi
+ case ${ZSH_VERSION} in
+ [123].*) readonly "${__flags_const}" ;;
+ *) readonly -g "${__flags_const}" ;; # Declare readonly constants globally.
+ esac
+done
+unset __flags_const __flags_constants
+
+#
+# Internal variables.
+#
+
+# Space separated lists.
+__flags_boolNames=' ' # Boolean flag names.
+__flags_longNames=' ' # Long flag names.
+__flags_shortNames=' ' # Short flag names.
+__flags_definedNames=' ' # Defined flag names (used for validation).
+
+__flags_columns='' # Screen width in columns.
+__flags_level=0 # Default logging level.
+__flags_opts='' # Temporary storage for parsed getopt flags.
+
+#------------------------------------------------------------------------------
+# Private functions.
+#
+
+# Logging functions.
+_flags_debug() {
+ [ ${__flags_level} -le ${FLAGS_LEVEL_DEBUG} ] || return
+ echo "flags:DEBUG $*" >&2
+}
+_flags_info() {
+ [ ${__flags_level} -le ${FLAGS_LEVEL_INFO} ] || return
+ echo "flags:INFO $*" >&2
+}
+_flags_warn() {
+ [ ${__flags_level} -le ${FLAGS_LEVEL_WARN} ] || return
+ echo "flags:WARN $*" >&2
+}
+_flags_error() {
+ [ ${__flags_level} -le ${FLAGS_LEVEL_ERROR} ] || return
+ echo "flags:ERROR $*" >&2
+}
+_flags_fatal() {
+ [ ${__flags_level} -le ${FLAGS_LEVEL_FATAL} ] || return
+ echo "flags:FATAL $*" >&2
+ exit ${FLAGS_ERROR}
+}
+
+# Get the logging level.
+flags_loggingLevel() { echo ${__flags_level}; }
+
+# Set the logging level.
+#
+# Args:
+# _flags_level_: integer: new logging level
+# Returns:
+# nothing
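+#
+# Example (illustrative): flags_setLoggingLevel ${FLAGS_LEVEL_INFO}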
+flags_setLoggingLevel() {
+ [ $# -ne 1 ] && _flags_fatal "flags_setLevel(): logging level missing"
+ _flags_level_=$1
+ [ "${_flags_level_}" -ge "${FLAGS_LEVEL_DEBUG}" \
+ -a "${_flags_level_}" -le "${FLAGS_LEVEL_FATAL}" ] \
+ || _flags_fatal "Invalid logging level '${_flags_level_}' specified."
+ __flags_level=$1
+ unset _flags_level_
+}
+
+# Define a flag.
+#
+# Calling this function will define the following info variables for the
+# specified flag:
+# FLAGS_flagname - the name for this flag (based upon the long flag name)
+# __flags_<flag_name>_default - the default value
+# __flags_flagname_help - the help string
+# __flags_flagname_short - the single letter alias
+# __flags_flagname_type - the type of flag (one of __FLAGS_TYPE_*)
+#
+# Args:
+# _flags_type_: integer: internal type of flag (__FLAGS_TYPE_*)
+# _flags_name_: string: long flag name
+# _flags_default_: default flag value
+# _flags_help_: string: help string
+# _flags_short_: string: (optional) short flag name
+# Returns:
+# integer: success of operation, or error
+_flags_define() {
+ if [ $# -lt 4 ]; then
+ flags_error='DEFINE error: too few arguments'
+ flags_return=${FLAGS_ERROR}
+ _flags_error "${flags_error}"
+ return ${flags_return}
+ fi
+
+ _flags_type_=$1
+ _flags_name_=$2
+ _flags_default_=$3
+ _flags_help_=${4:-§} # Special value '§' indicates no help string provided.
+ _flags_short_=${5:-${__FLAGS_NULL}}
+
+ _flags_debug "type:${_flags_type_} name:${_flags_name_}" \
+ "default:'${_flags_default_}' help:'${_flags_help_}'" \
+ "short:${_flags_short_}"
+
+ _flags_return_=${FLAGS_TRUE}
+ _flags_usName_="`_flags_underscoreName "${_flags_name_}"`"
+
+ # Check whether the flag name is reserved.
+ _flags_itemInList "${_flags_usName_}" "${__FLAGS_RESERVED_LIST}"
+ if [ $? -eq ${FLAGS_TRUE} ]; then
+ flags_error="flag name (${_flags_name_}) is reserved"
+ _flags_return_=${FLAGS_ERROR}
+ fi
+
+  # Require a short option when getopt doesn't support long options.
+ if [ ${_flags_return_} -eq ${FLAGS_TRUE} \
+ -a "${__FLAGS_GETOPT_VERS}" -ne "${__FLAGS_GETOPT_VERS_ENH}" \
+ -a "${_flags_short_}" = "${__FLAGS_NULL}" ]
+ then
+ flags_error="short flag required for (${_flags_name_}) on this platform"
+ _flags_return_=${FLAGS_ERROR}
+ fi
+
+ # Check for existing long name definition.
+ if [ ${_flags_return_} -eq ${FLAGS_TRUE} ]; then
+ if _flags_itemInList "${_flags_usName_}" "${__flags_definedNames}"; then
+ flags_error="definition for ([no]${_flags_name_}) already exists"
+ _flags_warn "${flags_error}"
+ _flags_return_=${FLAGS_FALSE}
+ fi
+ fi
+
+ # Check for existing short name definition.
+ if [ ${_flags_return_} -eq ${FLAGS_TRUE} \
+ -a "${_flags_short_}" != "${__FLAGS_NULL}" ]
+ then
+ if _flags_itemInList "${_flags_short_}" "${__flags_shortNames}"; then
+ flags_error="flag short name (${_flags_short_}) already defined"
+ _flags_warn "${flags_error}"
+ _flags_return_=${FLAGS_FALSE}
+ fi
+ fi
+
+ # Handle default value. Note, on several occasions the 'if' portion of an
+ # if/then/else contains just a ':' which does nothing. A binary reversal via
+ # '!' is not done because it does not work on all shells.
+ if [ ${_flags_return_} -eq ${FLAGS_TRUE} ]; then
+ case ${_flags_type_} in
+ ${__FLAGS_TYPE_BOOLEAN})
+ if _flags_validBool "${_flags_default_}"; then
+ case ${_flags_default_} in
+ true|t|0) _flags_default_=${FLAGS_TRUE} ;;
+ false|f|1) _flags_default_=${FLAGS_FALSE} ;;
+ esac
+ else
+ flags_error="invalid default flag value '${_flags_default_}'"
+ _flags_return_=${FLAGS_ERROR}
+ fi
+ ;;
+
+ ${__FLAGS_TYPE_FLOAT})
+ if _flags_validFloat "${_flags_default_}"; then
+ :
+ else
+ flags_error="invalid default flag value '${_flags_default_}'"
+ _flags_return_=${FLAGS_ERROR}
+ fi
+ ;;
+
+ ${__FLAGS_TYPE_INTEGER})
+ if _flags_validInt "${_flags_default_}"; then
+ :
+ else
+ flags_error="invalid default flag value '${_flags_default_}'"
+ _flags_return_=${FLAGS_ERROR}
+ fi
+ ;;
+
+ ${__FLAGS_TYPE_STRING}) ;; # Everything in shell is a valid string.
+
+ *)
+ flags_error="unrecognized flag type '${_flags_type_}'"
+ _flags_return_=${FLAGS_ERROR}
+ ;;
+ esac
+ fi
+
+ if [ ${_flags_return_} -eq ${FLAGS_TRUE} ]; then
+ # Store flag information.
+ eval "FLAGS_${_flags_usName_}='${_flags_default_}'"
+ eval "__flags_${_flags_usName_}_${__FLAGS_INFO_TYPE}=${_flags_type_}"
+ eval "__flags_${_flags_usName_}_${__FLAGS_INFO_DEFAULT}=\
+\"${_flags_default_}\""
+ eval "__flags_${_flags_usName_}_${__FLAGS_INFO_HELP}=\"${_flags_help_}\""
+ eval "__flags_${_flags_usName_}_${__FLAGS_INFO_SHORT}='${_flags_short_}'"
+
+ # append flag names to name lists
+ __flags_shortNames="${__flags_shortNames}${_flags_short_} "
+ __flags_longNames="${__flags_longNames}${_flags_name_} "
+ [ "${_flags_type_}" -eq "${__FLAGS_TYPE_BOOLEAN}" ] && \
+ __flags_boolNames="${__flags_boolNames}no${_flags_name_} "
+
+ # Append flag names to defined names for later validation checks.
+ __flags_definedNames="${__flags_definedNames}${_flags_usName_} "
+ [ "${_flags_type_}" -eq "${__FLAGS_TYPE_BOOLEAN}" ] && \
+ __flags_definedNames="${__flags_definedNames}no${_flags_usName_} "
+ fi
+
+ flags_return=${_flags_return_}
+ unset _flags_default_ _flags_help_ _flags_name_ _flags_return_ \
+ _flags_short_ _flags_type_ _flags_usName_
+ [ ${flags_return} -eq ${FLAGS_ERROR} ] && _flags_error "${flags_error}"
+ return ${flags_return}
+}
+
+# Underscore a flag name by replacing dashes with underscores.
+#
+# Args:
+#   unnamed: string: long flag name
+# Output:
+# string: underscored name
+_flags_underscoreName() {
+ echo "$1" |tr '-' '_'
+}
+
+# Return valid getopt options using currently defined list of long options.
+#
+# This function builds a proper getopt option string for short (and long)
+# options, using the current list of long options for reference.
+#
+# Args:
+# _flags_optStr: integer: option string type (__FLAGS_OPTSTR_*)
+# Output:
+# string: generated option string for getopt
+# Returns:
+# boolean: success of operation (always returns True)
+_flags_genOptStr() {
+ _flags_optStrType_=$1
+
+ _flags_opts_=''
+
+ for _flags_name_ in ${__flags_longNames}; do
+ _flags_usName_="`_flags_underscoreName "${_flags_name_}"`"
+ _flags_type_="`_flags_getFlagInfo "${_flags_usName_}" "${__FLAGS_INFO_TYPE}"`"
+ [ $? -eq ${FLAGS_TRUE} ] || _flags_fatal 'call to _flags_type_ failed'
+ case ${_flags_optStrType_} in
+ ${__FLAGS_OPTSTR_SHORT})
+ _flags_shortName_="`_flags_getFlagInfo \
+ "${_flags_usName_}" "${__FLAGS_INFO_SHORT}"`"
+ if [ "${_flags_shortName_}" != "${__FLAGS_NULL}" ]; then
+ _flags_opts_="${_flags_opts_}${_flags_shortName_}"
+ # getopt needs a trailing ':' to indicate a required argument.
+ [ "${_flags_type_}" -ne "${__FLAGS_TYPE_BOOLEAN}" ] && \
+ _flags_opts_="${_flags_opts_}:"
+ fi
+ ;;
+
+ ${__FLAGS_OPTSTR_LONG})
+ _flags_opts_="${_flags_opts_:+${_flags_opts_},}${_flags_name_}"
+ # getopt needs a trailing ':' to indicate a required argument
+ [ "${_flags_type_}" -ne "${__FLAGS_TYPE_BOOLEAN}" ] && \
+ _flags_opts_="${_flags_opts_}:"
+ ;;
+ esac
+ done
+
+ echo "${_flags_opts_}"
+ unset _flags_name_ _flags_opts_ _flags_optStrType_ _flags_shortName_ \
+ _flags_type_ _flags_usName_
+ return ${FLAGS_TRUE}
+}
+
+# Returns flag details based on a flag name and flag info.
+#
+# Args:
+# string: underscored flag name
+# string: flag info (see the _flags_define function for valid info types)
+# Output:
+# string: value of dereferenced flag variable
+# Returns:
+# integer: one of FLAGS_{TRUE|FALSE|ERROR}
+_flags_getFlagInfo() {
+ # Note: adding gFI to variable names to prevent naming conflicts with calling
+ # functions
+ _flags_gFI_usName_=$1
+ _flags_gFI_info_=$2
+
+ # Example: given argument usName (underscored flag name) of 'my_flag', and
+ # argument info of 'help', set the _flags_infoValue_ variable to the value of
+ # ${__flags_my_flag_help}, and see if it is non-empty.
+ _flags_infoVar_="__flags_${_flags_gFI_usName_}_${_flags_gFI_info_}"
+ _flags_strToEval_="_flags_infoValue_=\"\${${_flags_infoVar_}:-}\""
+ eval "${_flags_strToEval_}"
+ if [ -n "${_flags_infoValue_}" ]; then
+ # Special value '§' indicates no help string provided.
+ [ "${_flags_gFI_info_}" = ${__FLAGS_INFO_HELP} \
+ -a "${_flags_infoValue_}" = '§' ] && _flags_infoValue_=''
+ flags_return=${FLAGS_TRUE}
+ else
+ # See if the _flags_gFI_usName_ variable is a string as strings can be
+ # empty...
+ # Note: the DRY principle would say to have this function call itself for
+ # the next three lines, but doing so results in an infinite loop as an
+ # invalid _flags_name_ will also not have the associated _type variable.
+ # Because it doesn't (it will evaluate to an empty string) the logic will
+ # try to find the _type variable of the _type variable, and so on. Not so
+ # good ;-)
+ #
+ # Example cont.: set the _flags_typeValue_ variable to the value of
+ # ${__flags_my_flag_type}, and see if it equals '4'.
+ _flags_typeVar_="__flags_${_flags_gFI_usName_}_${__FLAGS_INFO_TYPE}"
+ _flags_strToEval_="_flags_typeValue_=\"\${${_flags_typeVar_}:-}\""
+ eval "${_flags_strToEval_}"
+ # shellcheck disable=SC2154
+ if [ "${_flags_typeValue_}" = "${__FLAGS_TYPE_STRING}" ]; then
+ flags_return=${FLAGS_TRUE}
+ else
+ flags_return=${FLAGS_ERROR}
+ flags_error="missing flag info variable (${_flags_infoVar_})"
+ fi
+ fi
+
+ echo "${_flags_infoValue_}"
+  unset _flags_gFI_usName_ _flags_gFI_info_ _flags_infoValue_ _flags_infoVar_ \
+ _flags_strToEval_ _flags_typeValue_ _flags_typeVar_
+ [ ${flags_return} -eq ${FLAGS_ERROR} ] && _flags_error "${flags_error}"
+ return ${flags_return}
+}
+
+# Check for presence of item in a list.
+#
+# Passed a string (e.g. 'abc'), this function will determine if the string is
+# present in the list of strings (e.g. ' foo bar abc ').
+#
+# Args:
+# _flags_str_: string: string to search for in a list of strings
+# unnamed: list: list of strings
+# Returns:
+# boolean: true if item is in the list
+_flags_itemInList() {
+ _flags_str_=$1
+ shift
+
+ case " ${*:-} " in
+ *\ ${_flags_str_}\ *) flags_return=${FLAGS_TRUE} ;;
+ *) flags_return=${FLAGS_FALSE} ;;
+ esac
+
+ unset _flags_str_
+ return ${flags_return}
+}
+
+# Returns the width of the current screen.
+#
+# Output:
+# integer: width in columns of the current screen.
+_flags_columns() {
+ if [ -z "${__flags_columns}" ]; then
+ if eval stty size >/dev/null 2>&1; then
+ # stty size worked :-)
+ # shellcheck disable=SC2046
+ set -- `stty size`
+ __flags_columns="${2:-}"
+ fi
+ fi
+ if [ -z "${__flags_columns}" ]; then
+ if eval tput cols >/dev/null 2>&1; then
+ # shellcheck disable=SC2046
+ set -- `tput cols`
+ __flags_columns="${1:-}"
+ fi
+ fi
+ echo "${__flags_columns:-80}"
+}
+
+# Validate a boolean.
+#
+# Args:
+# _flags__bool: boolean: value to validate
+# Returns:
+# bool: true if the value is a valid boolean
+_flags_validBool() {
+ _flags_bool_=$1
+
+ flags_return=${FLAGS_TRUE}
+ case "${_flags_bool_}" in
+ true|t|0) ;;
+ false|f|1) ;;
+ *) flags_return=${FLAGS_FALSE} ;;
+ esac
+
+ unset _flags_bool_
+ return ${flags_return}
+}
+
+# Validate a float.
+#
+# Args:
+# _flags_float_: float: value to validate
+# Returns:
+# bool: true if the value is a valid integer
+_flags_validFloat() {
+ flags_return=${FLAGS_FALSE}
+ [ -n "$1" ] || return ${flags_return}
+ _flags_float_=$1
+
+ if _flags_validInt "${_flags_float_}"; then
+ flags_return=${FLAGS_TRUE}
+ elif _flags_useBuiltin; then
+ _flags_float_whole_=${_flags_float_%.*}
+ _flags_float_fraction_=${_flags_float_#*.}
+    if _flags_validInt "${_flags_float_whole_:-0}" && \
+        _flags_validInt "${_flags_float_fraction_}"; then
+ flags_return=${FLAGS_TRUE}
+ fi
+ unset _flags_float_whole_ _flags_float_fraction_
+ else
+ flags_return=${FLAGS_TRUE}
+ case ${_flags_float_} in
+ -*) # Negative floats.
+ _flags_test_=`${FLAGS_EXPR_CMD} "${_flags_float_}" :\
+ '\(-[0-9]*\.[0-9]*\)'`
+ ;;
+ *) # Positive floats.
+ _flags_test_=`${FLAGS_EXPR_CMD} "${_flags_float_}" :\
+ '\([0-9]*\.[0-9]*\)'`
+ ;;
+ esac
+ [ "${_flags_test_}" != "${_flags_float_}" ] && flags_return=${FLAGS_FALSE}
+ unset _flags_test_
+ fi
+
+ unset _flags_float_ _flags_float_whole_ _flags_float_fraction_
+ return ${flags_return}
+}
+
+# Validate an integer.
+#
+# Args:
+# _flags_int_: integer: value to validate
+# Returns:
+# bool: true if the value is a valid integer
+_flags_validInt() {
+ flags_return=${FLAGS_FALSE}
+ [ -n "$1" ] || return ${flags_return}
+ _flags_int_=$1
+
+ case ${_flags_int_} in
+ -*.*) ;; # Ignore negative floats (we'll invalidate them later).
+ -*) # Strip possible leading negative sign.
+ if _flags_useBuiltin; then
+ _flags_int_=${_flags_int_#-}
+ else
+ _flags_int_=`${FLAGS_EXPR_CMD} "${_flags_int_}" : '-\([0-9][0-9]*\)'`
+ fi
+ ;;
+ esac
+
+ case ${_flags_int_} in
+ *[!0-9]*) flags_return=${FLAGS_FALSE} ;;
+ *) flags_return=${FLAGS_TRUE} ;;
+ esac
+
+ unset _flags_int_
+ return ${flags_return}
+}
+
+# Parse command-line options using the standard getopt.
+#
+# Note: the flag options are passed around in the global __flags_opts so that
+# the formatting is not lost due to shell parsing and such.
+#
+# Args:
+# @: varies: command-line options to parse
+# Returns:
+# integer: a FLAGS success condition
+_flags_getoptStandard() {
+ flags_return=${FLAGS_TRUE}
+ _flags_shortOpts_=`_flags_genOptStr ${__FLAGS_OPTSTR_SHORT}`
+
+ # Check for spaces in passed options.
+ for _flags_opt_ in "$@"; do
+ # Note: the silliness with the x's is purely for ksh93 on Ubuntu 6.06.
+ _flags_match_=`echo "x${_flags_opt_}x" |sed 's/ //g'`
+ if [ "${_flags_match_}" != "x${_flags_opt_}x" ]; then
+ flags_error='the available getopt does not support spaces in options'
+ flags_return=${FLAGS_ERROR}
+ break
+ fi
+ done
+
+ if [ ${flags_return} -eq ${FLAGS_TRUE} ]; then
+ __flags_opts=`getopt "${_flags_shortOpts_}" "$@" 2>&1`
+ _flags_rtrn_=$?
+ if [ ${_flags_rtrn_} -ne ${FLAGS_TRUE} ]; then
+ _flags_warn "${__flags_opts}"
+ flags_error='unable to parse provided options with getopt.'
+ flags_return=${FLAGS_ERROR}
+ fi
+ fi
+
+ unset _flags_match_ _flags_opt_ _flags_rtrn_ _flags_shortOpts_
+ return ${flags_return}
+}
+
+# Parse command-line options using the enhanced getopt.
+#
+# Note: the flag options are passed around in the global __flags_opts so that
+# the formatting is not lost due to shell parsing and such.
+#
+# Args:
+# @: varies: command-line options to parse
+# Returns:
+# integer: a FLAGS success condition
+_flags_getoptEnhanced() {
+ flags_return=${FLAGS_TRUE}
+ _flags_shortOpts_=`_flags_genOptStr ${__FLAGS_OPTSTR_SHORT}`
+ _flags_boolOpts_=`echo "${__flags_boolNames}" \
+ |sed 's/^ *//;s/ *$//;s/ /,/g'`
+ _flags_longOpts_=`_flags_genOptStr ${__FLAGS_OPTSTR_LONG}`
+
+ __flags_opts=`${FLAGS_GETOPT_CMD} \
+ -o "${_flags_shortOpts_}" \
+ -l "${_flags_longOpts_},${_flags_boolOpts_}" \
+ -- "$@" 2>&1`
+ _flags_rtrn_=$?
+ if [ ${_flags_rtrn_} -ne ${FLAGS_TRUE} ]; then
+ _flags_warn "${__flags_opts}"
+ flags_error='unable to parse provided options with getopt.'
+ flags_return=${FLAGS_ERROR}
+ fi
+
+ unset _flags_boolOpts_ _flags_longOpts_ _flags_rtrn_ _flags_shortOpts_
+ return ${flags_return}
+}
+
+# Dynamically parse a getopt result and set appropriate variables.
+#
+# This function does the actual conversion of getopt output and runs it through
+# the standard case structure for parsing. The case structure is actually quite
+# dynamic to support any number of flags.
+#
+# Args:
+# argc: int: original command-line argument count
+# @: varies: output from getopt parsing
+# Returns:
+# integer: a FLAGS success condition
+_flags_parseGetopt() {
+ _flags_argc_=$1
+ shift
+
+ flags_return=${FLAGS_TRUE}
+
+ if [ "${__FLAGS_GETOPT_VERS}" -ne "${__FLAGS_GETOPT_VERS_ENH}" ]; then
+    # The $@ must be unquoted as it needs to be re-split.
+ # shellcheck disable=SC2068
+ set -- $@
+ else
+ # Note the quotes around the `$@' -- they are essential!
+ eval set -- "$@"
+ fi
+
+ # Provide user with the number of arguments to shift by later.
+ # NOTE: the FLAGS_ARGC variable is obsolete as of 1.0.3 because it does not
+ # properly give user access to non-flag arguments mixed in between flag
+ # arguments. Its usage was replaced by FLAGS_ARGV, and it is being kept only
+ # for backwards compatibility reasons.
+ FLAGS_ARGC=`_flags_math "$# - 1 - ${_flags_argc_}"`
+ export FLAGS_ARGC
+
+ # Handle options. note options with values must do an additional shift.
+ while true; do
+ _flags_opt_=$1
+ _flags_arg_=${2:-}
+ _flags_type_=${__FLAGS_TYPE_NONE}
+ _flags_name_=''
+
+ # Determine long flag name.
+ case "${_flags_opt_}" in
+ --) shift; break ;; # Discontinue option parsing.
+
+ --*) # Long option.
+ if _flags_useBuiltin; then
+ _flags_opt_=${_flags_opt_#*--}
+ else
+ _flags_opt_=`${FLAGS_EXPR_CMD} "${_flags_opt_}" : '--\(.*\)'`
+ fi
+ _flags_len_=${__FLAGS_LEN_LONG}
+ if _flags_itemInList "${_flags_opt_}" "${__flags_longNames}"; then
+ _flags_name_=${_flags_opt_}
+ else
+ # Check for negated long boolean version.
+ if _flags_itemInList "${_flags_opt_}" "${__flags_boolNames}"; then
+ if _flags_useBuiltin; then
+ _flags_name_=${_flags_opt_#*no}
+ else
+ _flags_name_=`${FLAGS_EXPR_CMD} "${_flags_opt_}" : 'no\(.*\)'`
+ fi
+ _flags_type_=${__FLAGS_TYPE_BOOLEAN}
+ _flags_arg_=${__FLAGS_NULL}
+ fi
+ fi
+ ;;
+
+ -*) # Short option.
+ if _flags_useBuiltin; then
+ _flags_opt_=${_flags_opt_#*-}
+ else
+ _flags_opt_=`${FLAGS_EXPR_CMD} "${_flags_opt_}" : '-\(.*\)'`
+ fi
+ _flags_len_=${__FLAGS_LEN_SHORT}
+ if _flags_itemInList "${_flags_opt_}" "${__flags_shortNames}"; then
+ # Yes. Match short name to long name. Note purposeful off-by-one
+ # (too high) with awk calculations.
+ _flags_pos_=`echo "${__flags_shortNames}" \
+ |awk 'BEGIN{RS=" ";rn=0}$0==e{rn=NR}END{print rn}' \
+ e="${_flags_opt_}"`
+ _flags_name_=`echo "${__flags_longNames}" \
+ |awk 'BEGIN{RS=" "}rn==NR{print $0}' rn="${_flags_pos_}"`
+ fi
+ ;;
+ esac
+
+ # Die if the flag was unrecognized.
+ if [ -z "${_flags_name_}" ]; then
+ flags_error="unrecognized option (${_flags_opt_})"
+ flags_return=${FLAGS_ERROR}
+ break
+ fi
+
+ # Set new flag value.
+ _flags_usName_=`_flags_underscoreName "${_flags_name_}"`
+ [ ${_flags_type_} -eq ${__FLAGS_TYPE_NONE} ] && \
+ _flags_type_=`_flags_getFlagInfo \
+ "${_flags_usName_}" ${__FLAGS_INFO_TYPE}`
+ case ${_flags_type_} in
+ ${__FLAGS_TYPE_BOOLEAN})
+ if [ ${_flags_len_} -eq ${__FLAGS_LEN_LONG} ]; then
+ if [ "${_flags_arg_}" != "${__FLAGS_NULL}" ]; then
+ eval "FLAGS_${_flags_usName_}=${FLAGS_TRUE}"
+ else
+ eval "FLAGS_${_flags_usName_}=${FLAGS_FALSE}"
+ fi
+ else
+ _flags_strToEval_="_flags_val_=\
+\${__flags_${_flags_usName_}_${__FLAGS_INFO_DEFAULT}}"
+ eval "${_flags_strToEval_}"
+ # shellcheck disable=SC2154
+ if [ "${_flags_val_}" -eq ${FLAGS_FALSE} ]; then
+ eval "FLAGS_${_flags_usName_}=${FLAGS_TRUE}"
+ else
+ eval "FLAGS_${_flags_usName_}=${FLAGS_FALSE}"
+ fi
+ fi
+ ;;
+
+ ${__FLAGS_TYPE_FLOAT})
+ if _flags_validFloat "${_flags_arg_}"; then
+ eval "FLAGS_${_flags_usName_}='${_flags_arg_}'"
+ else
+ flags_error="invalid float value (${_flags_arg_})"
+ flags_return=${FLAGS_ERROR}
+ break
+ fi
+ ;;
+
+ ${__FLAGS_TYPE_INTEGER})
+ if _flags_validInt "${_flags_arg_}"; then
+ eval "FLAGS_${_flags_usName_}='${_flags_arg_}'"
+ else
+ flags_error="invalid integer value (${_flags_arg_})"
+ flags_return=${FLAGS_ERROR}
+ break
+ fi
+ ;;
+
+ ${__FLAGS_TYPE_STRING})
+ eval "FLAGS_${_flags_usName_}='${_flags_arg_}'"
+ ;;
+ esac
+
+ # Handle special case help flag.
+ if [ "${_flags_usName_}" = 'help' ]; then
+ # shellcheck disable=SC2154
+ if [ "${FLAGS_help}" -eq ${FLAGS_TRUE} ]; then
+ flags_help
+ flags_error='help requested'
+ flags_return=${FLAGS_FALSE}
+ break
+ fi
+ fi
+
+ # Shift the option and non-boolean arguments out.
+ shift
+ [ "${_flags_type_}" != ${__FLAGS_TYPE_BOOLEAN} ] && shift
+ done
+
+ # Give user back non-flag arguments.
+ FLAGS_ARGV=''
+ while [ $# -gt 0 ]; do
+ FLAGS_ARGV="${FLAGS_ARGV:+${FLAGS_ARGV} }'$1'"
+ shift
+ done
+
+ unset _flags_arg_ _flags_len_ _flags_name_ _flags_opt_ _flags_pos_ \
+ _flags_strToEval_ _flags_type_ _flags_usName_ _flags_val_
+ return ${flags_return}
+}
+
+# Perform some math using built-ins.
+#
+# Args:
+# $@: string: math expression to evaluate
+# Output:
+# integer: the result
+# Returns:
+# bool: success of math evaluation
+_flags_math() {
+ if [ $# -eq 0 ]; then
+ flags_return=${FLAGS_FALSE}
+ elif _flags_useBuiltin; then
+ # Variable assignment is needed as workaround for Solaris Bourne shell,
+ # which cannot parse a bare $((expression)).
+ # shellcheck disable=SC2016
+ _flags_expr_='$(($@))'
+ eval echo ${_flags_expr_}
+ flags_return=$?
+ unset _flags_expr_
+ else
+ eval expr "$@"
+ flags_return=$?
+ fi
+
+ return ${flags_return}
+}
+
+# Cross-platform strlen() implementation.
+#
+# Args:
+# _flags_str: string: to determine length of
+# Output:
+# integer: length of string
+# Returns:
+# bool: success of strlen evaluation
+_flags_strlen() {
+ _flags_str_=${1:-}
+
+ if [ -z "${_flags_str_}" ]; then
+ flags_output=0
+ elif _flags_useBuiltin; then
+ flags_output=${#_flags_str_}
+ else
+ flags_output=`${FLAGS_EXPR_CMD} "${_flags_str_}" : '.*'`
+ fi
+ flags_return=$?
+
+ unset _flags_str_
+ echo "${flags_output}"
+ return ${flags_return}
+}
+
+# Use built-in helper function to enable unit testing.
+#
+# Args:
+# None
+# Returns:
+# bool: true if built-ins should be used
+_flags_useBuiltin() { return ${__FLAGS_USE_BUILTIN}; }
+
+#------------------------------------------------------------------------------
+# public functions
+#
+# A basic boolean flag. Boolean flags do not take any arguments, and their
+# value is either 1 (false) or 0 (true). For long flags, the false value is
+# specified on the command line by prepending the word 'no'. With short flags,
+# the presence of the flag toggles the current value between true and false.
+# Specifying a short boolean flag twice on the command line returns the value
+# back to the default value.
+#
+# A default value is required for boolean flags.
+#
+# For example, let's say a Boolean flag was created whose long name was 'update'
+# and whose short name was 'x', and the default value was 'false'. This flag
+# could be explicitly set to 'true' with '--update' or by '-x', and it could be
+# explicitly set to 'false' with '--noupdate'.
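+#
+# For instance (an illustrative sketch using the 'update' flag described
+# above):
+#
+#   DEFINE_boolean 'update' false 'update the cache before running' 'x'
+#   FLAGS "$@" || exit $?
+#   [ "${FLAGS_update}" -eq "${FLAGS_TRUE}" ] && echo 'will update'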
+DEFINE_boolean() { _flags_define ${__FLAGS_TYPE_BOOLEAN} "$@"; }
+
+# Other basic flags.
+DEFINE_float() { _flags_define ${__FLAGS_TYPE_FLOAT} "$@"; }
+DEFINE_integer() { _flags_define ${__FLAGS_TYPE_INTEGER} "$@"; }
+DEFINE_string() { _flags_define ${__FLAGS_TYPE_STRING} "$@"; }
+
+# Parse the flags.
+#
+# Args:
+# unnamed: list: command-line flags to parse
+# Returns:
+# integer: success of operation, or error
+FLAGS() {
+ # Define a standard 'help' flag if one isn't already defined.
+ [ -z "${__flags_help_type:-}" ] && \
+ DEFINE_boolean 'help' false 'show this help' 'h'
+
+ # Parse options.
+ if [ $# -gt 0 ]; then
+ if [ "${__FLAGS_GETOPT_VERS}" -ne "${__FLAGS_GETOPT_VERS_ENH}" ]; then
+ _flags_getoptStandard "$@"
+ else
+ _flags_getoptEnhanced "$@"
+ fi
+ flags_return=$?
+ else
+ # Nothing passed; won't bother running getopt.
+ __flags_opts='--'
+ flags_return=${FLAGS_TRUE}
+ fi
+
+ if [ ${flags_return} -eq ${FLAGS_TRUE} ]; then
+ _flags_parseGetopt $# "${__flags_opts}"
+ flags_return=$?
+ fi
+
+ [ ${flags_return} -eq ${FLAGS_ERROR} ] && _flags_fatal "${flags_error}"
+ return ${flags_return}
+}
+
+# This is a helper function for determining the 'getopt' version for platforms
+# where the detection isn't working. It simply outputs debug information that
+# can be included in a bug report.
+#
+# Args:
+# none
+# Output:
+# debug info that can be included in a bug report
+# Returns:
+# nothing
+flags_getoptInfo() {
+ # Platform info.
+ _flags_debug "uname -a: `uname -a`"
+ _flags_debug "PATH: ${PATH}"
+
+ # Shell info.
+ if [ -n "${BASH_VERSION:-}" ]; then
+ _flags_debug 'shell: bash'
+ _flags_debug "BASH_VERSION: ${BASH_VERSION}"
+ elif [ -n "${ZSH_VERSION:-}" ]; then
+ _flags_debug 'shell: zsh'
+ _flags_debug "ZSH_VERSION: ${ZSH_VERSION}"
+ fi
+
+ # getopt info.
+ ${FLAGS_GETOPT_CMD} >/dev/null
+ _flags_getoptReturn=$?
+ _flags_debug "getopt return: ${_flags_getoptReturn}"
+ _flags_debug "getopt --version: `${FLAGS_GETOPT_CMD} --version 2>&1`"
+
+ unset _flags_getoptReturn
+}
+
+# Returns whether the detected getopt version is the enhanced version.
+#
+# Args:
+# none
+# Output:
+# none
+# Returns:
+# bool: true if getopt is the enhanced version
+flags_getoptIsEnh() {
+ test "${__FLAGS_GETOPT_VERS}" -eq "${__FLAGS_GETOPT_VERS_ENH}"
+}
+
+# Returns whether the detected getopt version is the standard version.
+#
+# Args:
+# none
+# Returns:
+# bool: true if getopt is the standard version
+flags_getoptIsStd() {
+ test "${__FLAGS_GETOPT_VERS}" -eq "${__FLAGS_GETOPT_VERS_STD}"
+}
+
+# This is effectively a 'usage()' function. It prints usage information and
+# exits the program with ${FLAGS_FALSE} if it is ever found in the command line
+# arguments. Note this function can be overridden so other apps can define
+# their own --help flag, replacing this one, if they want.
+#
+# Args:
+# none
+# Returns:
+# integer: success of operation (always returns true)
+flags_help() {
+ if [ -n "${FLAGS_HELP:-}" ]; then
+ echo "${FLAGS_HELP}" >&2
+ else
+ echo "USAGE: ${FLAGS_PARENT:-$0} [flags] args" >&2
+ fi
+ if [ -n "${__flags_longNames}" ]; then
+ echo 'flags:' >&2
+ for flags_name_ in ${__flags_longNames}; do
+ flags_flagStr_=''
+ flags_boolStr_=''
+ flags_usName_=`_flags_underscoreName "${flags_name_}"`
+
+ flags_default_=`_flags_getFlagInfo \
+ "${flags_usName_}" ${__FLAGS_INFO_DEFAULT}`
+ flags_help_=`_flags_getFlagInfo \
+ "${flags_usName_}" ${__FLAGS_INFO_HELP}`
+ flags_short_=`_flags_getFlagInfo \
+ "${flags_usName_}" ${__FLAGS_INFO_SHORT}`
+ flags_type_=`_flags_getFlagInfo \
+ "${flags_usName_}" ${__FLAGS_INFO_TYPE}`
+
+ [ "${flags_short_}" != "${__FLAGS_NULL}" ] && \
+ flags_flagStr_="-${flags_short_}"
+
+ if [ "${__FLAGS_GETOPT_VERS}" -eq "${__FLAGS_GETOPT_VERS_ENH}" ]; then
+ [ "${flags_short_}" != "${__FLAGS_NULL}" ] && \
+ flags_flagStr_="${flags_flagStr_},"
+ # Add [no] to long boolean flag names, except the 'help' flag.
+ [ "${flags_type_}" -eq ${__FLAGS_TYPE_BOOLEAN} \
+ -a "${flags_usName_}" != 'help' ] && \
+ flags_boolStr_='[no]'
+ flags_flagStr_="${flags_flagStr_}--${flags_boolStr_}${flags_name_}:"
+ fi
+
+ case ${flags_type_} in
+ ${__FLAGS_TYPE_BOOLEAN})
+ if [ "${flags_default_}" -eq ${FLAGS_TRUE} ]; then
+ flags_defaultStr_='true'
+ else
+ flags_defaultStr_='false'
+ fi
+ ;;
+ ${__FLAGS_TYPE_FLOAT}|${__FLAGS_TYPE_INTEGER})
+ flags_defaultStr_=${flags_default_} ;;
+ ${__FLAGS_TYPE_STRING}) flags_defaultStr_="'${flags_default_}'" ;;
+ esac
+ flags_defaultStr_="(default: ${flags_defaultStr_})"
+
+ flags_helpStr_=" ${flags_flagStr_} ${flags_help_:+${flags_help_} }${flags_defaultStr_}"
+ _flags_strlen "${flags_helpStr_}" >/dev/null
+ flags_helpStrLen_=${flags_output}
+ flags_columns_=`_flags_columns`
+
+ if [ "${flags_helpStrLen_}" -lt "${flags_columns_}" ]; then
+ echo "${flags_helpStr_}" >&2
+ else
+ echo " ${flags_flagStr_} ${flags_help_}" >&2
+ # Note: the silliness with the x's is purely for ksh93 on Ubuntu 6.06
+ # because it doesn't like empty strings when used in this manner.
+ flags_emptyStr_="`echo \"x${flags_flagStr_}x\" \
+ |awk '{printf "%"length($0)-2"s", ""}'`"
+ flags_helpStr_=" ${flags_emptyStr_} ${flags_defaultStr_}"
+ _flags_strlen "${flags_helpStr_}" >/dev/null
+ flags_helpStrLen_=${flags_output}
+
+ if [ "${__FLAGS_GETOPT_VERS}" -eq "${__FLAGS_GETOPT_VERS_STD}" \
+ -o "${flags_helpStrLen_}" -lt "${flags_columns_}" ]; then
+ # Indented to match help string.
+ echo "${flags_helpStr_}" >&2
+ else
+ # Indented four from left to allow for longer defaults as long flag
+ # names might be used too, making things too long.
+ echo " ${flags_defaultStr_}" >&2
+ fi
+ fi
+ done
+ fi
+
+  unset flags_boolStr_ flags_default_ flags_defaultStr_ flags_emptyStr_ \
+      flags_flagStr_ flags_help_ flags_helpStr_ flags_helpStrLen_ flags_name_ \
+      flags_columns_ flags_short_ flags_type_ flags_usName_
+ return ${FLAGS_TRUE}
+}
+
+# Reset shflags back to an uninitialized state.
+#
+# Args:
+# none
+# Returns:
+# nothing
+flags_reset() {
+ for flags_name_ in ${__flags_longNames}; do
+ flags_usName_=`_flags_underscoreName "${flags_name_}"`
+ flags_strToEval_="unset FLAGS_${flags_usName_}"
+ for flags_type_ in \
+ ${__FLAGS_INFO_DEFAULT} \
+ ${__FLAGS_INFO_HELP} \
+ ${__FLAGS_INFO_SHORT} \
+ ${__FLAGS_INFO_TYPE}
+ do
+ flags_strToEval_=\
+"${flags_strToEval_} __flags_${flags_usName_}_${flags_type_}"
+ done
+ eval "${flags_strToEval_}"
+ done
+
+ # Reset internal variables.
+ __flags_boolNames=' '
+ __flags_longNames=' '
+ __flags_shortNames=' '
+ __flags_definedNames=' '
+
+ # Reset logging level back to default.
+ flags_setLoggingLevel ${__FLAGS_LEVEL_DEFAULT}
+
+ unset flags_name_ flags_type_ flags_strToEval_ flags_usName_
+}
+
+#
+# Initialization
+#
+
+# Set the default logging level.
+flags_setLoggingLevel ${__FLAGS_LEVEL_DEFAULT}
diff --git a/src/fluent-bit/tests/lib/shunit2/lib/versions b/src/fluent-bit/tests/lib/shunit2/lib/versions
new file mode 100755
index 000000000..3b64ff5b6
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/lib/versions
@@ -0,0 +1,273 @@
+#! /bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Versions determines the versions of all installed shells.
+#
+# Copyright 2008-2018 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 License.
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+# https://github.com/kward/shlib
+#
+# This library provides reusable functions that determine actual names and
+# versions of installed shells and the OS. The library can also be run as a
+# script if set executable.
+#
+# Disable checks that aren't fully portable (POSIX != portable).
+# shellcheck disable=SC2006
+
+ARGV0=`basename "$0"`
+LSB_RELEASE='/etc/lsb-release'
+VERSIONS_SHELLS='ash /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh /bin/sh /usr/xpg4/bin/sh /sbin/sh'
+
+true; TRUE=$?
+false; FALSE=$?
+ERROR=2
+
+UNAME_R=`uname -r`
+UNAME_S=`uname -s`
+
+__versions_haveStrings=${ERROR}
+
+versions_osName() {
+ os_name_='unrecognized'
+ os_system_=${UNAME_S}
+ os_release_=${UNAME_R}
+ case ${os_system_} in
+ CYGWIN_NT-*) os_name_='Cygwin' ;;
+ Darwin)
+ os_name_=`/usr/bin/sw_vers -productName`
+ os_version_=`versions_osVersion`
+ case ${os_version_} in
+ 10.4|10.4.[0-9]*) os_name_='Mac OS X Tiger' ;;
+ 10.5|10.5.[0-9]*) os_name_='Mac OS X Leopard' ;;
+ 10.6|10.6.[0-9]*) os_name_='Mac OS X Snow Leopard' ;;
+ 10.7|10.7.[0-9]*) os_name_='Mac OS X Lion' ;;
+ 10.8|10.8.[0-9]*) os_name_='Mac OS X Mountain Lion' ;;
+ 10.9|10.9.[0-9]*) os_name_='Mac OS X Mavericks' ;;
+ 10.10|10.10.[0-9]*) os_name_='Mac OS X Yosemite' ;;
+ 10.11|10.11.[0-9]*) os_name_='Mac OS X El Capitan' ;;
+ 10.12|10.12.[0-9]*) os_name_='macOS Sierra' ;;
+ 10.13|10.13.[0-9]*) os_name_='macOS High Sierra' ;;
+ 10.14|10.14.[0-9]*) os_name_='macOS Mojave' ;;
+ *) os_name_='macOS' ;;
+ esac
+ ;;
+ FreeBSD) os_name_='FreeBSD' ;;
+ Linux) os_name_='Linux' ;;
+ SunOS)
+ os_name_='SunOS'
+ if [ -r '/etc/release' ]; then
+ if grep 'OpenSolaris' /etc/release >/dev/null; then
+ os_name_='OpenSolaris'
+ else
+ os_name_='Solaris'
+ fi
+ fi
+ ;;
+ esac
+
+ echo ${os_name_}
+ unset os_name_ os_system_ os_release_ os_version_
+}
+
+versions_osVersion() {
+ os_version_='unrecognized'
+ os_system_=${UNAME_S}
+ os_release_=${UNAME_R}
+ case ${os_system_} in
+ CYGWIN_NT-*)
+ os_version_=`expr "${os_release_}" : '\([0-9]*\.[0-9]\.[0-9]*\).*'`
+ ;;
+ Darwin)
+ os_version_=`/usr/bin/sw_vers -productVersion`
+ ;;
+ FreeBSD)
+ os_version_=`expr "${os_release_}" : '\([0-9]*\.[0-9]*\)-.*'`
+ ;;
+ Linux)
+ if [ -r '/etc/os-release' ]; then
+ os_version_=`awk -F= '$1~/PRETTY_NAME/{print $2}' /etc/os-release \
+ |sed 's/"//g'`
+ elif [ -r '/etc/redhat-release' ]; then
+ os_version_=`cat /etc/redhat-release`
+ elif [ -r '/etc/SuSE-release' ]; then
+ os_version_=`head -n 1 /etc/SuSE-release`
+ elif [ -r "${LSB_RELEASE}" ]; then
+ if grep -q 'DISTRIB_ID=Ubuntu' "${LSB_RELEASE}"; then
+ # shellcheck disable=SC2002
+ os_version_=`cat "${LSB_RELEASE}" \
+ |awk -F= '$1~/DISTRIB_DESCRIPTION/{print $2}' \
+ |sed 's/"//g;s/ /-/g'`
+ fi
+ fi
+ ;;
+ SunOS)
+ if [ -r '/etc/release' ]; then
+ if grep 'OpenSolaris' /etc/release >/dev/null; then # OpenSolaris
+ os_version_=`grep 'OpenSolaris' /etc/release |awk '{print $2"("$3")"}'`
+ else # Solaris
+ major_=`echo "${os_release_}" |sed 's/[0-9]*\.\([0-9]*\)/\1/'`
+ minor_=`grep Solaris /etc/release |sed 's/[^u]*\(u[0-9]*\).*/\1/'`
+ os_version_="${major_}${minor_}"
+ fi
+ fi
+ ;;
+ esac
+
+ echo "${os_version_}"
+ unset os_release_ os_system_ os_version_ major_ minor_
+}
+
+versions_shellVersion() {
+ shell_=$1
+
+ shell_present_=${FALSE}
+ case "${shell_}" in
+ ash) [ -x '/bin/busybox' ] && shell_present_=${TRUE} ;;
+ *) [ -x "${shell_}" ] && shell_present_=${TRUE} ;;
+ esac
+ if [ ${shell_present_} -eq ${FALSE} ]; then
+ echo 'not installed'
+ return ${FALSE}
+ fi
+
+ version_=''
+ case ${shell_} in
+ /sbin/sh) ;; # SunOS
+ /usr/xpg4/bin/sh)
+ version_=`versions_shell_xpg4 "${shell_}"`
+ ;; # SunOS
+ */sh)
+ # This could be one of any number of shells. Try until one fits.
+ version_=''
+ [ -z "${version_}" ] && version_=`versions_shell_bash "${shell_}"`
+ # dash cannot be self determined yet
+ [ -z "${version_}" ] && version_=`versions_shell_ksh "${shell_}"`
+ # pdksh is covered in versions_shell_ksh()
+ [ -z "${version_}" ] && version_=`versions_shell_xpg4 "${shell_}"`
+ [ -z "${version_}" ] && version_=`versions_shell_zsh "${shell_}"`
+ ;;
+ ash) version_=`versions_shell_ash "${shell_}"` ;;
+ */bash) version_=`versions_shell_bash "${shell_}"` ;;
+ */dash)
+ # Assuming Ubuntu Linux until somebody comes up with a better test. The
+ # following test will return an empty string if dash is not installed.
+ version_=`versions_shell_dash`
+ ;;
+ */ksh) version_=`versions_shell_ksh "${shell_}"` ;;
+ */pdksh) version_=`versions_shell_pdksh "${shell_}"` ;;
+ */zsh) version_=`versions_shell_zsh "${shell_}"` ;;
+ *) version_='invalid'
+ esac
+
+ echo "${version_:-unknown}"
+ unset shell_ version_
+}
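+
+# Illustrative sketch (not part of the library): a caller could query a single
+# shell directly, e.g.:
+#   bash_version=`versions_shellVersion /bin/bash`
+#   echo "bash: ${bash_version}"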
+
+# The ash shell is included in BusyBox.
+versions_shell_ash() {
+ busybox --help |head -1 |sed 's/BusyBox v\([0-9.]*\) .*/\1/'
+}
+
+versions_shell_bash() {
+ $1 --version : 2>&1 |grep 'GNU bash' |sed 's/.*version \([^ ]*\).*/\1/'
+}
+
+versions_shell_dash() {
+ eval dpkg >/dev/null 2>&1
+ [ $? -eq 127 ] && return # Return if dpkg not found.
+
+ dpkg -l |grep ' dash ' |awk '{print $3}'
+}
+
+versions_shell_ksh() {
+ versions_shell_=$1
+ versions_version_=''
+
+ # Try a few different ways to figure out the version.
+ versions_version_=`${versions_shell_} --version : 2>&1`
+ # shellcheck disable=SC2181
+ if [ $? -eq 0 ]; then
+ versions_version_=`echo "${versions_version_}" \
+ |sed 's/.*\([0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\).*/\1/'`
+ else
+ versions_version_=''
+ fi
+ if [ -z "${versions_version_}" ]; then
+ _versions_have_strings
+ versions_version_=`strings "${versions_shell_}" 2>&1 \
+ |grep Version \
+ |sed 's/^.*Version \(.*\)$/\1/;s/ s+ \$$//;s/ /-/g'`
+ fi
+ if [ -z "${versions_version_}" ]; then
+ versions_version_=`versions_shell_pdksh "${versions_shell_}"`
+ fi
+
+ echo "${versions_version_}"
+ unset versions_shell_ versions_version_
+}
+
+versions_shell_pdksh() {
+ _versions_have_strings
+ strings "$1" 2>&1 \
+ |grep 'PD KSH' \
+ |sed -e 's/.*PD KSH \(.*\)/\1/;s/ /-/g'
+}
+
+versions_shell_xpg4() {
+ _versions_have_strings
+ strings "$1" 2>&1 \
+ |grep 'Version' \
+ |sed -e 's/^@(#)Version //'
+}
+
+versions_shell_zsh() {
+ versions_shell_=$1
+
+ # Try a few different ways to figure out the version.
+ # shellcheck disable=SC2016
+ versions_version_=`echo 'echo ${ZSH_VERSION}' |${versions_shell_}`
+ if [ -z "${versions_version_}" ]; then
+ versions_version_=`${versions_shell_} --version : 2>&1`
+ # shellcheck disable=SC2181
+ if [ $? -eq 0 ]; then
+ versions_version_=`echo "${versions_version_}" |awk '{print $2}'`
+ else
+ versions_version_=''
+ fi
+ fi
+
+ echo "${versions_version_}"
+ unset versions_shell_ versions_version_
+}
+
+# Determine if the 'strings' binary is installed.
+_versions_have_strings() {
+ [ ${__versions_haveStrings} -ne ${ERROR} ] && return
+ if eval strings /dev/null >/dev/null 2>&1; then
+ __versions_haveStrings=${TRUE}
+ return
+ fi
+
+ echo 'WARN: strings not installed. try installing binutils?' >&2
+ __versions_haveStrings=${FALSE}
+}
+
+versions_main() {
+ # Treat unset variables as an error.
+ set -u
+
+ os_name=`versions_osName`
+ os_version=`versions_osVersion`
+ echo "os: ${os_name} version: ${os_version}"
+
+ for shell in ${VERSIONS_SHELLS}; do
+ shell_version=`versions_shellVersion "${shell}"`
+ echo "shell: ${shell} version: ${shell_version}"
+ done
+}
+
+if [ "${ARGV0}" = 'versions' ]; then
+ versions_main "$@"
+fi
diff --git a/src/fluent-bit/tests/lib/shunit2/shunit2 b/src/fluent-bit/tests/lib/shunit2/shunit2
new file mode 100755
index 000000000..6239683af
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/shunit2
@@ -0,0 +1,1343 @@
+#! /bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008-2020 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# shUnit2 -- Unit testing framework for Unix shell scripts.
+# https://github.com/kward/shunit2
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 is a xUnit based unit test framework for Bourne shell scripts. It is
+# based on the popular JUnit unit testing framework for Java.
+#
+# $() are not fully portable (POSIX != portable).
+# shellcheck disable=SC2006
+# expr may be antiquated, but it is the only solution in some cases.
+# shellcheck disable=SC2003
+
+# Return if shunit2 already loaded.
+command [ -n "${SHUNIT_VERSION:-}" ] && exit 0
+SHUNIT_VERSION='2.1.8'
+
+# Return values that scripts can use.
+SHUNIT_TRUE=0
+SHUNIT_FALSE=1
+SHUNIT_ERROR=2
+
+# Logging functions.
+_shunit_warn() {
+ ${__SHUNIT_CMD_ECHO_ESC} \
+ "${__shunit_ansi_yellow}shunit2:WARN${__shunit_ansi_none} $*" >&2
+}
+_shunit_error() {
+ ${__SHUNIT_CMD_ECHO_ESC} \
+ "${__shunit_ansi_red}shunit2:ERROR${__shunit_ansi_none} $*" >&2
+}
+_shunit_fatal() {
+ ${__SHUNIT_CMD_ECHO_ESC} \
+ "${__shunit_ansi_red}shunit2:FATAL${__shunit_ansi_none} $*" >&2
+ exit ${SHUNIT_ERROR}
+}
+
+# Determine some reasonable command defaults.
+__SHUNIT_CMD_ECHO_ESC='echo -e'
+# shellcheck disable=SC2039
+command [ "`echo -e test`" = '-e test' ] && __SHUNIT_CMD_ECHO_ESC='echo'
+
+__SHUNIT_UNAME_S=`uname -s`
+case "${__SHUNIT_UNAME_S}" in
+ BSD) __SHUNIT_CMD_EXPR='gexpr' ;;
+ *) __SHUNIT_CMD_EXPR='expr' ;;
+esac
+__SHUNIT_CMD_TPUT='tput'
+
+# Commands a user can override if needed.
+SHUNIT_CMD_EXPR=${SHUNIT_CMD_EXPR:-${__SHUNIT_CMD_EXPR}}
+SHUNIT_CMD_TPUT=${SHUNIT_CMD_TPUT:-${__SHUNIT_CMD_TPUT}}
+
+# Enable color output. Options are 'always', 'auto', or 'none'.
+SHUNIT_COLOR=${SHUNIT_COLOR:-auto}
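+# Illustrative sketch: since SHUNIT_COLOR honors the environment, color can be
+# forced off when capturing output, e.g. (hypothetical script name):
+#   SHUNIT_COLOR='none' ./my_test.sh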
+
+# Specific shell checks.
+if command [ -n "${ZSH_VERSION:-}" ]; then
+ setopt |grep "^shwordsplit$" >/dev/null
+ if command [ $? -ne ${SHUNIT_TRUE} ]; then
+ _shunit_fatal 'zsh shwordsplit option is required for proper operation'
+ fi
+ if command [ -z "${SHUNIT_PARENT:-}" ]; then
+ _shunit_fatal "zsh does not pass \$0 through properly. please declare \
+\"SHUNIT_PARENT=\$0\" before calling shUnit2"
+ fi
+fi
+
+#
+# Constants
+#
+
+__SHUNIT_MODE_SOURCED='sourced'
+__SHUNIT_MODE_STANDALONE='standalone'
+__SHUNIT_PARENT=${SHUNIT_PARENT:-$0}
+
+# User provided test prefix to display in front of the name of the test being
+# executed. Define by setting the SHUNIT_TEST_PREFIX variable.
+__SHUNIT_TEST_PREFIX=${SHUNIT_TEST_PREFIX:-}
+
+# ANSI colors.
+__SHUNIT_ANSI_NONE='\033[0m'
+__SHUNIT_ANSI_RED='\033[1;31m'
+__SHUNIT_ANSI_GREEN='\033[1;32m'
+__SHUNIT_ANSI_YELLOW='\033[1;33m'
+__SHUNIT_ANSI_CYAN='\033[1;36m'
+
+# Set the constants readonly.
+__shunit_constants=`set |grep '^__SHUNIT_' |cut -d= -f1`
+echo "${__shunit_constants}" |grep '^Binary file' >/dev/null && \
+ __shunit_constants=`set |grep -a '^__SHUNIT_' |cut -d= -f1`
+for __shunit_const in ${__shunit_constants}; do
+ if command [ -z "${ZSH_VERSION:-}" ]; then
+ readonly "${__shunit_const}"
+ else
+ case ${ZSH_VERSION} in
+ [123].*) readonly "${__shunit_const}" ;;
+ *) readonly -g "${__shunit_const}" # Declare readonly constants globally.
+ esac
+ fi
+done
+unset __shunit_const __shunit_constants
+
+#
+# Internal variables.
+#
+
+# Variables.
+__shunit_lineno='' # Line number of executed test.
+__shunit_mode=${__SHUNIT_MODE_SOURCED} # Operating mode.
+__shunit_reportGenerated=${SHUNIT_FALSE} # Is report generated.
+__shunit_script='' # Filename of unittest script (standalone mode).
+__shunit_skip=${SHUNIT_FALSE} # Is skipping enabled.
+__shunit_suite='' # Suite of tests to execute.
+__shunit_clean=${SHUNIT_FALSE} # _shunit_cleanup() was already called.
+
+# ANSI colors (populated by _shunit_configureColor()).
+__shunit_ansi_none=''
+__shunit_ansi_red=''
+__shunit_ansi_green=''
+__shunit_ansi_yellow=''
+__shunit_ansi_cyan=''
+
+# Counts of tests.
+__shunit_testSuccess=${SHUNIT_TRUE}
+__shunit_testsTotal=0
+__shunit_testsPassed=0
+__shunit_testsFailed=0
+
+# Counts of asserts.
+__shunit_assertsTotal=0
+__shunit_assertsPassed=0
+__shunit_assertsFailed=0
+__shunit_assertsSkipped=0
+
+#
+# Macros.
+#
+
+# shellcheck disable=SC2016,SC2089
+_SHUNIT_LINENO_='eval __shunit_lineno=""; if command [ "${1:-}" = "--lineno" ]; then command [ -n "$2" ] && __shunit_lineno="[$2] "; shift 2; fi'
+
+#-----------------------------------------------------------------------------
+# Assertion functions.
+#
+
+# Assert that two values are equal to one another.
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertEquals() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertEquals() requires two or three arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_expected_=$1
+ shunit_actual_=$2
+
+ shunit_return=${SHUNIT_TRUE}
+ if command [ "${shunit_expected_}" = "${shunit_actual_}" ]; then
+ _shunit_assertPass
+ else
+ failNotEquals "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}"
+ shunit_return=${SHUNIT_FALSE}
+ fi
+
+ unset shunit_message_ shunit_expected_ shunit_actual_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_EQUALS_='eval assertEquals --lineno "${LINENO:-}"'
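+
+# Illustrative sketch (hypothetical test script, not part of shunit2): a
+# minimal use of assertEquals might look like the following, assuming shunit2
+# is available in the current directory:
+#   testAddition() {
+#     result=`expr 2 + 2`
+#     assertEquals 'addition failed' 4 "${result}"
+#   }
+#   . ./shunit2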
+
+# Assert that two values are not equal to one another.
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertNotEquals() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertNotEquals() requires two or three arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_expected_=$1
+ shunit_actual_=$2
+
+ shunit_return=${SHUNIT_TRUE}
+ if command [ "${shunit_expected_}" != "${shunit_actual_}" ]; then
+ _shunit_assertPass
+ else
+ failSame "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}"
+ shunit_return=${SHUNIT_FALSE}
+ fi
+
+ unset shunit_message_ shunit_expected_ shunit_actual_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_NOT_EQUALS_='eval assertNotEquals --lineno "${LINENO:-}"'
+
+# Assert that a container contains a content.
+#
+# Args:
+# message: string: failure message [optional]
+# container: string: container to analyze
+# content: string: content to find
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertContains() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertContains() requires two or three arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_container_=$1
+ shunit_content_=$2
+
+ shunit_return=${SHUNIT_TRUE}
+ if echo "$shunit_container_" | grep -F -- "$shunit_content_" > /dev/null; then
+ _shunit_assertPass
+ else
+ failNotFound "${shunit_message_}" "${shunit_content_}"
+ shunit_return=${SHUNIT_FALSE}
+ fi
+
+ unset shunit_message_ shunit_container_ shunit_content_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_CONTAINS_='eval assertContains --lineno "${LINENO:-}"'
+
+# Assert that a container does not contain a content.
+#
+# Args:
+# message: string: failure message [optional]
+# container: string: container to analyze
+# content: string: content to look for
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertNotContains() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertNotContains() requires two or three arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_container_=$1
+ shunit_content_=$2
+
+ shunit_return=${SHUNIT_TRUE}
+ if echo "$shunit_container_" | grep -F -- "$shunit_content_" > /dev/null; then
+ failFound "${shunit_message_}" "${shunit_content_}"
+ shunit_return=${SHUNIT_FALSE}
+ else
+ _shunit_assertPass
+ fi
+
+ unset shunit_message_ shunit_container_ shunit_content_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_NOT_CONTAINS_='eval assertNotContains --lineno "${LINENO:-}"'
+
+# Assert that a value is null (i.e. an empty string)
+#
+# Args:
+# message: string: failure message [optional]
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertNull() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 1 -o $# -gt 2 ]; then
+ _shunit_error "assertNull() requires one or two arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 2 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ assertTrue "${shunit_message_}" "[ -z '$1' ]"
+ shunit_return=$?
+
+ unset shunit_message_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_NULL_='eval assertNull --lineno "${LINENO:-}"'
+
+# Assert that a value is not null (i.e. a non-empty string)
+#
+# Args:
+# message: string: failure message [optional]
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertNotNull() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -gt 2 ]; then # allowing 0 arguments as $1 might actually be null
+ _shunit_error "assertNotNull() requires one or two arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 2 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_actual_=`_shunit_escapeCharactersInString "${1:-}"`
+ test -n "${shunit_actual_}"
+ assertTrue "${shunit_message_}" $?
+ shunit_return=$?
+
+ unset shunit_actual_ shunit_message_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_NOT_NULL_='eval assertNotNull --lineno "${LINENO:-}"'
+
+# Assert that two values are the same (i.e. equal to one another).
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertSame() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertSame() requires two or three arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ assertEquals "${shunit_message_}" "$1" "$2"
+ shunit_return=$?
+
+ unset shunit_message_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_SAME_='eval assertSame --lineno "${LINENO:-}"'
+
+# Assert that two values are not the same (i.e. not equal to one another).
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertNotSame() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertNotSame() requires two or three arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_:-}$1"
+ shift
+ fi
+ assertNotEquals "${shunit_message_}" "$1" "$2"
+ shunit_return=$?
+
+ unset shunit_message_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_NOT_SAME_='eval assertNotSame --lineno "${LINENO:-}"'
+
+# Assert that a value or shell test condition is true.
+#
+# In shell, a value of 0 is true and a non-zero value is false. Any integer
+# value passed can thereby be tested.
+#
+# Shell supports much more complicated tests though, and a means to support
+# them was needed. As such, this function tests that conditions are true or
+# false through evaluation rather than just looking for a true or false.
+#
+# The following tests will succeed:
+# assertTrue 0
+# assertTrue "[ 34 -gt 23 ]"
+# The following tests will fail with a message:
+# assertTrue 123
+# assertTrue "test failed" "[ -r '/non/existent/file' ]"
+#
+# Args:
+# message: string: failure message [optional]
+# condition: string: integer value or shell conditional statement
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertTrue() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 1 -o $# -gt 2 ]; then
+ _shunit_error "assertTrue() takes one or two arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 2 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_condition_=$1
+
+ # See if condition is an integer, i.e. a return value.
+ shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'`
+ shunit_return=${SHUNIT_TRUE}
+ if command [ -z "${shunit_condition_}" ]; then
+ # Null condition.
+ shunit_return=${SHUNIT_FALSE}
+ elif command [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ]
+ then
+ # Possible return value. Treating 0 as true, and non-zero as false.
+ command [ "${shunit_condition_}" -ne 0 ] && shunit_return=${SHUNIT_FALSE}
+ else
+ # Hopefully... a condition.
+ ( eval "${shunit_condition_}" ) >/dev/null 2>&1
+ command [ $? -ne 0 ] && shunit_return=${SHUNIT_FALSE}
+ fi
+
+ # Record the test.
+ if command [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then
+ _shunit_assertPass
+ else
+ _shunit_assertFail "${shunit_message_}"
+ fi
+
+ unset shunit_message_ shunit_condition_ shunit_match_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_TRUE_='eval assertTrue --lineno "${LINENO:-}"'
+
+# Assert that a value or shell test condition is false.
+#
+# In shell, a value of 0 is true and a non-zero value is false. Any integer
+# value passed can thereby be tested.
+#
+# Shell supports much more complicated tests though, and a means to support
+# them was needed. As such, this function tests that conditions are true or
+# false through evaluation rather than just looking for a true or false.
+#
+# The following tests will succeed:
+# assertFalse 1
+# assertFalse "[ 'apples' = 'oranges' ]"
+# The following tests will fail with a message:
+# assertFalse 0
+# assertFalse "test failed" "[ 1 -eq 1 -a 2 -eq 2 ]"
+#
+# Args:
+# message: string: failure message [optional]
+# condition: string: integer value or shell conditional statement
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertFalse() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 1 -o $# -gt 2 ]; then
+ _shunit_error "assertFalse() requires one or two arguments; $# given"
+ _shunit_assertFail
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 2 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_condition_=$1
+
+ # See if condition is an integer, i.e. a return value.
+ shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'`
+ shunit_return=${SHUNIT_TRUE}
+ if command [ -z "${shunit_condition_}" ]; then
+ # Null condition.
+ shunit_return=${SHUNIT_FALSE}
+ elif command [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ]
+ then
+ # Possible return value. Treating 0 as true, and non-zero as false.
+ command [ "${shunit_condition_}" -eq 0 ] && shunit_return=${SHUNIT_FALSE}
+ else
+ # Hopefully... a condition.
+ ( eval "${shunit_condition_}" ) >/dev/null 2>&1
+ command [ $? -eq 0 ] && shunit_return=${SHUNIT_FALSE}
+ fi
+
+ # Record the test.
+ if command [ "${shunit_return}" -eq "${SHUNIT_TRUE}" ]; then
+ _shunit_assertPass
+ else
+ _shunit_assertFail "${shunit_message_}"
+ fi
+
+ unset shunit_message_ shunit_condition_ shunit_match_
+ return "${shunit_return}"
+}
+# shellcheck disable=SC2016,SC2034
+_ASSERT_FALSE_='eval assertFalse --lineno "${LINENO:-}"'
+
+#-----------------------------------------------------------------------------
+# Failure functions.
+#
+
+# Records a test failure.
+#
+# Args:
+# message: string: failure message [optional]
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+fail() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -gt 1 ]; then
+ _shunit_error "fail() requires zero or one arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 1 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+
+ _shunit_assertFail "${shunit_message_}"
+
+ unset shunit_message_
+ return ${SHUNIT_FALSE}
+}
+# shellcheck disable=SC2016,SC2034
+_FAIL_='eval fail --lineno "${LINENO:-}"'
+
+# Records a test failure, stating two values were not equal.
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+failNotEquals() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+    _shunit_error "failNotEquals() requires two or three arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_expected_=$1
+ shunit_actual_=$2
+
+ shunit_message_=${shunit_message_%% }
+ _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected:<${shunit_expected_}> but was:<${shunit_actual_}>"
+
+ unset shunit_message_ shunit_expected_ shunit_actual_
+ return ${SHUNIT_FALSE}
+}
+# shellcheck disable=SC2016,SC2034
+_FAIL_NOT_EQUALS_='eval failNotEquals --lineno "${LINENO:-}"'
+
+# Records a test failure, stating a value was found.
+#
+# Args:
+# message: string: failure message [optional]
+# content: string: found value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+failFound() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 1 -o $# -gt 2 ]; then
+ _shunit_error "failFound() requires one or two arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 2 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+
+ shunit_message_=${shunit_message_%% }
+ _shunit_assertFail "${shunit_message_:+${shunit_message_} }Found"
+
+ unset shunit_message_
+ return ${SHUNIT_FALSE}
+}
+# shellcheck disable=SC2016,SC2034
+_FAIL_FOUND_='eval failFound --lineno "${LINENO:-}"'
+
+# Records a test failure, stating a content was not found.
+#
+# Args:
+# message: string: failure message [optional]
+# content: string: content not found
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+failNotFound() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 1 -o $# -gt 2 ]; then
+ _shunit_error "failNotFound() requires one or two arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 2 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_content_=$1
+
+ shunit_message_=${shunit_message_%% }
+ _shunit_assertFail "${shunit_message_:+${shunit_message_} }Not found:<${shunit_content_}>"
+
+ unset shunit_message_ shunit_content_
+ return ${SHUNIT_FALSE}
+}
+# shellcheck disable=SC2016,SC2034
+_FAIL_NOT_FOUND_='eval failNotFound --lineno "${LINENO:-}"'
+
+# Records a test failure, stating two values should not have been the same.
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+failSame()
+{
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "failSame() requires two or three arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+
+ shunit_message_=${shunit_message_%% }
+ _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected not same"
+
+ unset shunit_message_
+ return ${SHUNIT_FALSE}
+}
+# shellcheck disable=SC2016,SC2034
+_FAIL_SAME_='eval failSame --lineno "${LINENO:-}"'
+
+# Records a test failure, stating two values were not equal.
+#
+# This is functionally equivalent to calling failNotEquals().
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+failNotSame() {
+ # shellcheck disable=SC2090
+ ${_SHUNIT_LINENO_}
+ if command [ $# -lt 2 -o $# -gt 3 ]; then
+    _shunit_error "failNotSame() requires two or three arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if command [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ failNotEquals "${shunit_message_}" "$1" "$2"
+ shunit_return=$?
+
+ unset shunit_message_
+ return ${shunit_return}
+}
+# shellcheck disable=SC2016,SC2034
+_FAIL_NOT_SAME_='eval failNotSame --lineno "${LINENO:-}"'
+
+#-----------------------------------------------------------------------------
+# Skipping functions.
+#
+
+# Force remaining assert and fail functions to be "skipped".
+#
+# This function forces the remaining assert and fail functions to be "skipped",
+# i.e. they will have no effect. Each function skipped will be recorded so that
+# the total of asserts and fails will not be altered.
+#
+# Args:
+# None
+startSkipping() { __shunit_skip=${SHUNIT_TRUE}; }
+
+# Resume the normal recording behavior of assert and fail calls.
+#
+# Args:
+# None
+endSkipping() { __shunit_skip=${SHUNIT_FALSE}; }
+
+# Returns the state of assert and fail call skipping.
+#
+# Args:
+# None
+# Returns:
+# boolean: (TRUE/FALSE constant)
+isSkipping() { return ${__shunit_skip}; }
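+
+# Illustrative sketch (hypothetical test, not part of shunit2): skipping is
+# typically toggled around platform-specific asserts, e.g.:
+#   testProcUptime() {
+#     [ "`uname -s`" = 'Linux' ] || startSkipping
+#     assertTrue 'missing /proc/uptime' "[ -r /proc/uptime ]"
+#     isSkipping && endSkipping
+#   }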
+
+#-----------------------------------------------------------------------------
+# Suite functions.
+#
+
+# Stub. This function should contain all unit test calls to be made.
+#
+# DEPRECATED (as of 2.1.0)
+#
+# This function can be optionally overridden by the user in their test suite.
+#
+# If this function exists, it will be called when shunit2 is sourced. If it
+# does not exist, shunit2 will search the parent script for all functions
+# beginning with the word 'test', and they will be added dynamically to the
+# test suite.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#suite() { :; } # DO NOT UNCOMMENT THIS FUNCTION
+
+# Adds a function name to the list of tests scheduled for execution.
+#
+# This function should only be called from within the suite() function.
+#
+# Args:
+# function: string: name of a function to add to current unit test suite
+suite_addTest() {
+ shunit_func_=${1:-}
+
+ __shunit_suite="${__shunit_suite:+${__shunit_suite} }${shunit_func_}"
+ __shunit_testsTotal=`expr ${__shunit_testsTotal} + 1`
+
+ unset shunit_func_
+}
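+
+# Illustrative sketch (hypothetical, deprecated suite() style): a suite()
+# override would register tests explicitly, e.g.:
+#   suite() {
+#     suite_addTest testThisThing
+#     suite_addTest testThatThing
+#   }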
+
+# Stub. This function will be called once before any tests are run.
+#
+# Common one-time environment preparation tasks shared by all tests can be
+# defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#oneTimeSetUp() { :; } # DO NOT UNCOMMENT THIS FUNCTION
+
+# Stub. This function will be called once after all tests are finished.
+#
+# Common one-time environment cleanup tasks shared by all tests can be defined
+# here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#oneTimeTearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION
+
+# Stub. This function will be called before each test is run.
+#
+# Common environment preparation tasks shared by all tests can be defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#setUp() { :; } # DO NOT UNCOMMENT THIS FUNCTION
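+
+# Illustrative sketch (hypothetical override, not part of shunit2): a test
+# script might prepare a per-test fixture in the shunit2-provided temporary
+# directory, e.g.:
+#   setUp() {
+#     testDataF="${SHUNIT_TMPDIR}/data"
+#     : >"${testDataF}"
+#   }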
+
+# Stub. This function will be called after each test is run.
+#
+# Common environment cleanup tasks shared by all tests can be defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#tearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION
+
+#------------------------------------------------------------------------------
+# Internal shUnit2 functions.
+#
+
+# Create a temporary directory to store various run-time files in.
+#
+# This function is a cross-platform temporary directory creation tool. Not all
+# OSes have the `mktemp` function, so one is included here.
+#
+# Args:
+# None
+# Outputs:
+# string: the temporary directory that was created
+_shunit_mktempDir() {
+ # Try the standard `mktemp` function.
+ ( exec mktemp -dqt shunit.XXXXXX 2>/dev/null ) && return
+
+ # The standard `mktemp` didn't work. Use our own.
+ # shellcheck disable=SC2039
+ if command [ -r '/dev/urandom' -a -x '/usr/bin/od' ]; then
+ _shunit_random_=`/usr/bin/od -vAn -N4 -tx4 </dev/urandom \
+ |command sed 's/^[^0-9a-f]*//'`
+ elif command [ -n "${RANDOM:-}" ]; then
+ # $RANDOM works
+ _shunit_random_=${RANDOM}${RANDOM}${RANDOM}$$
+ else
+ # `$RANDOM` doesn't work.
+ _shunit_date_=`date '+%Y%m%d%H%M%S'`
+ _shunit_random_=`expr "${_shunit_date_}" / $$`
+ fi
+
+ _shunit_tmpDir_="${TMPDIR:-/tmp}/shunit.${_shunit_random_}"
+ ( umask 077 && command mkdir "${_shunit_tmpDir_}" ) || \
+ _shunit_fatal 'could not create temporary directory! exiting'
+
+ echo "${_shunit_tmpDir_}"
+ unset _shunit_date_ _shunit_random_ _shunit_tmpDir_
+}
+
+# This function is here to work around issues in Cygwin.
+#
+# Args:
+# None
+_shunit_mktempFunc() {
+ for _shunit_func_ in oneTimeSetUp oneTimeTearDown setUp tearDown suite noexec
+ do
+ _shunit_file_="${__shunit_tmpDir}/${_shunit_func_}"
+ command cat <<EOF >"${_shunit_file_}"
+#! /bin/sh
+exit ${SHUNIT_TRUE}
+EOF
+ command chmod +x "${_shunit_file_}"
+ done
+
+ unset _shunit_file_
+}
+
+# Final cleanup function to leave things as we found them.
+#
+# Besides removing the temporary directory, this function is in charge of the
+# final exit code of the unit test. The exit code is based on how the script
+# was ended (e.g. normal exit, or via Ctrl-C).
+#
+# Args:
+# name: string: name of the trap called (specified when trap defined)
+_shunit_cleanup() {
+ _shunit_name_=$1
+
+ case "${_shunit_name_}" in
+ EXIT) ;;
+ INT) _shunit_signal_=130 ;; # 2+128
+ TERM) _shunit_signal_=143 ;; # 15+128
+ *)
+ _shunit_error "unrecognized trap value (${_shunit_name_})"
+ _shunit_signal_=0
+ ;;
+ esac
+ if command [ "${_shunit_name_}" != 'EXIT' ]; then
+ _shunit_warn "trapped and now handling the (${_shunit_name_}) signal"
+ fi
+
+ # Do our work.
+ if command [ ${__shunit_clean} -eq ${SHUNIT_FALSE} ]; then
+ # Ensure tear downs are only called once.
+ __shunit_clean=${SHUNIT_TRUE}
+
+ tearDown
+ command [ $? -eq ${SHUNIT_TRUE} ] \
+ || _shunit_warn "tearDown() returned non-zero return code."
+ oneTimeTearDown
+ command [ $? -eq ${SHUNIT_TRUE} ] \
+ || _shunit_warn "oneTimeTearDown() returned non-zero return code."
+
+ command rm -fr "${__shunit_tmpDir}"
+ fi
+
+ if command [ "${_shunit_name_}" != 'EXIT' ]; then
+ # Handle all non-EXIT signals.
+ trap - 0 # Disable EXIT trap.
+ exit ${_shunit_signal_}
+ elif command [ ${__shunit_reportGenerated} -eq ${SHUNIT_FALSE} ]; then
+ _shunit_assertFail 'unknown failure encountered running a test'
+ _shunit_generateReport
+ exit ${SHUNIT_ERROR}
+ fi
+
+ unset _shunit_name_ _shunit_signal_
+}
+
+# Configure color output based on the user's color preference.
+#
+# Args:
+# color: string: color mode (one of `always`, `auto`, or `none`).
+_shunit_configureColor() {
+ _shunit_color_=${SHUNIT_FALSE} # By default, no color.
+ case $1 in
+ 'always') _shunit_color_=${SHUNIT_TRUE} ;;
+ 'auto')
+ command [ "`_shunit_colors`" -ge 8 ] && _shunit_color_=${SHUNIT_TRUE}
+ ;;
+ 'none') ;;
+ *) _shunit_fatal "unrecognized color option '$1'" ;;
+ esac
+
+ case ${_shunit_color_} in
+ ${SHUNIT_TRUE})
+ __shunit_ansi_none=${__SHUNIT_ANSI_NONE}
+ __shunit_ansi_red=${__SHUNIT_ANSI_RED}
+ __shunit_ansi_green=${__SHUNIT_ANSI_GREEN}
+ __shunit_ansi_yellow=${__SHUNIT_ANSI_YELLOW}
+ __shunit_ansi_cyan=${__SHUNIT_ANSI_CYAN}
+ ;;
+ ${SHUNIT_FALSE})
+ __shunit_ansi_none=''
+ __shunit_ansi_red=''
+ __shunit_ansi_green=''
+ __shunit_ansi_yellow=''
+ __shunit_ansi_cyan=''
+ ;;
+ esac
+
+ unset _shunit_color_ _shunit_tput_
+}
+
+# colors returns the number of supported colors for the TERM.
+_shunit_colors() {
+ _shunit_tput_=`${SHUNIT_CMD_TPUT} colors 2>/dev/null`
+ if command [ $? -eq 0 ]; then
+ echo "${_shunit_tput_}"
+ else
+ echo 16
+ fi
+ unset _shunit_tput_
+}
+
+# The actual running of the tests happens here.
+#
+# Args:
+# None
+_shunit_execSuite() {
+ for _shunit_test_ in ${__shunit_suite}; do
+ __shunit_testSuccess=${SHUNIT_TRUE}
+
+ # Disable skipping.
+ endSkipping
+
+ # Execute the per-test setup function.
+ setUp
+    command [ $? -eq ${SHUNIT_TRUE} ] \
+      || _shunit_fatal "setUp() returned non-zero return code."
+
+ # Execute the test.
+ echo "${__SHUNIT_TEST_PREFIX}${_shunit_test_}"
+ eval "${_shunit_test_}"
+ if command [ $? -ne ${SHUNIT_TRUE} ]; then
+ _shunit_error "${_shunit_test_}() returned non-zero return code."
+ __shunit_testSuccess=${SHUNIT_ERROR}
+ _shunit_incFailedCount
+ fi
+
+ # Execute the per-test tear-down function.
+ tearDown
+ command [ $? -eq ${SHUNIT_TRUE} ] \
+ || _shunit_fatal "tearDown() returned non-zero return code."
+
+ # Update stats.
+ if command [ ${__shunit_testSuccess} -eq ${SHUNIT_TRUE} ]; then
+ __shunit_testsPassed=`expr ${__shunit_testsPassed} + 1`
+ else
+ __shunit_testsFailed=`expr ${__shunit_testsFailed} + 1`
+ fi
+ done
+
+ unset _shunit_test_
+}
+
+# Generates the user-friendly report with the appropriate OK/FAILED message.
+#
+# Args:
+# None
+# Output:
+# string: the report of successful and failed tests, as well as totals.
+_shunit_generateReport() {
+ command [ "${__shunit_reportGenerated}" -eq ${SHUNIT_TRUE} ] && return
+
+ _shunit_ok_=${SHUNIT_TRUE}
+
+ # If no exit code was provided, determine an appropriate one.
+ command [ "${__shunit_testsFailed}" -gt 0 \
+ -o ${__shunit_testSuccess} -eq ${SHUNIT_FALSE} ] \
+ && _shunit_ok_=${SHUNIT_FALSE}
+
+ echo
+ _shunit_msg_="Ran ${__shunit_ansi_cyan}${__shunit_testsTotal}${__shunit_ansi_none}"
+ if command [ "${__shunit_testsTotal}" -eq 1 ]; then
+ ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_} test."
+ else
+ ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_} tests."
+ fi
+
+ if command [ ${_shunit_ok_} -eq ${SHUNIT_TRUE} ]; then
+ _shunit_msg_="${__shunit_ansi_green}OK${__shunit_ansi_none}"
+ command [ "${__shunit_assertsSkipped}" -gt 0 ] \
+ && _shunit_msg_="${_shunit_msg_} (${__shunit_ansi_yellow}skipped=${__shunit_assertsSkipped}${__shunit_ansi_none})"
+ else
+ _shunit_msg_="${__shunit_ansi_red}FAILED${__shunit_ansi_none}"
+ _shunit_msg_="${_shunit_msg_} (${__shunit_ansi_red}failures=${__shunit_assertsFailed}${__shunit_ansi_none}"
+ command [ "${__shunit_assertsSkipped}" -gt 0 ] \
+ && _shunit_msg_="${_shunit_msg_},${__shunit_ansi_yellow}skipped=${__shunit_assertsSkipped}${__shunit_ansi_none}"
+ _shunit_msg_="${_shunit_msg_})"
+ fi
+
+ echo
+ ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_}"
+ __shunit_reportGenerated=${SHUNIT_TRUE}
+
+ unset _shunit_msg_ _shunit_ok_
+}
+
+# Test for whether a function should be skipped.
+#
+# Args:
+# None
+# Returns:
+# boolean: whether the test should be skipped (TRUE/FALSE constant)
+_shunit_shouldSkip() {
+ command [ ${__shunit_skip} -eq ${SHUNIT_FALSE} ] && return ${SHUNIT_FALSE}
+ _shunit_assertSkip
+}
+
+# Records a successful test.
+#
+# Args:
+# None
+_shunit_assertPass() {
+ __shunit_assertsPassed=`expr ${__shunit_assertsPassed} + 1`
+ __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1`
+}
+
+# Records a test failure.
+#
+# Args:
+# message: string: failure message to provide user
+_shunit_assertFail() {
+ __shunit_testSuccess=${SHUNIT_FALSE}
+ _shunit_incFailedCount
+
+ \[ $# -gt 0 ] && ${__SHUNIT_CMD_ECHO_ESC} \
+ "${__shunit_ansi_red}ASSERT:${__shunit_ansi_none}$*"
+}
+
+# Increment the count of failed asserts.
+#
+# Args:
+# none
+_shunit_incFailedCount() {
+ __shunit_assertsFailed=`expr "${__shunit_assertsFailed}" + 1`
+ __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1`
+}
+
+# Records a skipped test.
+#
+# Args:
+# None
+_shunit_assertSkip() {
+ __shunit_assertsSkipped=`expr "${__shunit_assertsSkipped}" + 1`
+ __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1`
+}
+
+# Prepare a script filename for sourcing.
+#
+# Args:
+# script: string: path to a script to source
+# Returns:
+# string: filename prefixed with ./ (if necessary)
+_shunit_prepForSourcing() {
+ _shunit_script_=$1
+ case "${_shunit_script_}" in
+ /*|./*) echo "${_shunit_script_}" ;;
+ *) echo "./${_shunit_script_}" ;;
+ esac
+ unset _shunit_script_
+}
+
+# Escape a character in a string.
+#
+# Args:
+# c: string: unescaped character
+# s: string: to escape character in
+# Returns:
+# string: with escaped character(s)
+_shunit_escapeCharInStr() {
+ command [ -n "$2" ] || return # No point in doing work on an empty string.
+
+ # Note: using shorter variable names to prevent conflicts with
+ # _shunit_escapeCharactersInString().
+ _shunit_c_=$1
+ _shunit_s_=$2
+
+ # Escape the character.
+ # shellcheck disable=SC1003,SC2086
+ echo ''${_shunit_s_}'' |command sed 's/\'${_shunit_c_}'/\\\'${_shunit_c_}'/g'
+
+ unset _shunit_c_ _shunit_s_
+}
+
+# Escape special characters in a string.
+#
+# Args:
+# str: string: to escape characters in
+# Returns:
+# string: with escaped character(s)
+_shunit_escapeCharactersInString() {
+ command [ -n "$1" ] || return # No point in doing work on an empty string.
+
+ _shunit_str_=$1
+
+ # Note: using longer variable names to prevent conflicts with
+ # _shunit_escapeCharInStr().
+ for _shunit_char_ in '"' '$' "'" '`'; do
+ _shunit_str_=`_shunit_escapeCharInStr "${_shunit_char_}" "${_shunit_str_}"`
+ done
+
+ echo "${_shunit_str_}"
+ unset _shunit_char_ _shunit_str_
+}
+
+# Extract list of functions to run tests against.
+#
+# Args:
+# script: string: name of script to extract functions from
+# Returns:
+# string: of function names
+_shunit_extractTestFunctions() {
+ _shunit_script_=$1
+
+  # Extract the lines with test function names, strip off anything besides the
+  # function name, and output everything on a single line.
+ _shunit_regex_='^\s*((function test[A-Za-z0-9_-]*)|(test[A-Za-z0-9_-]* *\(\)))'
+ # shellcheck disable=SC2196
+ egrep "${_shunit_regex_}" "${_shunit_script_}" \
+ |command sed 's/^[^A-Za-z0-9_-]*//;s/^function //;s/\([A-Za-z0-9_-]*\).*/\1/g' \
+ |xargs
+
+ unset _shunit_regex_ _shunit_script_
+}
+
+#------------------------------------------------------------------------------
+# Main.
+#
+
+# Determine the operating mode.
+if command [ $# -eq 0 -o "${1:-}" = '--' ]; then
+ __shunit_script=${__SHUNIT_PARENT}
+ __shunit_mode=${__SHUNIT_MODE_SOURCED}
+else
+ __shunit_script=$1
+ command [ -r "${__shunit_script}" ] || \
+ _shunit_fatal "unable to read from ${__shunit_script}"
+ __shunit_mode=${__SHUNIT_MODE_STANDALONE}
+fi
+
+# Create a temporary storage location.
+__shunit_tmpDir=`_shunit_mktempDir`
+
+# Provide a public temporary directory for unit test scripts.
+# TODO(kward): document this.
+SHUNIT_TMPDIR="${__shunit_tmpDir}/tmp"
+command mkdir "${SHUNIT_TMPDIR}"
+
+# Setup traps to clean up after ourselves.
+trap '_shunit_cleanup EXIT' 0
+trap '_shunit_cleanup INT' 2
+trap '_shunit_cleanup TERM' 15
+
+# Create phantom functions to work around issues with Cygwin.
+_shunit_mktempFunc
+PATH="${__shunit_tmpDir}:${PATH}"
+
+# Make sure phantom functions are executable. This will bite if `/tmp` (or the
+# current `$TMPDIR`) points to a path on a partition that was mounted with the
+# 'noexec' option. The noexec command was created with `_shunit_mktempFunc()`.
+noexec 2>/dev/null || _shunit_fatal \
+ 'Please declare TMPDIR with path on partition with exec permission.'
+
+# We must manually source the tests in standalone mode.
+if command [ "${__shunit_mode}" = "${__SHUNIT_MODE_STANDALONE}" ]; then
+ # shellcheck disable=SC1090
+ command . "`_shunit_prepForSourcing \"${__shunit_script}\"`"
+fi
+
+# Configure default output coloring behavior.
+_shunit_configureColor "${SHUNIT_COLOR}"
+
+# Execute the oneTimeSetUp function (if it exists).
+oneTimeSetUp
+command [ $? -eq ${SHUNIT_TRUE} ] \
+ || _shunit_fatal "oneTimeSetUp() returned non-zero return code."
+
+# Command line selected tests or suite selected tests
+if command [ "$#" -ge 2 ]; then
+ # Argument $1 is either the filename of tests or '--'; either way, skip it.
+ shift
+ # Remaining arguments ($2 .. $#) are assumed to be test function names.
+  # Iterate through all remaining args in "$@" in a POSIX (likely portable) way.
+ # Helpful tip: https://unix.stackexchange.com/questions/314032/how-to-use-arguments-like-1-2-in-a-for-loop
+ for _shunit_arg_ do
+ suite_addTest "${_shunit_arg_}"
+ done
+ unset _shunit_arg_
+else
+ # Execute the suite function defined in the parent test script.
+ # DEPRECATED as of 2.1.0.
+ suite
+fi
+
+# If no tests or suite specified, dynamically build a list of functions.
+if command [ -z "${__shunit_suite}" ]; then
+ shunit_funcs_=`_shunit_extractTestFunctions "${__shunit_script}"`
+ for shunit_func_ in ${shunit_funcs_}; do
+ suite_addTest "${shunit_func_}"
+ done
+fi
+unset shunit_func_ shunit_funcs_
+
+# Execute the suite of unit tests.
+_shunit_execSuite
+
+# Execute the oneTimeTearDown function (if it exists).
+oneTimeTearDown
+command [ $? -eq ${SHUNIT_TRUE} ] \
+ || _shunit_fatal "oneTimeTearDown() returned non-zero return code."
+
+# Generate a report summary.
+_shunit_generateReport
+
+# That's it folks.
+command [ "${__shunit_testsFailed}" -eq 0 ]
+exit $?
diff --git a/src/fluent-bit/tests/lib/shunit2/shunit2_args_test.sh b/src/fluent-bit/tests/lib/shunit2/shunit2_args_test.sh
new file mode 100755
index 000000000..fc252232b
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/shunit2_args_test.sh
@@ -0,0 +1,59 @@
+#!/bin/sh
+#
+# shunit2 unit test for running subset(s) of tests based upon command-line arguments.
+# Also shows how non-default tests or an arbitrary subset of tests can be run.
+#
+# Disable source following.
+# shellcheck disable=SC1090,SC1091
+
+# Load test helpers.
+. ./shunit2_test_helpers
+
+# This test does not normally run because its name does not begin with "test".
+# It will be run by setting the arguments to the script to include the name of
+# this test.
+non_default_test() {
+ # arbitrary assert
+ assertTrue 0
+ # true intent is to set this variable, which will be tested below
+ NON_DEFAULT_TEST_RAN="yup, we ran"
+}
+
+# Test that the "non_default_test" ran, otherwise fail
+test_non_default_ran() {
+ assertNotNull "'non_default_test' did not run" "$NON_DEFAULT_TEST_RAN"
+}
+
+# Fail if this test runs, which it shouldn't if args are set correctly.
+test_will_fail() {
+ fail "test_will_fail should not be run if arg-parsing works"
+}
+
+oneTimeSetUp() {
+ th_oneTimeSetUp
+ # prime with "null" value
+ NON_DEFAULT_TEST_RAN=""
+}
+
+# Load and run shunit2.
+# shellcheck disable=SC2034
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+
+# If zero/one argument(s) are provided, this test is being run in its
+# entirety, and therefore we want to set the arguments to the script
+# to (simulate and) test the processing of command-line specified
+# tests. If we don't, then the "test_will_fail" test will run (by
+# default) and the overall test will fail.
+#
+# However, if two or more arguments are provided, then assume this
+# test script is being run by hand to experiment with command-line
+# test specification, and then don't override the user provided
+# arguments.
+if command [ "$#" -le 1 ]; then
+ # We set the arguments in a POSIX way, inasmuch as we can;
+ # helpful tip:
+ # https://unix.stackexchange.com/questions/258512/how-to-remove-a-positional-parameter-from
+ set -- "--" "non_default_test" "test_non_default_ran"
+fi
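+
+# Illustrative sketch: to experiment by hand, name the tests to run after '--'
+# on the command line, e.g. something like:
+#   sh ./shunit2_args_test.sh -- non_default_test test_non_default_ran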
+
+# Load and run tests, but only if running as a script, not if being sourced by shunit2
+command [ -z "${SHUNIT_VERSION:-}" ] && . "${TH_SHUNIT}"
diff --git a/src/fluent-bit/tests/lib/shunit2/shunit2_asserts_test.sh b/src/fluent-bit/tests/lib/shunit2/shunit2_asserts_test.sh
new file mode 100755
index 000000000..7b982ead4
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/shunit2_asserts_test.sh
@@ -0,0 +1,258 @@
+#! /bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# shunit2 unit test for assert functions.
+#
+# Copyright 2008-2017 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+# https://github.com/kward/shunit2
+#
+# Disable source following.
+# shellcheck disable=SC1090,SC1091
+
+# These variables will be overridden by the test helpers.
+stdoutF="${TMPDIR:-/tmp}/STDOUT"
+stderrF="${TMPDIR:-/tmp}/STDERR"
+
+# Load test helpers.
+. ./shunit2_test_helpers
+
+commonEqualsSame() {
+ fn=$1
+
+ ( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'equal' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'equal; with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} 'abc def' 'abc def' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'equal with spaces' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not equal' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'null values' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+commonNotEqualsSame() {
+ fn=$1
+
+ ( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not same' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} "${MSG}" 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not same, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertEquals() {
+ commonEqualsSame 'assertEquals'
+}
+
+testAssertNotEquals() {
+ commonNotEqualsSame 'assertNotEquals'
+}
+
+testAssertSame() {
+ commonEqualsSame 'assertSame'
+}
+
+testAssertNotSame() {
+ commonNotEqualsSame 'assertNotSame'
+}
+
+testAssertContains() {
+ ( assertContains 'abcdef' 'abc' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains 'abcdef' 'bcd' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains 'abcdef' 'def' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains 'abc -Xabc def' '-Xabc' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'content starts with "-"' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains "${MSG}" 'abcdef' 'abc' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'found, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains 'abcdef' 'xyz' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains 'abcdef' 'zab' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains 'abcdef' 'efg' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains 'abcdef' 'acf' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains 'abcdef' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( assertContains arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertNotContains() {
+ ( assertNotContains 'abcdef' 'xyz' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotContains 'abcdef' 'zab' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotContains 'abcdef' 'efg' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotContains 'abcdef' 'acf' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotContains "${MSG}" 'abcdef' 'xyz' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not found, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotContains 'abcdef' 'abc' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'found' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotContains 'abcdef' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotContains arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertNull() {
+ ( assertNull '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNull "${MSG}" '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'null, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNull 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNull >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertNotNull()
+{
+ ( assertNotNull 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull "${MSG}" 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull 'x"b' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with double-quote' $? \
+ "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull "x'b" >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with single-quote' $? \
+ "${stdoutF}" "${stderrF}"
+
+ # shellcheck disable=SC2016
+ ( assertNotNull 'x$b' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with dollar' $? \
+ "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull 'x`b' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with backtick' $? \
+ "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
+
+ # There is no test for too few arguments as $1 might actually be null.
+
+ ( assertNotNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertTrue() {
+ ( assertTrue 0 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'true' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue "${MSG}" 0 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'true, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'true condition' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue 1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'false' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'false condition' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertFalse() {
+ ( assertFalse 1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'false' $? "${stdoutF}" "${stderrF}"
+
+ ( assertFalse "${MSG}" 1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'false, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertFalse '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'false condition' $? "${stdoutF}" "${stderrF}"
+
+ ( assertFalse 0 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'true' $? "${stdoutF}" "${stderrF}"
+
+ ( assertFalse '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'true condition' $? "${stdoutF}" "${stderrF}"
+
+ ( assertFalse '' >"${stdoutF}" 2>"${stderrF}" )
+  th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertFalse >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( assertFalse arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+oneTimeSetUp() {
+ th_oneTimeSetUp
+
+ MSG='This is a test message'
+}
+
+# Load and run shunit2.
+# shellcheck disable=SC2034
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. "${TH_SHUNIT}"
diff --git a/src/fluent-bit/tests/lib/shunit2/shunit2_failures_test.sh b/src/fluent-bit/tests/lib/shunit2/shunit2_failures_test.sh
new file mode 100755
index 000000000..3cedd2fa5
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/shunit2_failures_test.sh
@@ -0,0 +1,85 @@
+#! /bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008-2019 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# shUnit2 -- Unit testing framework for Unix shell scripts.
+# https://github.com/kward/shunit2
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 unit test for failure functions
+#
+# Disable source following.
+# shellcheck disable=SC1090,SC1091
+
+# These variables will be overridden by the test helpers.
+stdoutF="${TMPDIR:-/tmp}/STDOUT"
+stderrF="${TMPDIR:-/tmp}/STDERR"
+
+# Load test helpers.
+. ./shunit2_test_helpers
+
+testFail() {
+ ( fail >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'fail' $? "${stdoutF}" "${stderrF}"
+
+ ( fail "${MSG}" >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'fail with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( fail arg1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testFailNotEquals() {
+ ( failNotEquals 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals '' '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testFailSame() {
+ ( failSame 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame '' '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+oneTimeSetUp() {
+ th_oneTimeSetUp
+
+ MSG='This is a test message'
+}
+
+# Load and run shUnit2.
+# shellcheck disable=SC2034
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. "${TH_SHUNIT}"
diff --git a/src/fluent-bit/tests/lib/shunit2/shunit2_macros_test.sh b/src/fluent-bit/tests/lib/shunit2/shunit2_macros_test.sh
new file mode 100755
index 000000000..dba7c9bf8
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/shunit2_macros_test.sh
@@ -0,0 +1,265 @@
+#! /bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# shunit2 unit test for macros.
+#
+# Copyright 2008-2017 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+# https://github.com/kward/shunit2
+#
+### ShellCheck http://www.shellcheck.net/
+# Disable source following.
+# shellcheck disable=SC1090,SC1091
+# Presence of LINENO variable is checked.
+# shellcheck disable=SC2039
+
+# These variables will be overridden by the test helpers.
+stdoutF="${TMPDIR:-/tmp}/STDOUT"
+stderrF="${TMPDIR:-/tmp}/STDERR"
+
+# Load test helpers.
+. ./shunit2_test_helpers
+
+testAssertEquals() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_ASSERT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_EQUALS_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_ASSERT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_EQUALS_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testAssertNotEquals() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_ASSERT_NOT_EQUALS_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_NOT_EQUALS_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_ASSERT_NOT_EQUALS_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_NOT_EQUALS_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testSame() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_ASSERT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_SAME_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_ASSERT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_SAME_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testNotSame() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_ASSERT_NOT_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_NOT_SAME_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_ASSERT_NOT_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_NOT_SAME_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testNull() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_ASSERT_NULL_} 'x' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_NULL_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_ASSERT_NULL_} '"some msg"' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_NULL_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testNotNull()
+{
+ # start skipping if LINENO not available
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_ASSERT_NOT_NULL_} '' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_NOT_NULL_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_ASSERT_NOT_NULL_} '"some msg"' '""' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_NOT_NULL_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stdoutF}" "${stderrF}" >&2
+
+ return 0
+}
+
+testAssertTrue() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_ASSERT_TRUE_} "${SHUNIT_FALSE}" >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_TRUE_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_ASSERT_TRUE_} '"some msg"' "${SHUNIT_FALSE}" >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_TRUE_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testAssertFalse() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_ASSERT_FALSE_} "${SHUNIT_TRUE}" >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_FALSE_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_ASSERT_FALSE_} '"some msg"' "${SHUNIT_TRUE}" >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_ASSERT_FALSE_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testFail() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_FAIL_} >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_FAIL_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_FAIL_} '"some msg"' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_FAIL_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testFailNotEquals()
+{
+ # start skipping if LINENO not available
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_FAIL_NOT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_FAIL_NOT_EQUALS_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_FAIL_NOT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_FAIL_NOT_EQUALS_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testFailSame() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_FAIL_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_FAIL_SAME_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_FAIL_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_FAIL_SAME_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+testFailNotSame() {
+ # Start skipping if LINENO not available.
+ [ -z "${LINENO:-}" ] && startSkipping
+
+ ( ${_FAIL_NOT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_FAIL_NOT_SAME_ failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ ( ${_FAIL_NOT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertTrue '_FAIL_NOT_SAME_ w/ msg failure' ${rtrn}
+ [ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
+
+ return 0
+}
+
+oneTimeSetUp() {
+ th_oneTimeSetUp
+}
+
+# Disable output coloring as it breaks the tests.
+SHUNIT_COLOR='none'; export SHUNIT_COLOR
+
+# Load and run shUnit2.
+# shellcheck disable=SC2034
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT="$0"
+. "${TH_SHUNIT}"
diff --git a/src/fluent-bit/tests/lib/shunit2/shunit2_misc_test.sh b/src/fluent-bit/tests/lib/shunit2/shunit2_misc_test.sh
new file mode 100755
index 000000000..54267313a
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/shunit2_misc_test.sh
@@ -0,0 +1,315 @@
+#! /bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# shUnit2 unit tests of miscellaneous things
+#
+# Copyright 2008-2018 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+# https://github.com/kward/shunit2
+#
+### ShellCheck http://www.shellcheck.net/
+# $() are not fully portable (POSIX != portable).
+# shellcheck disable=SC2006
+# Disable source following.
+# shellcheck disable=SC1090,SC1091
+# Not wanting to escape single quotes.
+# shellcheck disable=SC1003
+
+# These variables will be overridden by the test helpers.
+stdoutF="${TMPDIR:-/tmp}/STDOUT"
+stderrF="${TMPDIR:-/tmp}/STDERR"
+
+# Load test helpers.
+. ./shunit2_test_helpers
+
+# Note: the test script is prefixed with '#' chars so that shUnit2 does not
+# incorrectly interpret the embedded functions as real functions.
+testUnboundVariable() {
+ unittestF="${SHUNIT_TMPDIR}/unittest"
+ sed 's/^#//' >"${unittestF}" <<EOF
+## Treat unset variables as an error when performing parameter expansion.
+#set -u
+#
+#boom() { x=\$1; } # This function goes boom if no parameters are passed!
+#test_boom() {
+# assertEquals 1 1
+# boom # No parameter given
+# assertEquals 0 \$?
+#}
+#SHUNIT_COLOR='none'
+#. ${TH_SHUNIT}
+EOF
+ ( exec "${SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
+ assertFalse 'expected a non-zero exit value' $?
+ grep '^ASSERT:unknown failure' "${stdoutF}" >/dev/null
+ assertTrue 'assert message was not generated' $?
+ grep '^Ran [0-9]* test' "${stdoutF}" >/dev/null
+ assertTrue 'test count message was not generated' $?
+ grep '^FAILED' "${stdoutF}" >/dev/null
+ assertTrue 'failure message was not generated' $?
+}
+
+# assertEquals repeats message argument.
+# https://github.com/kward/shunit2/issues/7
+testIssue7() {
+ # Disable coloring so 'ASSERT:' lines can be matched correctly.
+ _shunit_configureColor 'none'
+
+ ( assertEquals 'Some message.' 1 2 >"${stdoutF}" 2>"${stderrF}" )
+ diff "${stdoutF}" - >/dev/null <<EOF
+ASSERT:Some message. expected:<1> but was:<2>
+EOF
+ rtrn=$?
+ assertEquals "${SHUNIT_TRUE}" "${rtrn}"
+ [ "${rtrn}" -eq "${SHUNIT_TRUE}" ] || cat "${stderrF}" >&2
+}
+
+# Support prefixes on test output.
+# https://github.com/kward/shunit2/issues/29
+testIssue29() {
+ unittestF="${SHUNIT_TMPDIR}/unittest"
+ sed 's/^#//' >"${unittestF}" <<EOF
+## Support test prefixes.
+#test_assert() { assertTrue ${SHUNIT_TRUE}; }
+#SHUNIT_COLOR='none'
+#SHUNIT_TEST_PREFIX='--- '
+#. ${TH_SHUNIT}
+EOF
+ ( exec "${SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
+ grep '^--- test_assert' "${stdoutF}" >/dev/null
+ rtrn=$?
+ assertEquals "${SHUNIT_TRUE}" "${rtrn}"
+ [ "${rtrn}" -eq "${SHUNIT_TRUE}" ] || cat "${stdoutF}" >&2
+}
+
+# shUnit2 should not exit with 0 when it has syntax errors.
+# https://github.com/kward/shunit2/issues/69
+testIssue69() {
+ unittestF="${SHUNIT_TMPDIR}/unittest"
+
+ for t in Equals NotEquals Null NotNull Same NotSame True False; do
+ assert="assert${t}"
+ sed 's/^#//' >"${unittestF}" <<EOF
+## Asserts with invalid argument counts should be counted as failures.
+#test_assert() { ${assert}; }
+#SHUNIT_COLOR='none'
+#. ${TH_SHUNIT}
+EOF
+ ( exec "${SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
+ grep '^FAILED' "${stdoutF}" >/dev/null
+ assertTrue "failure message for ${assert} was not generated" $?
+ done
+}
+
+# Ensure that a test fails if its setup/teardown functions fail.
+testIssue77() {
+ unittestF="${SHUNIT_TMPDIR}/unittest"
+ for func in oneTimeSetUp setUp tearDown oneTimeTearDown; do
+ sed 's/^#//' >"${unittestF}" <<EOF
+## Environment failure should end test.
+#${func}() { return ${SHUNIT_FALSE}; }
+#test_true() { assertTrue ${SHUNIT_TRUE}; }
+#SHUNIT_COLOR='none'
+#. ${TH_SHUNIT}
+EOF
+ ( exec "${SHELL:-sh}" "${unittestF}" ) >"${stdoutF}" 2>"${stderrF}"
+ grep '^FAILED' "${stdoutF}" >/dev/null
+ assertTrue "failure of ${func}() did not end test" $?
+ done
+}
+
+# Ensure a test failure is recorded for code containing syntax errors.
+# https://github.com/kward/shunit2/issues/84
+testIssue84() {
+ unittestF="${SHUNIT_TMPDIR}/unittest"
+ sed 's/^#//' >"${unittestF}" <<\EOF
+## Function with syntax error.
+#syntax_error() { ${!#3442} -334 a$@2[1]; }
+#test_syntax_error() {
+# syntax_error
+# assertTrue ${SHUNIT_TRUE}
+#}
+#SHUNIT_COLOR='none'
+#SHUNIT_TEST_PREFIX='--- '
+#. ${TH_SHUNIT}
+EOF
+ ( exec "${SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
+ grep '^FAILED' "${stdoutF}" >/dev/null
+ assertTrue "failure message for ${assert} was not generated" $?
+}
+
+testPrepForSourcing() {
+ assertEquals '/abc' "`_shunit_prepForSourcing '/abc'`"
+ assertEquals './abc' "`_shunit_prepForSourcing './abc'`"
+ assertEquals './abc' "`_shunit_prepForSourcing 'abc'`"
+}
+
+testEscapeCharInStr() {
+ while read -r desc char str want; do
+ got=`_shunit_escapeCharInStr "${char}" "${str}"`
+ assertEquals "${desc}" "${want}" "${got}"
+ done <<'EOF'
+backslash \ '' ''
+backslash_pre \ \def \\def
+backslash_mid \ abc\def abc\\def
+backslash_post \ abc\ abc\\
+quote " '' ''
+quote_pre " "def \"def
+quote_mid " abc"def abc\"def
+quote_post " abc" abc\"
+string $ '' ''
+string_pre $ $def \$def
+string_mid $ abc$def abc\$def
+string_post $ abc$ abc\$
+EOF
+
+ # TODO(20170924:kward) fix or remove.
+# actual=`_shunit_escapeCharInStr "'" ''`
+# assertEquals '' "${actual}"
+# assertEquals "abc\\'" `_shunit_escapeCharInStr "'" "abc'"`
+# assertEquals "abc\\'def" `_shunit_escapeCharInStr "'" "abc'def"`
+# assertEquals "\\'def" `_shunit_escapeCharInStr "'" "'def"`
+
+# # Must put the backtick in a variable so the shell doesn't misinterpret it
+# # while inside a backticked sequence (e.g. `echo '`'` would fail).
+# backtick='`'
+# actual=`_shunit_escapeCharInStr ${backtick} ''`
+# assertEquals '' "${actual}"
+# assertEquals '\`abc' \
+# `_shunit_escapeCharInStr "${backtick}" ${backtick}'abc'`
+# assertEquals 'abc\`' \
+# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}`
+# assertEquals 'abc\`def' \
+# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}'def'`
+}
+
+testEscapeCharInStr_specialChars() {
+ # Make sure our forward slash doesn't upset sed.
+ assertEquals '/' "`_shunit_escapeCharInStr '\' '/'`"
+
+ # Some shells escape these differently.
+ # TODO(20170924:kward) fix or remove.
+ #assertEquals '\\a' `_shunit_escapeCharInStr '\' '\a'`
+ #assertEquals '\\b' `_shunit_escapeCharInStr '\' '\b'`
+}
+
+# Test the various ways of declaring functions.
+#
+# Prefixing (then stripping) with comment symbol so these functions aren't
+# treated as real functions by shUnit2.
+testExtractTestFunctions() {
+ f="${SHUNIT_TMPDIR}/extract_test_functions"
+ sed 's/^#//' <<EOF >"${f}"
+## Function on a single line.
+#testABC() { echo 'ABC'; }
+## Multi-line function with '{' on next line.
+#test_def()
+# {
+# echo 'def'
+#}
+## Multi-line function with '{' on first line.
+#testG3 () {
+# echo 'G3'
+#}
+## Function with numerical values in name.
+#function test4() { echo '4'; }
+## Leading space in front of function.
+# test5() { echo '5'; }
+## Function with '_' chars in name.
+#some_test_function() { echo 'some func'; }
+## Function that sets variables.
+#func_with_test_vars() {
+# testVariable=1234
+#}
+## Function with keyword but no parenthesis
+#function test6 { echo '6'; }
+## Function with keyword but no parenthesis, multi-line
+#function test7 {
+# echo '7';
+#}
+## Function with no parenthesis, '{' on next line
+#function test8
+#{
+# echo '8'
+#}
+## Function with hyphenated name
+#test-9() {
+# echo '9';
+#}
+## Function without parenthesis or keyword
+#test_foobar { echo 'hello world'; }
+## Function with multiple function keywords
+#function function test_test_test() { echo 'lorem'; }
+EOF
+
+ actual=`_shunit_extractTestFunctions "${f}"`
+ assertEquals 'testABC test_def testG3 test4 test5 test6 test7 test8 test-9' "${actual}"
+}
+
+# Test that certain external commands sometimes "stubbed" by users are escaped.
+testIssue54() {
+ for c in mkdir rm cat chmod sed; do
+ grep "^[^#]*${c} " "${TH_SHUNIT}" | grep -qv "command ${c}"
+ assertFalse "external call to ${c} not protected somewhere" $?
+ done
+ grep '^[^#]*[^ ] *\[' "${TH_SHUNIT}" | grep -qv 'command \['
+ assertFalse "call to [ ... ] not protected somewhere" $?
+ grep '^[^#]* *\.' "${TH_SHUNIT}" | grep -qv 'command \.'
+ assertFalse "call to . not protected somewhere" $?
+}
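+
+# The "protection" being grepped for above is the command prefix that shunit2
+# uses on external calls, e.g. "command mkdir ..." rather than a bare
+# "mkdir ...", so user-defined stubs with the same names cannot break it.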
+
+mock_tput() {
+ if [ -z "${TERM}" ]; then
+ # shellcheck disable=SC2016
+ echo 'tput: No value for $TERM and no -T specified'
+ return 2
+ fi
+ if [ "$1" = 'colors' ]; then
+ echo 256
+ return 0
+ fi
+ return 1
+}
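+
+# The mock above is wired in through SHUNIT_CMD_TPUT (see setUp and testColors
+# below); e.g. with TERM set, "SHUNIT_CMD_TPUT=mock_tput; _shunit_colors"
+# should report 256 colors, and 16 when the tput command is missing entirely.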
+
+testColors() {
+ while read -r desc cmd colors; do
+ SHUNIT_CMD_TPUT=${cmd}
+ got=`_shunit_colors`
+ want=${colors}
+ assertEquals "${got}" "${want}"
+ done <<'EOF'
+missing missing_tput 16
+mock mock_tput 256
+EOF
+}
+
+testColorsWithoutTERM() {
+ SHUNIT_CMD_TPUT='mock_tput'
+ got=`TERM='' _shunit_colors`
+ want=16
+ assertEquals "${got}" "${want}"
+}
+
+setUp() {
+ for f in "${stdoutF}" "${stderrF}"; do
+ cp /dev/null "${f}"
+ done
+
+ # Reconfigure coloring as some tests override default behavior.
+ _shunit_configureColor "${SHUNIT_COLOR_DEFAULT}"
+
+ # shellcheck disable=SC2034,SC2153
+ SHUNIT_CMD_TPUT=${__SHUNIT_CMD_TPUT}
+}
+
+oneTimeSetUp() {
+ SHUNIT_COLOR_DEFAULT="${SHUNIT_COLOR}"
+ th_oneTimeSetUp
+}
+
+# Load and run shUnit2.
+# shellcheck disable=SC2034
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. "${TH_SHUNIT}"
diff --git a/src/fluent-bit/tests/lib/shunit2/shunit2_standalone_test.sh b/src/fluent-bit/tests/lib/shunit2/shunit2_standalone_test.sh
new file mode 100755
index 000000000..2109d8f7f
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/shunit2_standalone_test.sh
@@ -0,0 +1,38 @@
+#! /bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# shUnit2 unit test for standalone operation.
+#
+# Copyright 2010-2017 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+# https://github.com/kward/shunit2
+#
+# This unit test exists purely to verify that calling shunit2 directly, passing
+# the name of a unit test script, works. When run, this script determines
+# whether it is running as a standalone program, and calls main() if it is.
+#
+### ShellCheck http://www.shellcheck.net/
+# $() are not fully portable (POSIX != portable).
+# shellcheck disable=SC2006
+# Disable source following.
+# shellcheck disable=SC1090,SC1091
+
+ARGV0="`basename "$0"`"
+
+# Load test helpers.
+. ./shunit2_test_helpers
+
+testStandalone() {
+ assertTrue "${SHUNIT_TRUE}"
+}
+
+main() {
+ ${TH_SHUNIT} "${ARGV0}"
+}
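+
+# In other words, the standalone invocation exercised here is roughly
+# "./shunit2 shunit2_standalone_test.sh" (TH_SHUNIT defaults to ./shunit2
+# unless SHUNIT_INC points elsewhere).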
+
+# Are we running as a standalone?
+if [ "${ARGV0}" = 'shunit2_test_standalone.sh' ]; then
+ if [ $# -gt 0 ]; then main "$@"; else main; fi
+fi
diff --git a/src/fluent-bit/tests/lib/shunit2/shunit2_test_helpers b/src/fluent-bit/tests/lib/shunit2/shunit2_test_helpers
new file mode 100644
index 000000000..7ff4c9cc5
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/shunit2_test_helpers
@@ -0,0 +1,234 @@
+# vim:et:ft=sh:sts=2:sw=2
+#
+# shUnit2 unit test common functions
+#
+# Copyright 2008 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+# https://github.com/kward/shunit2
+#
+### ShellCheck (http://www.shellcheck.net/)
+# Commands are purposely escaped so they can be mocked outside shUnit2.
+# shellcheck disable=SC1001,SC1012
+# expr may be antiquated, but it is the only solution in some cases.
+# shellcheck disable=SC2003
+# $() are not fully portable (POSIX != portable).
+# shellcheck disable=SC2006
+
+# Treat unset variables as an error when performing parameter expansion.
+set -u
+
+# Set shwordsplit for zsh.
+\[ -n "${ZSH_VERSION:-}" ] && setopt shwordsplit
+
+#
+# Constants.
+#
+
+# Path to shUnit2 library. Can be overridden by setting SHUNIT_INC.
+TH_SHUNIT=${SHUNIT_INC:-./shunit2}; export TH_SHUNIT
+
+# Configure debugging. Set the DEBUG environment variable to any
+# non-empty value to enable debug output, or TRACE to enable trace
+# output.
+TRACE=${TRACE:+'th_trace '}
+\[ -n "${TRACE}" ] && DEBUG=1
+\[ -z "${TRACE}" ] && TRACE=':'
+
+DEBUG=${DEBUG:+'th_debug '}
+\[ -z "${DEBUG}" ] && DEBUG=':'
+
+#
+# Variables.
+#
+
+th_RANDOM=0
+
+#
+# Functions.
+#
+
+# Logging functions.
+th_trace() { echo "${MY_NAME}:TRACE $*" >&2; }
+th_debug() { echo "${MY_NAME}:DEBUG $*" >&2; }
+th_info() { echo "${MY_NAME}:INFO $*" >&2; }
+th_warn() { echo "${MY_NAME}:WARN $*" >&2; }
+th_error() { echo "${MY_NAME}:ERROR $*" >&2; }
+th_fatal() { echo "${MY_NAME}:FATAL $*" >&2; }
+
+# Output subtest name.
+th_subtest() { echo " $*" >&2; }
+
+th_oneTimeSetUp() {
+ # These files will be cleaned up automatically by shUnit2.
+ stdoutF="${SHUNIT_TMPDIR}/stdout"
+ stderrF="${SHUNIT_TMPDIR}/stderr"
+ returnF="${SHUNIT_TMPDIR}/return"
+ expectedF="${SHUNIT_TMPDIR}/expected"
+ export stdoutF stderrF returnF expectedF
+}
+
+# Generate a random number.
+th_generateRandom() {
+ tfgr_random=${th_RANDOM}
+
+ while \[ "${tfgr_random}" = "${th_RANDOM}" ]; do
+ # shellcheck disable=SC2039
+ if \[ -n "${RANDOM:-}" ]; then
+ # $RANDOM works
+ # shellcheck disable=SC2039
+ tfgr_random=${RANDOM}${RANDOM}${RANDOM}$$
+ elif \[ -r '/dev/urandom' ]; then
+ tfgr_random=`od -vAn -N4 -tu4 </dev/urandom |sed 's/^[^0-9]*//'`
+ else
+ tfgr_date=`date '+%H%M%S'`
+ tfgr_random=`expr "${tfgr_date}" \* $$`
+ unset tfgr_date
+ fi
+ \[ "${tfgr_random}" = "${th_RANDOM}" ] && sleep 1
+ done
+
+ th_RANDOM=${tfgr_random}
+ unset tfgr_random
+}
+
+# This function returns the named data section from a file. A data section is
+# defined by a [header] line, one or more lines of data, and then a blank line.
+th_getDataSect() {
+ th_sgrep "\\[$1\\]" "$2" |sed '1d'
+}
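+
+# Hypothetical data file (not shipped with the tests) showing the expected
+# layout; th_getDataSect 'one' data.txt would emit only the line "1 2 3":
+#
+#   [one]
+#   1 2 3
+#
+#   [two]
+#   4 5 6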
+
+# This function greps a section from a file. A section is defined as a group of
+# lines preceded and followed by blank lines.
+th_sgrep() {
+ th_pattern_=$1
+ shift
+
+ # shellcheck disable=SC2068
+ sed -e '/./{H;$!d;}' -e "x;/${th_pattern_}/"'!d;' $@ |sed '1d'
+
+ unset th_pattern_
+}
+
+# Custom assert that checks for true return value (0), and no output to STDOUT
+# or STDERR. If a non-zero return value is encountered, the output of STDERR
+# will be output.
+#
+# Args:
+# th_test_: string: name of the subtest
+# th_rtrn_: integer: the return value of the subtest performed
+# th_stdout_: string: filename where stdout was redirected to
+# th_stderr_: string: filename where stderr was redirected to
+th_assertTrueWithNoOutput() {
+ th_test_=$1
+ th_rtrn_=$2
+ th_stdout_=$3
+ th_stderr_=$4
+
+ assertTrue "${th_test_}; expected return value of zero" "${th_rtrn_}"
+ \[ "${th_rtrn_}" -ne "${SHUNIT_TRUE}" ] && \cat "${th_stderr_}"
+ assertFalse "${th_test_}; expected no output to STDOUT" \
+ "[ -s '${th_stdout_}' ]"
+ assertFalse "${th_test_}; expected no output to STDERR" \
+ "[ -s '${th_stderr_}' ]"
+
+ unset th_test_ th_rtrn_ th_stdout_ th_stderr_
+}
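+
+# Typical call pattern (as used throughout the *_test.sh scripts): run the
+# assert in a sub-shell with stdout/stderr captured, then pass the status and
+# capture files along, e.g.
+#
+#   ( assertTrue 0 >"${stdoutF}" 2>"${stderrF}" )
+#   th_assertTrueWithNoOutput 'true' $? "${stdoutF}" "${stderrF}"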
+
+# Custom assert that checks for non-zero return value, output to STDOUT, but no
+# output to STDERR.
+#
+# Args:
+# th_test_: string: name of the subtest
+# th_rtrn_: integer: the return value of the subtest performed
+# th_stdout_: string: filename where stdout was redirected to
+# th_stderr_: string: filename where stderr was redirected to
+th_assertFalseWithOutput()
+{
+ th_test_=$1
+ th_rtrn_=$2
+ th_stdout_=$3
+ th_stderr_=$4
+
+ assertFalse "${th_test_}; expected non-zero return value" "${th_rtrn_}"
+ assertTrue "${th_test_}; expected output to STDOUT" \
+ "[ -s '${th_stdout_}' ]"
+ assertFalse "${th_test_}; expected no output to STDERR" \
+ "[ -s '${th_stderr_}' ]"
+ \[ -s "${th_stdout_}" -a ! -s "${th_stderr_}" ] || \
+ _th_showOutput "${SHUNIT_FALSE}" "${th_stdout_}" "${th_stderr_}"
+
+ unset th_test_ th_rtrn_ th_stdout_ th_stderr_
+}
+
+# Custom assert that checks for non-zero return value, no output to STDOUT, but
+# output to STDERR.
+#
+# Args:
+# th_test_: string: name of the subtest
+# th_rtrn_: integer: the return value of the subtest performed
+# th_stdout_: string: filename where stdout was redirected to
+# th_stderr_: string: filename where stderr was redirected to
+th_assertFalseWithError() {
+ th_test_=$1
+ th_rtrn_=$2
+ th_stdout_=$3
+ th_stderr_=$4
+
+ assertFalse "${th_test_}; expected non-zero return value" "${th_rtrn_}"
+ assertFalse "${th_test_}; expected no output to STDOUT" \
+ "[ -s '${th_stdout_}' ]"
+ assertTrue "${th_test_}; expected output to STDERR" \
+ "[ -s '${th_stderr_}' ]"
+ \[ ! -s "${th_stdout_}" -a -s "${th_stderr_}" ] || \
+ _th_showOutput "${SHUNIT_FALSE}" "${th_stdout_}" "${th_stderr_}"
+
+ unset th_test_ th_rtrn_ th_stdout_ th_stderr_
+}
+
+# Some shells, zsh on Solaris in particular, return immediately from a sub-shell
+# when a non-zero return value is encountered. To properly catch these values,
+# they are either written to disk, or recognized as an error if the file is empty.
+th_clearReturn() { cp /dev/null "${returnF}"; }
+th_queryReturn() {
+ if \[ -s "${returnF}" ]; then
+ th_return=`\cat "${returnF}"`
+ else
+ th_return=${SHUNIT_ERROR}
+ fi
+ export th_return
+}
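+
+# Sketch of the intended usage (hypothetical assert name): the sub-shell
+# records its status in ${returnF} so th_queryReturn can recover it even when
+# the shell aborts the sub-shell early:
+#
+#   th_clearReturn
+#   ( someAssert "..."; echo $? >"${returnF}" )
+#   th_queryReturn
+#   assertEquals "${SHUNIT_TRUE}" "${th_return}"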
+
+# Provide external and internal entry points to the showOutput helper function.
+th_showOutput() { _th_showOutput "$@"; }
+_th_showOutput() {
+ _th_return_=$1
+ _th_stdout_=$2
+ _th_stderr_=$3
+
+ isSkipping
+ if \[ $? -eq "${SHUNIT_FALSE}" -a "${_th_return_}" != "${SHUNIT_TRUE}" ]; then
+ if \[ -n "${_th_stdout_}" -a -s "${_th_stdout_}" ]; then
+ echo '>>> STDOUT' >&2
+ \cat "${_th_stdout_}" >&2
+ fi
+ if \[ -n "${_th_stderr_}" -a -s "${_th_stderr_}" ]; then
+ echo '>>> STDERR' >&2
+ \cat "${_th_stderr_}" >&2
+ fi
+ if \[ -n "${_th_stdout_}" -o -n "${_th_stderr_}" ]; then
+ echo '<<< end output' >&2
+ fi
+ fi
+
+ unset _th_return_ _th_stdout_ _th_stderr_
+}
+
+#
+# Main.
+#
+
+${TRACE} 'trace output enabled'
+${DEBUG} 'debug output enabled'
diff --git a/src/fluent-bit/tests/lib/shunit2/test_runner b/src/fluent-bit/tests/lib/shunit2/test_runner
new file mode 100755
index 000000000..a9871e3f9
--- /dev/null
+++ b/src/fluent-bit/tests/lib/shunit2/test_runner
@@ -0,0 +1,171 @@
+#! /bin/sh
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Unit test suite runner.
+#
+# Copyright 2008-2018 Kate Ward. All Rights Reserved.
+# Released under the Apache 2.0 license.
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+# https://github.com/kward/shlib
+#
+# This script runs all the unit tests that can be found, and generates a nice
+# report of the tests.
+#
+### ShellCheck (http://www.shellcheck.net/)
+# Disable source following.
+# shellcheck disable=SC1090,SC1091
+# expr may be antiquated, but it is the only solution in some cases.
+# shellcheck disable=SC2003
+# $() are not fully portable (POSIX != portable).
+# shellcheck disable=SC2006
+
+# Return if test_runner already loaded.
+[ -z "${RUNNER_LOADED:-}" ] || return 0
+RUNNER_LOADED=0
+
+RUNNER_ARGV0=`basename "$0"`
+RUNNER_SHELLS='/bin/sh ash /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh'
+RUNNER_TEST_SUFFIX='_test.sh'
+true; RUNNER_TRUE=$?
+false; RUNNER_FALSE=$?
+
+runner_warn() { echo "runner:WARN $*" >&2; }
+runner_error() { echo "runner:ERROR $*" >&2; }
+runner_fatal() { echo "runner:FATAL $*" >&2; exit 1; }
+
+runner_usage() {
+ echo "usage: ${RUNNER_ARGV0} [-e key=val ...] [-s shell(s)] [-t test(s)]"
+}
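+
+# Example invocation (paths are illustrative): both flags are optional and
+# default to RUNNER_SHELLS and ./*_test.sh respectively.
+#
+#   ./test_runner -s '/bin/bash /bin/dash' -t 'shunit2_asserts_test.sh'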
+
+_runner_tests() { echo ./*${RUNNER_TEST_SUFFIX} |sed 's#./##g'; }
+_runner_testName() {
+ # shellcheck disable=SC1117
+ _runner_testName_=`expr "${1:-}" : "\(.*\)${RUNNER_TEST_SUFFIX}"`
+ if [ -n "${_runner_testName_}" ]; then
+ echo "${_runner_testName_}"
+ else
+ echo 'unknown'
+ fi
+ unset _runner_testName_
+}
+
+main() {
+ # Find and load versions library.
+ for _runner_dir_ in . ${LIB_DIR:-lib}; do
+ if [ -r "${_runner_dir_}/versions" ]; then
+ _runner_lib_dir_="${_runner_dir_}"
+ break
+ fi
+ done
+ [ -n "${_runner_lib_dir_}" ] || runner_fatal 'Unable to find versions library.'
+ . "${_runner_lib_dir_}/versions" || runner_fatal 'Unable to load versions library.'
+ unset _runner_dir_ _runner_lib_dir_
+
+ # Process command line flags.
+ env=''
+ while getopts 'e:hs:t:' opt; do
+ case ${opt} in
+ e) # set an environment variable
+ key=`expr "${OPTARG}" : '\([^=]*\)='`
+ val=`expr "${OPTARG}" : '[^=]*=\(.*\)'`
+ # shellcheck disable=SC2166
+ if [ -z "${key}" -o -z "${val}" ]; then
+ runner_usage
+ exit 1
+ fi
+ eval "${key}='${val}'"
+ eval "export ${key}"
+ env="${env:+${env} }${key}"
+ ;;
+ h) runner_usage; exit 0 ;; # help output
+ s) shells=${OPTARG} ;; # list of shells to run
+ t) tests=${OPTARG} ;; # list of tests to run
+ *) runner_usage; exit 1 ;;
+ esac
+ done
+ shift "`expr ${OPTIND} - 1`"
+
+ # Fill shells and/or tests.
+ shells=${shells:-${RUNNER_SHELLS}}
+ [ -z "${tests}" ] && tests=`_runner_tests`
+
+ # Error checking.
+ if [ -z "${tests}" ]; then
+ runner_error 'no tests found to run; exiting'
+ exit 1
+ fi
+
+ cat <<EOF
+#------------------------------------------------------------------------------
+# System data.
+#
+
+$ uname -mprsv
+`uname -mprsv`
+
+OS Name: `versions_osName`
+OS Version: `versions_osVersion`
+
+### Test run info.
+shells: ${shells}
+tests: ${tests}
+EOF
+for key in ${env}; do
+ eval "echo \"${key}=\$${key}\""
+done
+
+# Run tests.
+runner_passing_=${RUNNER_TRUE}
+for shell in ${shells}; do
+ echo
+
+ cat <<EOF
+
+#------------------------------------------------------------------------------
+# Running the test suite with ${shell}.
+#
+EOF
+
+ # Check for existence of shell.
+ shell_bin=${shell}
+ shell_name=''
+ shell_present=${RUNNER_FALSE}
+ case ${shell} in
+ ash)
+ shell_bin=`command -v busybox`
+ [ $? -eq "${RUNNER_TRUE}" ] && shell_present="${RUNNER_TRUE}"
+ shell_bin="${shell_bin:+${shell_bin} }ash"
+ shell_name=${shell}
+ ;;
+ *)
+ [ -x "${shell_bin}" ] && shell_present="${RUNNER_TRUE}"
+ shell_name=`basename "${shell}"`
+ ;;
+ esac
+ if [ "${shell_present}" -eq "${RUNNER_FALSE}" ]; then
+ runner_warn "unable to run tests with the ${shell_name} shell"
+ continue
+ fi
+
+ shell_version=`versions_shellVersion "${shell}"`
+
+ echo "shell name: ${shell_name}"
+ echo "shell version: ${shell_version}"
+
+ # Execute the tests.
+ for t in ${tests}; do
+ echo
+ echo "--- Executing the '`_runner_testName "${t}"`' test suite. ---"
+ # ${shell_bin} needs word splitting.
+ # shellcheck disable=SC2086
+ ( exec ${shell_bin} "./${t}" 2>&1; )
+ test "${runner_passing_}" -eq ${RUNNER_TRUE} -a $? -eq ${RUNNER_TRUE}
+ runner_passing_=$?
+ done
+ done
+ return ${runner_passing_}
+}
+
+# Execute main() if this is run in standalone mode (i.e. not from a unit test).
+[ -z "${SHUNIT_VERSION}" ] && main "$@"