author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-07-24 09:54:23 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-07-24 09:54:44 +0000
commit     836b47cb7e99a977c5a23b059ca1d0b5065d310e (patch)
tree       1604da8f482d02effa033c94a84be42bc0c848c3 /fluent-bit/tests/runtime_shell/in_tail
parent     Releasing debian version 1.44.3-2. (diff)
download   netdata-836b47cb7e99a977c5a23b059ca1d0b5065d310e.tar.xz
           netdata-836b47cb7e99a977c5a23b059ca1d0b5065d310e.zip
Merging upstream version 1.46.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fluent-bit/tests/runtime_shell/in_tail')
 fluent-bit/tests/runtime_shell/in_tail/README.md                        | 126
 fluent-bit/tests/runtime_shell/in_tail/conf/multiline_rotation.conf     |  60
 fluent-bit/tests/runtime_shell/in_tail/conf/normal_rotation.conf        |  55
 fluent-bit/tests/runtime_shell/in_tail/conf/rotate_link.conf            |  20
 fluent-bit/tests/runtime_shell/in_tail/conf/single_static_rotation.conf |  19
 fluent-bit/tests/runtime_shell/in_tail/conf/truncate_link.conf          |  19
 fluent-bit/tests/runtime_shell/in_tail/conf/truncate_rotation.conf      |  19
 fluent-bit/tests/runtime_shell/in_tail/logger_file.py                   |  72
 fluent-bit/tests/runtime_shell/in_tail/run_tests.sh                     | 530
 fluent-bit/tests/runtime_shell/in_tail/test_rotation.sh                 | 542
10 files changed, 0 insertions(+), 1462 deletions(-)
diff --git a/fluent-bit/tests/runtime_shell/in_tail/README.md b/fluent-bit/tests/runtime_shell/in_tail/README.md
deleted file mode 100644
index 78c77525..00000000
--- a/fluent-bit/tests/runtime_shell/in_tail/README.md
+++ /dev/null
@@ -1,126 +0,0 @@
-# Fluent Bit Tail Input Plugin Tests
-
-The following directory contains tests for Tail input plugin behaviors.
-
-## run_tests.sh
-
-This script provide validations for offsets, database file entries and rotation under different scenarios. The following tests are available in the script
-
-- test_normal_rotation
-- test_single_static_rotation
-- test_truncate
-- test_rotate_link
-- test_truncate_link
-
-Running the script ```test_rotation.sh``` will run every test listed above, to run a single test just append it name, e.g:
-
-```
-./test_rotation.sh -- test_truncate
-```
-
-### 1. Normal Rotation
-
-**Unit**
-
-```test_normal_rotation```
-
-**Description**
-
-Run the logger tool that creates 5 different files, write 100000 messages to each one while rotating at 256KB.
-
-This test enable the database backend for Tail so it also helps to validate expected entries into the 'in_tail_files' table.
-
-**Configuration File**
-
-```conf/normal_rotation.conf```
-
-### 2. Single Static Rotation
-
-**Unit**
-
-```test_single_static_rotation```
-
-**Description**
-
-Run the logger tool that creates 1 big file and let Fluent Bit process it in the static mode, before to promote it to 'events' and it gets rotated.
-
-**Configuration File**
-
-```conf/single_static_rotation.conf```
-
-### 3. Truncate
-
-**Unit**
-
-```test_truncate```
-
-**Description**
-
- Some environments still rely on truncation mode or well known as copytruncate,
- this is the definition by logrotate(8):
-
-> Truncate the original log file to zero size in place after creating a copy,
-> instead of moving the old log file and optionally creating a new one. It
-> can be used when some program cannot be told to close its logfile and
-> thus might continue writing (appending) to the previous log file forever.
->
-> Note that there is a very small time slice between copying the file and
-> truncating it, so some logging data might be lost. When this option is
-> used, the create option will have no effect, as the old log file stays in
-> place.
-
-This test checks that after a truncation the new lines added are properly
-processed.
-
-**Configuration File**
-
-```conf/truncate_rotation.conf```
-
-### 4. Rotate Link
-
-**Unit**
-
-```test_rotate_link```
-
-**Description**
-
-This test checks that a monitored link, upon rotation, keeps the proper offset and database status for the real file.
-
- Example:
-
- - file with data: data.log
- - monitored link: test.log
-
- Check the behavior upon the following rotation: test.log -> test.log.1
-
-**Configuration File**
-
-```conf/rotate_link.conf```
-
-### 5. Truncate Link
-
-**Unit**
-
-```test_truncate_link```
-
-**Description**
-
-Test a link that gets a truncation and Fluent Bit properly use the new offset
-
-**Configuration File**
-
-```conf/truncate_link.conf```
-
-### 6. Multiline Rotation
-
-**Unit**
-
-```test_multiline_rotation```
-
-**Description**
-
-Test a multiline rotation for issue 4190.
-
-**Configuration File**
-
-```conf/multiline_rotation.conf```
diff --git a/fluent-bit/tests/runtime_shell/in_tail/conf/multiline_rotation.conf b/fluent-bit/tests/runtime_shell/in_tail/conf/multiline_rotation.conf
deleted file mode 100644
index 3695014a..00000000
--- a/fluent-bit/tests/runtime_shell/in_tail/conf/multiline_rotation.conf
+++ /dev/null
@@ -1,60 +0,0 @@
-[SERVICE]
-    flush            1
-    daemon           off
-    log_level        debug
-    log_file         ${TEST_DIR}/out.log
-
-[INPUT]
-    name             tail
-    tag              a
-    path             ${TEST_DIR}/a.log
-    db               ${TEST_DIR}/a.db
-    db.sync          full
-    multiline.parser cri
-    rotate_wait      5
-    refresh_interval 2
-
-[INPUT]
-    name             tail
-    tag              b
-    path             ${TEST_DIR}/b.log
-    db               ${TEST_DIR}/b.db
-    db.sync          full
-    multiline.parser cri
-    rotate_wait      5
-    refresh_interval 2
-
-[INPUT]
-    name             tail
-    tag              c
-    path             ${TEST_DIR}/c.log
-    db               ${TEST_DIR}/c.db
-    db.sync          full
-    multiline.parser cri
-    rotate_wait      5
-    refresh_interval 2
-
-[INPUT]
-    name             tail
-    tag              d
-    path             ${TEST_DIR}/d.log
-    db               ${TEST_DIR}/d.db
-    db.sync          full
-    multiline.parser cri
-    rotate_wait      5
-    refresh_interval 2
-
-[INPUT]
-    name             tail
-    tag              e
-    path             ${TEST_DIR}/e.log
-    db               ${TEST_DIR}/e.db
-    db.sync          full
-    multiline.parser cri
-    rotate_wait      5
-    refresh_interval 2
-
-[OUTPUT]
-    name  file
-    match *
-    path  ${TEST_DIR}
diff --git a/fluent-bit/tests/runtime_shell/in_tail/conf/normal_rotation.conf b/fluent-bit/tests/runtime_shell/in_tail/conf/normal_rotation.conf
deleted file mode 100644
index 0265df5d..00000000
--- a/fluent-bit/tests/runtime_shell/in_tail/conf/normal_rotation.conf
+++ /dev/null
@@ -1,55 +0,0 @@
-[SERVICE]
-    flush            1
-    daemon           off
-    log_level        debug
-    log_file         ${TEST_DIR}/out.log
-
-[INPUT]
-    name             tail
-    tag              a
-    path             ${TEST_DIR}/a.log
-    db               ${TEST_DIR}/a.db
-    db.sync          full
-    rotate_wait      5
-    refresh_interval 2
-
-[INPUT]
-    name             tail
-    tag              b
-    path             ${TEST_DIR}/b.log
-    db               ${TEST_DIR}/b.db
-    db.sync          full
-    rotate_wait      5
-    refresh_interval 2
-
-[INPUT]
-    name             tail
-    tag              c
-    path             ${TEST_DIR}/c.log
-    db               ${TEST_DIR}/c.db
-    db.sync          full
-    rotate_wait      5
-    refresh_interval 2
-
-[INPUT]
-    name             tail
-    tag              d
-    path             ${TEST_DIR}/d.log
-    db               ${TEST_DIR}/d.db
-    db.sync          full
-    rotate_wait      5
-    refresh_interval 2
-
-[INPUT]
-    name             tail
-    tag              e
-    path             ${TEST_DIR}/e.log
-    db               ${TEST_DIR}/e.db
-    db.sync          full
-    rotate_wait      5
-    refresh_interval 2
-
-[OUTPUT]
-    name  file
-    match *
-    path  ${TEST_DIR}
diff --git a/fluent-bit/tests/runtime_shell/in_tail/conf/rotate_link.conf b/fluent-bit/tests/runtime_shell/in_tail/conf/rotate_link.conf
deleted file mode 100644
index f0b017d4..00000000
--- a/fluent-bit/tests/runtime_shell/in_tail/conf/rotate_link.conf
+++ /dev/null
@@ -1,20 +0,0 @@
-[SERVICE]
-    flush            1
-    daemon           off
-    log_level        debug
-    log_file         ${TEST_DIR}/out.log
-
-[INPUT]
-    name             tail
-    tag              a
-    path             ${TEST_DIR}/a.log
-    db               ${TEST_DIR}/a.db
-    db.sync          full
-    rotate_wait      5
-    watcher_interval 1
-    refresh_interval 2
-
-[OUTPUT]
-    name  file
-    match *
-    path  ${TEST_DIR}
diff --git a/fluent-bit/tests/runtime_shell/in_tail/conf/single_static_rotation.conf b/fluent-bit/tests/runtime_shell/in_tail/conf/single_static_rotation.conf
deleted file mode 100644
index ac883c95..00000000
--- a/fluent-bit/tests/runtime_shell/in_tail/conf/single_static_rotation.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-[SERVICE]
-    flush            1
-    daemon           off
-    log_level        debug
-    log_file         ${TEST_DIR}/out.log
-
-[INPUT]
-    name             tail
-    tag              a
-    path             ${TEST_DIR}/a.log
-    db               ${TEST_DIR}/a.db
-    db.sync          full
-    rotate_wait      5
-    refresh_interval 2
-
-[OUTPUT]
-    name  file
-    match *
-    path  ${TEST_DIR}
diff --git a/fluent-bit/tests/runtime_shell/in_tail/conf/truncate_link.conf b/fluent-bit/tests/runtime_shell/in_tail/conf/truncate_link.conf
deleted file mode 100644
index 6cfa5410..00000000
--- a/fluent-bit/tests/runtime_shell/in_tail/conf/truncate_link.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-[SERVICE]
-    flush            1
-    daemon           off
-    log_level        debug
-    log_file         ${TEST_DIR}/out.log
-
-[INPUT]
-    name             tail
-    tag              a
-    path             ${TEST_DIR}/a.log
-    db               ${TEST_DIR}/a.db
-    db.sync          full
-    rotate_wait      5
-    refresh_interval 1
-
-[OUTPUT]
-    name  file
-    match *
-    path  ${TEST_DIR}
diff --git a/fluent-bit/tests/runtime_shell/in_tail/conf/truncate_rotation.conf b/fluent-bit/tests/runtime_shell/in_tail/conf/truncate_rotation.conf
deleted file mode 100644
index ac883c95..00000000
--- a/fluent-bit/tests/runtime_shell/in_tail/conf/truncate_rotation.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-[SERVICE]
-    flush            1
-    daemon           off
-    log_level        debug
-    log_file         ${TEST_DIR}/out.log
-
-[INPUT]
-    name             tail
-    tag              a
-    path             ${TEST_DIR}/a.log
-    db               ${TEST_DIR}/a.db
-    db.sync          full
-    rotate_wait      5
-    refresh_interval 2
-
-[OUTPUT]
-    name  file
-    match *
-    path  ${TEST_DIR}
diff --git a/fluent-bit/tests/runtime_shell/in_tail/logger_file.py b/fluent-bit/tests/runtime_shell/in_tail/logger_file.py
deleted file mode 100755
index a8e2a1b3..00000000
--- a/fluent-bit/tests/runtime_shell/in_tail/logger_file.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/env python
-
-import sys
-import uuid
-import time
-import signal
-import logging
-from argparse import ArgumentParser
-from logging.handlers import RotatingFileHandler
-from threading import Thread
-
-class LoggerManager:
-    def __init__(self, args):
-        # KB to bytes
-        self.max_bytes = (args.size * 1000)
-        self.backup = args.backup
-        self.lines = args.lines
-        self.delay = args.delay
-        self.threads = []
-
-        # Create a thread for every writer
-        for f in args.filenames:
-            thread = Thread(target = self.single_logger_thread, args = (f,))
-            if thread is None:
-                print("error creating thread")
-                sys.exit(1)
-            self.threads.append(thread)
-            thread.start()
-            print("Logger thread for '" + f + "' has started")
-
-        for th in self.threads:
-            th.join()
-            print("Logger thread finished")
-
-    def single_logger_thread(self, name):
-        logger = logging.getLogger(name)
-        logger.setLevel(logging.DEBUG)
-        handler = RotatingFileHandler(name, maxBytes = self.max_bytes,
-                                      backupCount = self.backup)
-        logger.addHandler(handler)
-        rnd = uuid.uuid4()
-
-        i = 0
-        while i < self.lines:
-            logger.debug(rnd)
-            if self.delay > 0.0:
-                time.sleep(self.delay / 1000.0)
-            i = i + 1
-
-def signal_handler(sig, frame):
-    print("stopping logger")
-    sys.exit(0)
-
-if __name__ == '__main__':
-    signal.signal(signal.SIGINT, signal_handler)
-
-    # Define arguments
-    parser = ArgumentParser()
-    parser.add_argument("-b", "--backup", dest="backup", default=50, type=int)
-    parser.add_argument("-d", "--delay", dest="delay", default=0.1, type=float,
-                        help="milliseconds delay between line writes")
-    parser.add_argument("-l", "--lines", dest="lines", default=1000, type=int)
-    parser.add_argument("-f", "--file", dest="filenames", action='append', required=True,
-                        help="write logs to FILE", metavar="FILE")
-    parser.add_argument("-s", "--size", dest="size", type=int,
-                        help="maximum log file size in KB before rotation",
-                        default=256)
-    # Read arguments
-    args = parser.parse_args()
-
-    # Start the Logger
-    lm = LoggerManager(args)
diff --git a/fluent-bit/tests/runtime_shell/in_tail/run_tests.sh b/fluent-bit/tests/runtime_shell/in_tail/run_tests.sh deleted file mode 100755 index
09bb9c43..00000000 --- a/fluent-bit/tests/runtime_shell/in_tail/run_tests.sh +++ /dev/null @@ -1,530 +0,0 @@ -#!/bin/sh - -# Environment variables -FLB_BIN=`realpath ../../../build/bin/fluent-bit` -FLB_RUNTIME_SHELL_PATH=`realpath $(pwd)/../` -FLB_RUN_TEST=`realpath $FLB_RUNTIME_SHELL_PATH/../lib/shunit2/shunit2` - -# Colorize shunit2 -bold=$(tput bold) -normal=$(tput sgr0) -SHUNIT_TEST_PREFIX="$bold==========> UNIT TEST: $normal" - -# 1. Normal Rotation -# ------------------ -# Run the logger tool that creates 5 different files, write 100000 messages to each one -# while rotating at 256KB. -# -# This test enable the database backend for Tail so it also helps to validate expected -# entries into the 'in_tail_files' table. -# -# Configuration file used: conf/normal_rotation.conf - -test_normal_rotation() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - sqlite3 $1/$2 -batch \ - ".headers off" ".width 20" "SELECT inode FROM in_tail_files" > \ - $1/$2.inodes - - rows=`cat $1/$2.inodes | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> database file $1/$2 contains $rows rows, inodes:" - cat $1/$2.inodes - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> database file $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - for logfile in a b c d e ; do - touch $TEST_DIR/$logfile.log - done - - # Run Fluent Bit - $FLB_BIN -c conf/normal_rotation.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Start the Logger: 5 files = 500000 log lines in total - python logger_file.py -l 100000 -s 256 -b 100 -d 0.1 \ - -f $TEST_DIR/a.log \ - -f $TEST_DIR/b.log \ - -f $TEST_DIR/c.log \ - -f $TEST_DIR/d.log \ - -f $TEST_DIR/e.log - - echo "Logger finished...wait 10 seconds" - sleep 10 - - # Count number of processed lines - write_lines=`cat $TEST_DIR/[abcdefghij].log* | wc -l` - read_lines=`cat $TEST_DIR/[abcdefghij] | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Validate our database files has only one remaining entry per database file - for logfile in a b c d e; do - sqlite_check $TEST_DIR "$logfile.db" $FLB_PID - done - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 2. Single Static Rotation (static process mode + rotation) -# ---------------------------------------------------------- -# Run the logger tool that creates 1 big file and let Fluent Bit process it in -# the static mode, before to promote it to 'events' and it gets rotated. 
-# -# Configuration file used: conf/single_static_rotation.conf - -test_single_static_rotation() { - # Write a log file of 200000 lines - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - touch $TEST_DIR/a.log - - # Start the Logger: 1 file with 400000 lines, we use a big size (-s) to - # avoid rotation - python logger_file.py -l 400000 -s 200000 -b 100 -d 0 \ - -f $TEST_DIR/a.log - lines=`cat $TEST_DIR/a.log | wc -l` - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/single_static_rotation.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 3 seconds before rotation - sleep 2 - mv $TEST_DIR/a.log $TEST_DIR/a.log.1 - - lines=`cat $TEST_DIR/a | wc -l` - echo "file Rotated, mid-check: processed lines $lines" - sleep 30 - - # Count number of processed lines - write_lines=`cat $TEST_DIR/a.log.1 | wc -l` - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Validate our database files has only one remaining entry per database file - #sqlite_check $TEST_DIR "$logfile.db" $FLB_PID - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 3. Truncate -# ----------- -# Some environments still rely on truncation mode or well known as copytruncate, -# this is the definition by logrotate(8): -# -# "Truncate the original log file to zero size in place after creating a copy, -# instead of moving the old log file and optionally creating a new one. It -# can be used when some program cannot be told to close its logfile and -# thus might continue writing (appending) to the previous log file forever. -# -# Note that there is a very small time slice between copying the file and -# truncating it, so some logging data might be lost. When this option is -# used, the create option will have no effect, as the old log file stays in -# place." -# -# This test checks that after a truncation the new lines added are properly -# processed. 
-# -# Configuration file used: conf/truncate_rotation.conf - -test_truncate() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - - # Get the last size of the 'a.log' file and check we have the same value - # in the database - offset=`wc -c < $TEST_DIR/a.log` - - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset" > \ - $1/$2.offset - - rows=`cat $1/$2.offset | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> database file $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - touch $TEST_DIR/a.log - - # Start the Logger: 1 file with 200 lines, we use a big size limit (-s) to - # avoid rotation - python logger_file.py -l 200 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log - lines=`cat $TEST_DIR/a.log | wc -l` - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/truncate_rotation.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 2 seconds before truncation - sleep 2 - pre_lines=`cat $TEST_DIR/a.log | wc -l` - truncate -s 0 $TEST_DIR/a.log - - lines=`cat $TEST_DIR/a | wc -l` - echo "file truncated, mid-check: processed lines $lines" - - # Append 100 more lines - python logger_file.py -l 100 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log - - sleep 3 - - # Count number of processed lines - write_lines=300 - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - sqlite_check $TEST_DIR a.db $FLB_PID - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 4. Rotate Link -# -------------- -# This case checks that a monitored link, upon rotation, keeps the proper offset -# and database status for the real file. 
-# -# Example: -# -# - file with data: data.log -# - monitored link: test.log -# -# Check the behavior upon test.log -> test.log.1 behavior -# -# Configuration file used: conf/rotate_link.conf - -test_rotate_link() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - - # Get the last size of the file pointed by 'a.log.1' and check we have the - # same value in the database - offset=`wc -c < $TEST_DIR/a.log.1` - - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset \ - AND rotated=1" > $1/$2.offset - - rows=`cat $1/$2.offset | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> offset database check $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - - # After rotate_wait (5 secs + watcher) we expect an empty database - sleep 6 - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset \ - AND rotated=1" > $1/$2.offset - - rows=`cat $1/$2.offset | wc -l | tr -d -C '[0-9]'` - if [ $rows != "0" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> empty database check $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "0" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - touch $TEST_DIR/data.log - - # Start the Logger: 1 file with 100 lines, we use a big size limit (-s) to - # avoid rotation - python logger_file.py -l 100 -s 200000 -b 100 -d 0 -f $TEST_DIR/data.log - lines=`cat $TEST_DIR/data.log | wc -l` - ln -s data.log $TEST_DIR/a.log - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/rotate_link.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 2 seconds and rotate file - sleep 2 - pre_lines=`cat $TEST_DIR/a.log | wc -l` - mv $TEST_DIR/a.log $TEST_DIR/a.log.1 - - lines=`cat $TEST_DIR/a | wc -l` - echo "file rotated, mid-check: processed lines $lines" - - # Append 200 more lines to the rotated link - python logger_file.py -l 200 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log.1 - - # Count number of processed lines - sleep 3 - write_lines=300 - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Check that database file have the right offset and mark the file as rotated - sqlite_check $TEST_DIR a.db $FLB_PID - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 5. 
Truncate Link -# -# Test a link that gets a truncation and Fluent Bit properly use the new offset -# -# Configuration file used: conf/truncate_link.conf - -test_truncate_link() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - - # Get the last size of the 'a.log' file and check we have the same value - # in the database - offset=`wc -c < $TEST_DIR/a.log` - - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset" > \ - $1/$2.offset - - rows=`cat $1/$2.offset | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> database file $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - touch $TEST_DIR/data.log - - # Start the Logger: 1 file with 100 lines, we use a big size limit (-s) to - # avoid rotation - python logger_file.py -l 100 -s 200000 -b 100 -d 0 -f $TEST_DIR/data.log - lines=`cat $TEST_DIR/data.log | wc -l` - ln -s data.log $TEST_DIR/a.log - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/truncate_link.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 1 second before truncation - sleep 1 - pre_lines=`cat $TEST_DIR/a.log | wc -l` - truncate -s 0 $TEST_DIR/a.log - - sleep 2 - lines=`cat $TEST_DIR/a | wc -l` - echo "file truncated, mid-check: processed lines $lines" - - # Append 200 more lines - python logger_file.py -l 200 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log - - sleep 4 - - # Count number of processed lines - write_lines=300 - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 6. Multiline + rotation -# ------------------ -# Run the logger tool that creates 5 different files, write 100000 messages to each one -# while rotating at 256KB. 
-# -# This test for issue 4190 -# -# Configuration file used: conf/multiline_rotation.conf - -test_multiline_rotation() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - sqlite3 $1/$2 -batch \ - ".headers off" ".width 20" "SELECT inode FROM in_tail_files" > \ - $1/$2.inodes - - rows=`cat $1/$2.inodes | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> database file $1/$2 contains $rows rows, inodes:" - cat $1/$2.inodes - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> database file $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - for logfile in a b c d e ; do - touch $TEST_DIR/$logfile.log - done - - # Run Fluent Bit - $FLB_BIN -c conf/multiline_rotation.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Start the Logger: 5 files = 500000 log lines in total - python logger_file.py -l 100000 -s 256 -b 100 -d 0.1 \ - -f $TEST_DIR/a.log \ - -f $TEST_DIR/b.log \ - -f $TEST_DIR/c.log \ - -f $TEST_DIR/d.log \ - -f $TEST_DIR/e.log - - echo "Logger finished...wait 10 seconds" - sleep 10 - - # Count number of processed lines - write_lines=`cat $TEST_DIR/[abcdefghij].log* | wc -l` - read_lines=`cat $TEST_DIR/[abcdefghij] | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Validate our database files has only one remaining entry per database file - for logfile in a b c d e; do - sqlite_check $TEST_DIR "$logfile.db" $FLB_PID - done - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# Launch the tests -. $FLB_RUN_TEST diff --git a/fluent-bit/tests/runtime_shell/in_tail/test_rotation.sh b/fluent-bit/tests/runtime_shell/in_tail/test_rotation.sh deleted file mode 100644 index 889fa7b0..00000000 --- a/fluent-bit/tests/runtime_shell/in_tail/test_rotation.sh +++ /dev/null @@ -1,542 +0,0 @@ -#!/bin/sh - -# Environment variables -FLB_BIN=`realpath ../../../build/bin/fluent-bit` -FLB_RUNTIME_SHELL_PATH=`realpath $(pwd)/../` -FLB_RUN_TEST=`realpath $FLB_RUNTIME_SHELL_PATH/../lib/shunit2/shunit2` - -# Colorize shunit2 -bold=$(tput bold) -normal=$(tput sgr0) -SHUNIT_TEST_PREFIX="$bold==========> UNIT TEST: $normal" - -# 1. Normal Rotation -# ------------------ -# Run the logger tool that creates 5 different files, write 100000 messages to each one -# while rotating at 256KB. -# -# This test enable the database backend for Tail so it also helps to validate expected -# entries into the 'in_tail_files' table. 
-# -# Configuration file used: conf/normal_rotation.conf - -test_normal_rotation() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - sqlite3 $1/$2 -batch \ - ".headers off" ".width 20" "SELECT inode FROM in_tail_files" > \ - $1/$2.inodes - - rows=`cat $1/$2.inodes | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> database file $1/$2 contains $rows rows, inodes:" - cat $1/$2.inodes - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> database file $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - for logfile in a b c d e ; do - touch $TEST_DIR/$logfile.log - done - - # Run Fluent Bit - $FLB_BIN -c conf/normal_rotation.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Start the Logger: 5 files = 500000 log lines in total - python logger_file.py -l 100000 -s 256 -b 100 -d 0.1 \ - -f $TEST_DIR/a.log \ - -f $TEST_DIR/b.log \ - -f $TEST_DIR/c.log \ - -f $TEST_DIR/d.log \ - -f $TEST_DIR/e.log - - echo "Logger finished...wait 10 seconds" - sleep 10 - - # Count number of processed lines - write_lines=`cat $TEST_DIR/[abcdefghij].log* | wc -l` - read_lines=`cat $TEST_DIR/[abcdefghij] | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Validate our database files has only one remaining entry per database file - for logfile in a b c d e; do - sqlite_check $TEST_DIR "$logfile.db" $FLB_PID - done - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 2. Single Static Rotation (static process mode + rotation) -# ---------------------------------------------------------- -# Run the logger tool that creates 1 big file and let Fluent Bit process it in -# the static mode, before to promote it to 'events' and it gets rotated. -# -# Configuration file used: conf/single_static_rotation.conf - -test_single_static_rotation() { - # Write a log file of 200000 lines - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - touch $TEST_DIR/a.log - - # Start the Logger: 1 file with 400000 lines, we use a big size (-s) to - # avoid rotation - python logger_file.py -l 400000 -s 200000 -b 100 -d 0 \ - -f $TEST_DIR/a.log - lines=`cat $TEST_DIR/a.log | wc -l` - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/single_static_rotation.conf & - FLB_PID=$! 
- echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 3 seconds before rotation - sleep 2 - mv $TEST_DIR/a.log $TEST_DIR/a.log.1 - - lines=`cat $TEST_DIR/a | wc -l` - echo "file Rotated, mid-check: processed lines $lines" - sleep 30 - - # Count number of processed lines - write_lines=`cat $TEST_DIR/a.log.1 | wc -l` - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Validate our database files has only one remaining entry per database file - #sqlite_check $TEST_DIR "$logfile.db" $FLB_PID - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 3. Truncate -# ----------- -# Some environments still rely on truncation mode or well known as copytruncate, -# this is the definition by logrotate(8): -# -# "Truncate the original log file to zero size in place after creating a copy, -# instead of moving the old log file and optionally creating a new one. It -# can be used when some program cannot be told to close its logfile and -# thus might continue writing (appending) to the previous log file forever. -# -# Note that there is a very small time slice between copying the file and -# truncating it, so some logging data might be lost. When this option is -# used, the create option will have no effect, as the old log file stays in -# place." -# -# This test checks that after a truncation the new lines added are properly -# processed. -# -# Configuration file used: conf/truncate_rotation.conf - -test_truncate() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - - # Get the last size of the 'a.log' file and check we have the same value - # in the database - offset=`wc -c < $TEST_DIR/a.log` - - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset" > \ - $1/$2.offset - - rows=`cat $1/$2.offset | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> database file $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - touch $TEST_DIR/a.log - - # Start the Logger: 1 file with 200 lines, we use a big size limit (-s) to - # avoid rotation - python logger_file.py -l 200 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log - lines=`cat $TEST_DIR/a.log | wc -l` - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/truncate_rotation.conf & - FLB_PID=$! 
- echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 2 seconds before truncation - sleep 2 - pre_lines=`cat $TEST_DIR/a.log | wc -l` - truncate -s 0 $TEST_DIR/a.log - - lines=`cat $TEST_DIR/a | wc -l` - echo "file truncated, mid-check: processed lines $lines" - - # Append 100 more lines - python logger_file.py -l 100 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log - - sleep 3 - - # Count number of processed lines - write_lines=300 - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - sqlite_check $TEST_DIR a.db $FLB_PID - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 4. Rotate Link -# -------------- -# This case checks that a monitored link, upon rotation, keeps the proper offset -# and database status for the real file. -# -# Example: -# -# - file with data: data.log -# - monitored link: test.log -# -# Check the behavior upon test.log -> test.log.1 behavior -# -# Configuration file used: conf/rotate_link.conf - -test_rotate_link() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - - # Get the last size of the file pointed by 'a.log.1' and check we have the - # same value in the database - offset=`wc -c < $TEST_DIR/a.log.1` - - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset \ - AND rotated=1" > $1/$2.offset - - rows=`cat $1/$2.offset | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> offset database check $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - - # After rotate_wait (5 secs + watcher) we expect an empty database - sleep 6 - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset \ - AND rotated=1" > $1/$2.offset - - rows=`cat $1/$2.offset | wc -l | tr -d -C '[0-9]'` - if [ $rows != "0" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> empty database check $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "0" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - touch $TEST_DIR/data.log - - # Start the Logger: 1 file with 100 lines, we use a big size limit (-s) to - # avoid rotation - python logger_file.py -l 100 -s 200000 -b 100 -d 0 -f $TEST_DIR/data.log - lines=`cat $TEST_DIR/data.log | wc -l` - ln -s data.log $TEST_DIR/a.log - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/rotate_link.conf & - FLB_PID=$! 
- echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 2 seconds and rotate file - sleep 2 - pre_lines=`cat $TEST_DIR/a.log | wc -l` - mv $TEST_DIR/a.log $TEST_DIR/a.log.1 - - lines=`cat $TEST_DIR/a | wc -l` - echo "file rotated, mid-check: processed lines $lines" - - # Append 200 more lines to the rotated link - python logger_file.py -l 200 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log.1 - - # Count number of processed lines - sleep 3 - write_lines=300 - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Check that database file have the right offset and mark the file as rotated - sqlite_check $TEST_DIR a.db $FLB_PID - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 5. Truncate Link -# -# Test a link that gets a truncation and Fluent Bit properly use the new offset -# -# Configuration file used: conf/truncate_link.conf - -test_truncate_link() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - - # Get the last size of the 'a.log' file and check we have the same value - # in the database - offset=`wc -c < $TEST_DIR/a.log` - - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset" > \ - $1/$2.offset - - rows=`cat $1/$2.offset | wc -l | tr -d -C '[0-9]'` - if [ $rows != "1" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> database file $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Create empty files so Fluent Bit will enqueue them on start - touch $TEST_DIR/data.log - - # Start the Logger: 1 file with 100 lines, we use a big size limit (-s) to - # avoid rotation - python logger_file.py -l 100 -s 200000 -b 100 -d 0 -f $TEST_DIR/data.log - lines=`cat $TEST_DIR/data.log | wc -l` - ln -s data.log $TEST_DIR/a.log - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/truncate_link.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 1 second before truncation - sleep 1 - pre_lines=`cat $TEST_DIR/a.log | wc -l` - truncate -s 0 $TEST_DIR/a.log - - sleep 2 - lines=`cat $TEST_DIR/a | wc -l` - echo "file truncated, mid-check: processed lines $lines" - - # Append 200 more lines - python logger_file.py -l 200 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log - - sleep 4 - - # Count number of processed lines - write_lines=300 - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - -# 6. 
Database Resume -# -# Tests that if Fluent Bit stops, it can resume reading files from the last position -# -# Configuration file used: conf/database_resume.conf - -test_database_resume() { - # Helper function to check monitored files - sqlite_check() - { - # Incoming parameters: - # $1: temporal directory to store data - # $2: database file name - # $3: Fluent Bit PID - # - # This function store the remaining monitored files listed in the database, - # we send the output to an .inodes for troubleshooting purposes if required - - # Get the last size of the 'a.log' file and check we have the same value - # in the database - offset=`stat -c %s $TEST_DIR/a.log` - - sqlite3 $1/$2 -batch \ - ".headers off" "SELECT inode FROM in_tail_files WHERE offset=$offset" > \ - $1/$2.offset - - rows=`cat $1/$2.offset | wc -l` - if [ $rows != "1" ]; then - echo "> invalid database content:" - cat $1/$2.offset - echo "> open files" - ls -l /proc/$3/fd/ | grep \\.log - else - echo "> database file $1/$2 is OK" - fi - ${_ASSERT_EQUALS_} "1" $rows - } - - # Prepare test directory - export TEST_DIR=tmp_test - rm -rf $TEST_DIR - mkdir $TEST_DIR - - # Start the Logger: 5 files with 10000 lines each, we use a big size limit (-s) to - # avoid rotation - python logger_file.py -l 10000 -s 200000 -b 100 -d 0 \ - -f $TEST_DIR/a.log \ - -f $TEST_DIR/b.log \ - -f $TEST_DIR/c.log \ - -f $TEST_DIR/d.log \ - -f $TEST_DIR/e.log - - lines=`cat $TEST_DIR/[abcde].log | wc -l` - echo "Logger done, written lines "$lines - - # Run Fluent Bit - $FLB_BIN -c conf/database_resume.conf & - FLB_PID=$! - echo "Fluent Bit started, pid=$FLB_PID" - - # Wait 1 second before stop Fluent Bit - sleep 1 - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID - - sqlite3 $TEST_DIR/logs.db "SELECT * FROM in_tail_files" - exit 0 - - sleep 2 - lines=`cat $TEST_DIR/a | wc -l` - echo "file truncated, mid-check: processed lines $lines" - - # Append 200 more lines - python logger_file.py -l 200 -s 200000 -b 100 -d 0 -f $TEST_DIR/a.log - - sleep 4 - - # Count number of processed lines - write_lines=300 - read_lines=`cat $TEST_DIR/a | wc -l` - - echo "> write lines: $write_lines" - echo "> read lines : $read_lines" - - # Check we processed same number of records - ${_ASSERT_EQUALS_} $write_lines $read_lines - - # Stop Fluent Bit (SIGTERM) - kill -15 $FLB_PID -} - - -# Launch the tests -. $FLB_RUN_TEST |
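The central assertion in the deleted test scripts is a single SQLite query against the tail plugin's position database (the `in_tail_files` table). The following is a minimal standalone sketch of that check, mirroring the removed `sqlite_check` helper; the `tmp_test/a.db` path is illustrative and corresponds to the `db ${TEST_DIR}/a.db` option in the deleted configuration files:

```sh
#!/bin/sh
# Sketch of the rotation check used throughout the deleted suite: once
# rotation has settled, the tail database should hold exactly one row
# (the inode currently being monitored) for the tracked file.
DB=tmp_test/a.db    # illustrative path, matches "db ${TEST_DIR}/a.db" in the configs

sqlite3 "$DB" -batch ".headers off" ".width 20" \
    "SELECT inode FROM in_tail_files" > "$DB.inodes"

rows=$(wc -l < "$DB.inodes" | tr -d '[:space:]')
if [ "$rows" != "1" ]; then
    echo "> database file $DB contains $rows rows, inodes:"
    cat "$DB.inodes"
    exit 1
fi
echo "> database file $DB is OK"
```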