author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-27 11:08:07 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-27 11:08:07 +0000
commit    c69cb8cc094cc916adbc516b09e944cd3d137c01 (patch)
tree      f2878ec41fb6d0e3613906c6722fc02b934eeb80 /tests
parent    Initial commit. (diff)
Adding upstream version 1.29.3. (tag: upstream/1.29.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests')
-rw-r--r--  tests/Makefile.am  45
-rw-r--r--  tests/README.md  148
-rw-r--r--  tests/acls/acl.sh.in  126
-rw-r--r--  tests/acls/netdata.cfg  20
-rw-r--r--  tests/acls/netdata.ssl.cfg  24
-rw-r--r--  tests/alarm_repetition/alarm.sh.in  89
-rw-r--r--  tests/alarm_repetition/netdata.conf_with_repetition  57
-rw-r--r--  tests/alarm_repetition/netdata.conf_without_repetition  57
-rw-r--r--  tests/alarm_repetition/ram_with_repetition.conf  64
-rw-r--r--  tests/alarm_repetition/ram_without_repetition.conf  63
-rw-r--r--  tests/api/fuzzer.py  378
-rw-r--r--  tests/backends/prometheus-avg-oldunits.txt  148
-rw-r--r--  tests/backends/prometheus-avg.txt  148
-rw-r--r--  tests/backends/prometheus-raw.txt  156
-rwxr-xr-x  tests/backends/prometheus.bats  31
-rw-r--r--  tests/health_mgmtapi/README.md  15
-rw-r--r--  tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/HOSTS-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/RESET-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_2-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_3-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json  1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json  1
-rwxr-xr-x  tests/health_mgmtapi/health-cmdapi-test.sh.in  226
-rwxr-xr-x  tests/installer/checksums.sh  54
-rwxr-xr-x  tests/installer/slack.sh  65
-rw-r--r--  tests/k6/data.js  67
-rwxr-xr-x  tests/lifecycle.bats  61
-rw-r--r--  tests/node.d/fronius.chart.spec.js  162
-rw-r--r--  tests/node.d/fronius.parse.spec.js  333
-rw-r--r--  tests/node.d/fronius.process.spec.js  75
-rw-r--r--  tests/node.d/fronius.validation.spec.js  155
-rw-r--r--  tests/profile/Makefile  51
-rw-r--r--  tests/profile/benchmark-dictionary.c  130
-rw-r--r--  tests/profile/benchmark-line-parsing.c  702
-rw-r--r--  tests/profile/benchmark-procfile-parser.c  329
-rw-r--r--  tests/profile/benchmark-registry.c  227
-rw-r--r--  tests/profile/benchmark-value-pairs.c  623
-rw-r--r--  tests/profile/statsd-stress.c  151
-rw-r--r--  tests/profile/test-eval.c  299
-rwxr-xr-x  tests/run-unit-tests.sh  39
-rwxr-xr-x  tests/stress.sh  83
-rw-r--r--  tests/template_dimension/system_cpu.conf.alarm_foreach  8
-rw-r--r--  tests/template_dimension/system_cpu.conf.alarm_foreach_sp  8
-rw-r--r--  tests/template_dimension/system_cpu.conf.template_alarm  26
-rw-r--r--  tests/template_dimension/system_cpu.conf.template_foreach  8
-rw-r--r--  tests/template_dimension/system_cpu.conf.template_foreach_sp  8
-rw-r--r--  tests/template_dimension/system_cpu.conf.unique_alarm  26
-rw-r--r--  tests/template_dimension/template_dim.sh.in  88
-rwxr-xr-x  tests/updater_checks.bats  66
-rwxr-xr-x  tests/updater_checks.sh  71
-rw-r--r--  tests/urls/request.sh.in  307
-rw-r--r--  tests/web/easypiechart.chart.spec.js  39
-rw-r--r--  tests/web/easypiechart.percentage.spec.js  142
-rw-r--r--  tests/web/fixtures/easypiechart.chart.fixture1.html  6
-rw-r--r--  tests/web/karma.conf.js  110
-rw-r--r--  tests/web/lib/jasmine-jquery.js  841
66 files changed, 7171 insertions, 0 deletions
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 0000000..29fe7ee
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ health_mgmtapi/health-cmdapi-test.sh \
+ acls/acl.sh \
+ urls/request.sh \
+ alarm_repetition/alarm.sh \
+ template_dimension/template_dim.sh \
+ $(NULL)
+
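+# subst.inc supplies the .in -> script substitution rule that generates the
+# runnable test scripts listed in CLEANFILES from the .sh.in templates below.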
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_noinst_DATA = \
+ README.md \
+ web/lib/jasmine-jquery.js \
+ web/easypiechart.chart.spec.js \
+ web/easypiechart.percentage.spec.js \
+ web/karma.conf.js \
+ web/fixtures/easypiechart.chart.fixture1.html \
+ node.d/fronius.chart.spec.js \
+ node.d/fronius.parse.spec.js \
+ node.d/fronius.process.spec.js \
+ node.d/fronius.validation.spec.js \
+ health_mgmtapi/health-cmdapi-test.sh.in \
+ acls/acl.sh.in \
+ urls/request.sh.in \
+ alarm_repetition/alarm.sh.in \
+ template_dimension/template_dim.sh.in \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ health_mgmtapi/health-cmdapi-test.sh \
+ acls/acl.sh \
+ urls/request.sh \
+ alarm_repetition/alarm.sh \
+ template_dimension/template_dim.sh \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ stress.sh \
+ $(NULL)
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..256b482
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,148 @@
+<!--
+title: "Testing"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/tests/README.md
+-->
+
+# Testing
+
+This README is a guide to getting started with unit testing for the JavaScript and Node.js code in netdata.
+
+Original author: BrainDoctor (github), July 2017
+
+## Installation
+
+Tested on Linux Mint 18.2 Sara (an Ubuntu/Debian derivative).
+
+Run the following as your regular development user; only the package installation below requires sudo.
+
+```sh
+sudo apt-get install nodejs npm chromium-browser
+
+cd /path/to/your/netdata
+npm install
+```
+
+That should install the necessary node modules.
+
+Other browsers (Chrome, Firefox) work too. However, only Chromium 59 has been tested for headless unit testing.
+
+### Versions
+
+The commands above leave me with the following versions (July 2017):
+
+- nodejs: v4.2.6
+- npm: 3.5.2
+- chromium-browser: 59.0.3071.109
+- WebStorm (optional): 2017.1.4
+
+## Configuration
+
+### NPM
+
+The dependencies are declared in `netdata/package.json`. If you install a new NPM module, it gets added there. Future developers just need to execute `npm install` and every dependency is installed automatically.
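+
+For example, to add a new development dependency and have it recorded in `package.json` automatically (a sketch; `some-module` is a placeholder name):
+
+```sh
+cd /path/to/your/netdata
+npm install --save-dev some-module
+```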
+
+### Karma
+
+Karma configuration is in `tests/web/karma.conf.js`. Documentation is provided via comments.
+
+### WebStorm
+
+If you use the JetBrains WebStorm IDE, you can integrate the karma runtime.
+
+#### for Karma (Client side testing)
+
+Headless Chromium:
+
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Headless Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: ChromiumHeadless
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
+
+GUI Chromium is similar:
+
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: Chromium
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
+
+You may add other browsers too (comma separated). With the "Browsers to start" field you can override any settings in karma.conf.js.
+
+It is also recommended to install the JetBrains IDE Support extension in Chrome/Chromium for convenient debugging.
+
+#### for node.d plugins (nodejs)
+
+1. Run > Edit Configurations
+2. "+" > Node.js
+3. - Name: Node.d plugins
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - JavaScript file: node_modules/jasmine-node/bin/jasmine-node
+ - Application parameters: --captureExceptions tests/node.d
+
+## Running
+
+### In WebStorm
+
+#### Karma
+
+Just launch the run configurations created above and they will produce nice test trees:
+
+![karma_run_2](https://user-images.githubusercontent.com/12159026/28277789-559149f6-6b1b-11e7-9cc7-a81d81d12c35.png)
+
+#### node.js
+
+Debugging is awesome too!
+![node_debug](https://user-images.githubusercontent.com/12159026/28277879-8beee5ee-6b1b-11e7-9356-3156956f2282.png)
+
+### From CLI
+
+#### Karma
+
+```sh
+cd /path/to/your/netdata
+
+nodejs ./node_modules/karma/bin/karma start tests/web/karma.conf.js --single-run=true --browsers=ChromiumHeadless
+```
+
+will start the Karma server, launch Chromium in headless mode, run the tests once, and exit.
+
+If a test fails, a stack trace is produced as well:
+![karma_run_1](https://user-images.githubusercontent.com/12159026/28277754-3682bebe-6b1b-11e7-8b7e-66b23d87177d.png)
+
+#### Node.d plugins
+
+```sh
+cd /path/to/your/netdata
+
+nodejs node_modules/jasmine-node/bin/jasmine-node --captureExceptions tests/node.d
+```
+
+will run the tests in `tests/node.d` and also produce a stack trace on error:
+![node_run](https://user-images.githubusercontent.com/12159026/28277812-65bb69b0-6b1b-11e7-8500-bcdbb3436574.png)
+
+### Coverage
+
+#### Karma
+
+Karma produces a nice HTML report showing which code paths were executed. It is located under `/path/to/your/netdata/coverage/`:
+
+![coverage_2](https://user-images.githubusercontent.com/12159026/28277719-142146c4-6b1b-11e7-9992-3e88dee2efd2.png)
+and
+![coverage_1](https://user-images.githubusercontent.com/12159026/28277687-fa93e360-6b1a-11e7-995f-cbb4c5d012a7.png)
+
+#### Node.d
+
+jasmine-node can produce a JUnit report with the `--junitreport` flag, but that output was not very useful. Maybe it's configurable?
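+
+For example, a sketch of generating that JUnit report from the CLI:
+
+```sh
+cd /path/to/your/netdata
+nodejs node_modules/jasmine-node/bin/jasmine-node --junitreport --captureExceptions tests/node.d
+```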
+
+### CI
+
+The Karma and node.d runners can be integrated into Travis (AFAIK), but that is beyond the author's experience.
+
+Note: Karma is for browser testing. On a build server, no GUI or browser may be available unless the browsers support headless mode.
+
diff --git a/tests/acls/acl.sh.in b/tests/acls/acl.sh.in
new file mode 100644
index 0000000..9ac404c
--- /dev/null
+++ b/tests/acls/acl.sh.in
@@ -0,0 +1,126 @@
+#!/bin/bash -x
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+BASICURL="http://127.0.0.1"
+BASICURLS="https://127.0.0.1"
+
+NETDATA_VARLIB_DIR="/var/lib/netdata"
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;43m'
+NOCOLOR='\033[0m'
+
+# replace a pattern in the base acl configuration file and
+# store the result in a new file
+change_file(){
+ sed "s/$1/$2/g" netdata.cfg > "$4"
+}
+
+NETDATAPID=""
+
+change_ssl_file(){
+ KEYROW="ssl key = $3/key.pem"
+ CERTROW="ssl certificate = $3/cert.pem"
+ sed "s@ssl key =@$KEYROW@g" netdata.ssl.cfg > tmp
+ sed "s@ssl certificate =@$CERTROW@g" tmp > tmp2
+ sed "s/$1/$2/g" tmp2 > "$4"
+}
+
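+# run_acl_tests TOKEN URL EXPECTED_200S EXPECTED_301S
+# Fetches several endpoints from URL and verifies that the curl logs contain
+# EXPECTED_301S "HTTP/1.1 301" redirects and EXPECTED_200S "HTTP/1.1 200 OK"
+# responses.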
+run_acl_tests() {
+	# Give netdata time to start properly
+ sleep 2
+
+ curl -v -k --tls-max 1.2 --create-dirs -o index.html "$2" 2> log_index.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o netdata.txt "$2/netdata.conf" 2> log_nc.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o badge.csv "$2/api/v1/badge.svg?chart=cpu.cpu0_interrupts" 2> log_badge.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o info.txt "$2/api/v1/info" 2> log_info.txt
+ curl -H "X-Auth-Token: $1" -v -k --tls-max 1.2 --create-dirs -o health.csv "$2/api/v1/manage/health?cmd=LIST" 2> log_health.txt
+
+ TOT=$(grep -c "HTTP/1.1 301" log_*.txt | cut -d: -f2| grep -c 1)
+ if [ "$TOT" -ne "$4" ]; then
+		echo -e "${RED}I got a wrong number of redirects ($TOT) when SSL is activated; expected $4 ${NOCOLOR}"
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ kill $NETDATAPID
+ exit 1
+ elif [ "$TOT" -eq "$4" ] && [ "$4" -ne "0" ]; then
+		echo -e "${YELLOW}I got the correct number of redirects ($4) when SSL is activated and access is attempted over HTTP. ${NOCOLOR}"
+ return
+ fi
+
+ TOT=$(grep -c "HTTP/1.1 200 OK" log_* | cut -d: -f2| grep -c 1)
+ if [ "$TOT" -ne "$3" ]; then
+		echo -e "${RED}I got a wrong number of \"200 OK\" responses from the queries; expected $3. ${NOCOLOR}"
+ kill $NETDATAPID
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ exit 1
+ fi
+
+ echo -e "${GREEN}ACLs were applied correctly ${NOCOLOR}"
+}
+
+CONF=$(grep "bind" netdata.cfg)
+MUSER=$(grep run netdata.cfg | cut -d= -f2|sed 's/^[ \t]*//')
+
+openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -sha512 -subj "/C=US/ST=Denied/L=Somewhere/O=Dis/CN=www.example.com" -keyout key.pem -out cert.pem
+chown "$MUSER" key.pem cert.pem
+CWD=$(pwd)
+
+if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+else
+ TOKEN="NULL"
+fi
+
+change_file "$CONF" " bind to = *" "$CWD" "netdata.conf.test0"
+netdata -c "netdata.conf.test0" -D &
+NETDATAPID=$!
+run_acl_tests $TOKEN "$BASICURL:19999" 5 0
+kill $NETDATAPID
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=optional *:20002=dashboard|registry" "$CWD" "netdata.conf.test1"
+netdata -c "netdata.conf.test1" -D &
+NETDATAPID=$!
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+kill $NETDATAPID
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=force *:20002=dashboard|registry" "$CWD" "netdata.conf.test2"
+netdata -c "netdata.conf.test2" -D &
+NETDATAPID=$!
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+kill $NETDATAPID
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management^SSL=optional *:20001=dashboard|registry|netdata.conf^SSL=force" "$CWD" "netdata.conf.test3"
+netdata -c "netdata.conf.test3" -D &
+NETDATAPID=$!
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+kill $NETDATAPID
+
+rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+echo "All the tests were successful ${NOCOLOR}"
diff --git a/tests/acls/netdata.cfg b/tests/acls/netdata.cfg
new file mode 100644
index 0000000..1dcb4a5
--- /dev/null
+++ b/tests/acls/netdata.cfg
@@ -0,0 +1,20 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings, is the default value.
+#
+
+[global]
+ run as user = netdata
+
+ # the default database size - 1 hour
+ history = 3600
+
+ # by default do not expose the netdata port
+ bind to = localhost
diff --git a/tests/acls/netdata.ssl.cfg b/tests/acls/netdata.ssl.cfg
new file mode 100644
index 0000000..28e0030
--- /dev/null
+++ b/tests/acls/netdata.ssl.cfg
@@ -0,0 +1,24 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings, is the default value.
+#
+
+[global]
+ run as user = netdata
+
+ # the default database size - 1 hour
+ history = 3600
+
+ # by default do not expose the netdata port
+ bind to = localhost
+
+[web]
+ ssl key =
+ ssl certificate =
diff --git a/tests/alarm_repetition/alarm.sh.in b/tests/alarm_repetition/alarm.sh.in
new file mode 100644
index 0000000..09d6aaf
--- /dev/null
+++ b/tests/alarm_repetition/alarm.sh.in
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+# The health directory where the alarm configuration is installed
+HEALTHDIR="@configdir_POST@/health.d/"
+
+#output directory
+OUTDIR="workdir/"
+
+# URL to download the alarm status from
+QUERY="/api/v1/alarms?active"
+MURL="http://localhost:19999$QUERY"
+
+# colors for messages
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NOCOLOR='\033[0m'
+
+MYCDIR="$(pwd)"
+CONFFILE="$MYCDIR/netdata.conf"
+
+change_alarm_file() {
+ if [ -f "$1" ]; then
+ rm "$1"
+ fi
+
+ #copy keeping the permissions
+ cp -a "$2" "$3"
+}
+
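+# netdata_test_download ERR_FILE OUT_FILE FLAG DESCRIPTION PID
+# Checks that the curl transcript in ERR_FILE contains an "HTTP/1.1 200 OK"
+# and that the JSON in OUT_FILE carries the expected "last_repeat" values;
+# FLAG is "I" when repetition is enabled. PID is the netdata process to kill
+# on failure.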
+netdata_test_download() {
+ OPT="-e"
+ if [ "$3" == "I" ]; then
+ OPT="-v"
+ fi
+
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST="$?"
+ if [ "$TEST" -ne "0" ]; then
+		echo -e "${RED} Failed to get the alarms. ${NOCOLOR}"
+ kill "$5"
+ rm "$HEALTHDIR/ram.conf"
+ exit 1
+ fi
+
+ COUNT=$(grep -w "\"last_repeat\":" "$2" | grep -c "$OPT" "\"0\"")
+ if [ "$COUNT" -eq "0" ]; then
+ echo -e "${RED} Netdata gave an unexpected result when alarm repetition is $4 ${NOCOLOR}"
+		kill "$5"
+ rm "$HEALTHDIR/ram.conf"
+ exit 1
+ fi
+
+ echo -e "${GREEN} I got the expected result ${NOCOLOR}"
+}
+
+get_the_logs() {
+ curl -v -k --create-dirs -o "$OUTDIR/$1.out" "$MURL" 2> "$OUTDIR/$1.err"
+ netdata_test_download "$OUTDIR/$1.err" "$OUTDIR/$1.out" "$2" "$3" "$4"
+}
+
+process_data() {
+ SEC=120
+ netdata -c "$CONFFILE" -D &
+ NETDATAPID=$!
+	echo -e "${NOCOLOR}Sleeping for $SEC seconds to create alarm entries"
+ sleep $SEC
+ get_the_logs "$1" "$2" "$3" "$NETDATAPID"
+ kill $NETDATAPID
+}
+
+mkdir "$OUTDIR"
+CREATEDIR="$?"
+if [ "$CREATEDIR" -ne "0" ]; then
+ echo -e "${RED}Cannot create the output directory, it already exists. The test will overwrite previous results. ${NOCOLOR}"
+fi
+
+change_alarm_file "./0" "ram_without_repetition.conf" "$HEALTHDIR/ram.conf"
+cp -a netdata.conf_without_repetition netdata.conf
+process_data "ram_without" "K" "not activated."
+rm netdata.conf
+
+change_alarm_file "$HEALTHDIR/ram.conf" "ram_with_repetition.conf" "$HEALTHDIR/ram.conf"
+cp -a netdata.conf_with_repetition netdata.conf
+process_data "ram_with" "I" "activated."
+rm netdata.conf
+
+echo -e "${GREEN} All the tests were successful ${NOCOLOR}"
+rm "$HEALTHDIR/ram.conf"
+rm -rf $OUTDIR
diff --git a/tests/alarm_repetition/netdata.conf_with_repetition b/tests/alarm_repetition/netdata.conf_with_repetition
new file mode 100644
index 0000000..d5d00f0
--- /dev/null
+++ b/tests/alarm_repetition/netdata.conf_with_repetition
@@ -0,0 +1,57 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings, is the default value.
+#
+
+# global netdata configuration
+
+[global]
+ #run as user = netdata
+
+[web]
+ #ssl key = /etc/netdata/ssl/key2048.pem
+ #ssl certificate = /etc/netdata/ssl/cert2048.pem
+ mode = static-threaded
+ # listen backlog = 4096
+ default port = 19999
+ #bind to = *=dashboard|registry|streaming|netdata.conf|badges|management *:20000=dashboard|registry|streaming|netdata.conf|badges|management^SSL=optional *:20001=dashboard|registry|streaming|netdata.conf|badges|management^SSL=force unix:/tmp/netdata/netdata.sock
+ # web files owner = netdata
+ # web files group = netdata
+ #accept a streaming request every seconds = 2
+
+[plugins]
+ proc = yes
+ diskspace = no
+ cgroups = no
+ tc = no
+ idlejitter = no
+ enable running new plugins = no
+ check for new plugins every = 60
+ go.d = no
+ node.d = no
+ charts.d = no
+ nfacct = no
+ python.d = no
+ apps = no
+ fping = no
+ cups = no
+
+[health]
+ enabled = yes
+ in memory max health log entries = 1000
+ default repeat warning = 4s
+ default repeat critical = 2s
+
+[registry]
+ enabled = yes
+ allow from = *
+
+[cloud]
+ cloud base url = https://app.netdata.cloud
diff --git a/tests/alarm_repetition/netdata.conf_without_repetition b/tests/alarm_repetition/netdata.conf_without_repetition
new file mode 100644
index 0000000..43518bd
--- /dev/null
+++ b/tests/alarm_repetition/netdata.conf_without_repetition
@@ -0,0 +1,57 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings, is the default value.
+#
+
+# global netdata configuration
+
+[global]
+ #run as user = netdata
+
+[web]
+ #ssl key = /etc/netdata/ssl/key2048.pem
+ #ssl certificate = /etc/netdata/ssl/cert2048.pem
+ mode = static-threaded
+ # listen backlog = 4096
+ default port = 19999
+ #bind to = *=dashboard|registry|streaming|netdata.conf|badges|management *:20000=dashboard|registry|streaming|netdata.conf|badges|management^SSL=optional *:20001=dashboard|registry|streaming|netdata.conf|badges|management^SSL=force unix:/tmp/netdata/netdata.sock
+ # web files owner = netdata
+ # web files group = netdata
+ #accept a streaming request every seconds = 2
+
+[plugins]
+ proc = yes
+ diskspace = no
+ cgroups = no
+ tc = no
+ idlejitter = no
+ enable running new plugins = no
+ check for new plugins every = 60
+ go.d = no
+ node.d = no
+ charts.d = no
+ nfacct = no
+ python.d = no
+ apps = no
+ fping = no
+ cups = no
+
+[health]
+ enabled = yes
+ in memory max health log entries = 1000
+ #default repeat warning = 4s
+ #default repeat critical = 2s
+
+[registry]
+ enabled = yes
+ allow from = *
+
+[cloud]
+ cloud base url = https://app.netdata.cloud
diff --git a/tests/alarm_repetition/ram_with_repetition.conf b/tests/alarm_repetition/ram_with_repetition.conf
new file mode 100644
index 0000000..c215a71
--- /dev/null
+++ b/tests/alarm_repetition/ram_with_repetition.conf
@@ -0,0 +1,64 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: used_ram_to_ignore
+ on: system.ram
+ os: linux freebsd
+ hosts: *
+ calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz)
+ every: 10s
+ info: the amount of memory that is reported as used, but it is actually capable for resizing itself based on the system needs (eg. ZFS ARC)
+
+ alarm: ram_in_use
+ on: system.ram
+ os: linux
+ hosts: *
+# calc: $used * 100 / ($used + $cached + $free)
+ calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free)
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 5
+ delay: down 15m multiplier 1.5 max 1h
+ info: system RAM used
+ to: sysadmin #alarms
+ repeat: warning 30s critical 60s
+
+ alarm: ram_available
+ on: mem.available
+ os: linux
+ hosts: *
+ calc: ($avail + $used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin #alarms
+
+## FreeBSD
+alarm: ram_in_use
+ on: system.ram
+ os: freebsd
+hosts: *
+ calc: ($active + $wired + $laundry + $buffers - $used_ram_to_ignore) * 100 / ($active + $wired + $laundry + $buffers - $used_ram_to_ignore + $cache + $free + $inactive)
+units: %
+every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+delay: down 15m multiplier 1.5 max 1h
+ info: system RAM usage
+ to: sysadmin #alarms
+
+ alarm: ram_available
+ on: system.ram
+ os: freebsd
+ hosts: *
+ calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $laundry + $buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin #alarms
diff --git a/tests/alarm_repetition/ram_without_repetition.conf b/tests/alarm_repetition/ram_without_repetition.conf
new file mode 100644
index 0000000..edfc492
--- /dev/null
+++ b/tests/alarm_repetition/ram_without_repetition.conf
@@ -0,0 +1,63 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: used_ram_to_ignore
+ on: system.ram
+ os: linux freebsd
+ hosts: *
+ calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz)
+ every: 10s
+ info: the amount of memory that is reported as used, but it is actually capable for resizing itself based on the system needs (eg. ZFS ARC)
+
+ alarm: ram_in_use
+ on: system.ram
+ os: linux
+ hosts: *
+# calc: $used * 100 / ($used + $cached + $free)
+ calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free)
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 5
+ delay: down 15m multiplier 1.5 max 1h
+ info: system RAM used
+ to: sysadmin #alarms
+
+ alarm: ram_available
+ on: mem.available
+ os: linux
+ hosts: *
+ calc: ($avail + $used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin #alarms
+
+## FreeBSD
+alarm: ram_in_use
+ on: system.ram
+ os: freebsd
+hosts: *
+ calc: ($active + $wired + $laundry + $buffers - $used_ram_to_ignore) * 100 / ($active + $wired + $laundry + $buffers - $used_ram_to_ignore + $cache + $free + $inactive)
+units: %
+every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+delay: down 15m multiplier 1.5 max 1h
+ info: system RAM usage
+ to: sysadmin #alarms
+
+ alarm: ram_available
+ on: system.ram
+ os: freebsd
+ hosts: *
+ calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $laundry + $buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin #alarms
diff --git a/tests/api/fuzzer.py b/tests/api/fuzzer.py
new file mode 100644
index 0000000..ee12a02
--- /dev/null
+++ b/tests/api/fuzzer.py
@@ -0,0 +1,378 @@
+import argparse
+import json
+import logging
+import posixpath
+import random
+import re
+import requests
+import string
+import sys
+import urllib.parse
+
+#######################################################################################################################
+# Utilities
+
+
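+# some(s) returns a random element of the set s; not_some(s) builds a random
+# string that is guaranteed not to be a member of s, used to generate
+# deliberately invalid parameter values.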
+def some(s):
+ return random.choice(sorted(s))
+
+
+def not_some(s):
+ test_set = random.choice([string.ascii_uppercase + string.ascii_lowercase,
+ string.digits,
+ string.digits + ".E-",
+ '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJK'
+ 'LMNOPQRSTUVWXYZ!"#$%\'()*+,-./:;<=>?@[\\]^_`{|}~ '])
+ test_len = random.choice([1, 2, 3, 37, 61, 121])
+ while True:
+ x = ''.join([random.choice(test_set) for _ in range(test_len)])
+ if x not in s:
+ return x
+
+
+def build_url(host_maybe_scheme, base_path):
+ try:
+ if '//' not in host_maybe_scheme:
+ host_maybe_scheme = '//' + host_maybe_scheme
+ url_tuple = urllib.parse.urlparse(host_maybe_scheme)
+ if base_path[0] == '/':
+ base_path = base_path[1:]
+ return url_tuple.netloc, posixpath.join(url_tuple.path, base_path)
+ except Exception as e:
+ L.error(f"Critical failure decoding arguments -> {e}")
+ sys.exit(-1)
+
+
+#######################################################################################################################
+# Data-model and processing
+
+
+class Param(object):
+ def __init__(self, name, location, kind):
+ self.location = location
+ self.kind = kind
+ self.name = name
+ self.values = set()
+
+ def dump(self):
+ print(f"{self.name} in {self.location} is {self.kind} : {{{self.values}}}")
+
+
+def does_response_fit_schema(schema_path, schema, resp):
+ '''The schema_path argument tells us where we are (globally) in the schema. The schema argument is the
+ sub-tree within the schema json that we are validating against. The resp is the json subtree from the
+ target host's response.
+
+ The basic idea is this: swagger defines a model of valid json trees. In this sense it is a formal
+ language and we can validate a given server response by checking if the language accepts a particular
+ server response. This is basically a parser, but instead of strings we are operating on languages
+ of trees.
+
+ This could probably be extended to arbitrary swagger definitions - but the amount of work increases
+ rapidly as we attempt to cover the full semantics of languages of trees defined in swagger. Instead
+ we have some special cases that describe the parts of the semantics that we've used to describe the
+ netdata API.
+
+ If we hit an error (in the schema) that prevents further checks then we return early, otherwise we
+ try to collect as many errors as possible.
+ '''
+ success = True
+ if "type" not in schema:
+ L.error(f"Cannot progress past {schema_path} -> no type specified in dictionary")
+ print(json.dumps(schema, indent=2))
+ return False
+ if schema["type"] == "object":
+ if isinstance(resp, dict) and "properties" in schema and isinstance(schema["properties"], dict):
+ L.debug(f"Validate properties against dictionary at {schema_path}")
+ for k, v in schema["properties"].items():
+ L.debug(f"Validate {k} received with {v}")
+ if v.get("required", False) and k not in resp:
+ L.error(f"Missing {k} in response at {schema_path}")
+ print(json.dumps(resp, indent=2))
+ return False
+ if k in resp:
+ if not does_response_fit_schema(posixpath.join(schema_path, k), v, resp[k]):
+ success = False
+ elif isinstance(resp, dict) and "additionalProperties" in schema \
+ and isinstance(schema["additionalProperties"], dict):
+ kv_schema = schema["additionalProperties"]
+ L.debug(f"Validate additionalProperties against every value in dictionary at {schema_path}")
+ if "type" in kv_schema and kv_schema["type"] == "object":
+ for k, v in resp.items():
+ if not does_response_fit_schema(posixpath.join(schema_path, k), kv_schema, v):
+ success = False
+ else:
+ L.error("Don't understand what the additionalProperties means (it has no type?)")
+ return False
+ else:
+ L.error(f"Can't understand schema at {schema_path}")
+ print(json.dumps(schema, indent=2))
+ return False
+ elif schema["type"] == "string":
+ if isinstance(resp, str):
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
+ return True
+ L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
+ return False
+ elif schema["type"] == "boolean":
+ if isinstance(resp, bool):
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
+ return True
+ L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
+ return False
+ elif schema["type"] == "number":
+ if 'nullable' in schema and resp is None:
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path} (because nullable)")
+ return True
+ if isinstance(resp, int) or isinstance(resp, float):
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
+ return True
+ L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
+ return False
+ elif schema["type"] == "integer":
+ if 'nullable' in schema and resp is None:
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path} (because nullable)")
+ return True
+ if isinstance(resp, int):
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
+ return True
+ L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
+ return False
+ elif schema["type"] == "array":
+ if "items" not in schema:
+ L.error(f"Schema for array at {schema_path} does not specify items!")
+ return False
+ item_schema = schema["items"]
+ if not isinstance(resp, list):
+ L.error(f"Server did not return a list for {schema_path} (typed as array in schema)")
+ return False
+ for i, item in enumerate(resp):
+ if not does_response_fit_schema(posixpath.join(schema_path, str(i)), item_schema, item):
+ success = False
+ else:
+ L.error(f"Invalid swagger type {schema['type']} for {type(resp)} at {schema_path}")
+ print(json.dumps(schema, indent=2))
+ return False
+ return success
+
+
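+# GetPath models a single GET endpoint from the swagger spec: its required and
+# optional parameters (with known-good values drawn from defaults and enums),
+# the schema expected on success, and the per-status-code schemas expected on
+# failure.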
+class GetPath(object):
+ def __init__(self, url, spec):
+ self.url = url
+ self.req_params = {}
+ self.opt_params = {}
+ self.success = None
+ self.failures = {}
+ if 'parameters' in spec.keys():
+ for p in spec['parameters']:
+ name = p['name']
+ req = p.get('required', False)
+ target = self.req_params if req else self.opt_params
+ target[name] = Param(name, p['in'], p['type'])
+ if 'default' in p:
+ defs = p['default']
+ if isinstance(defs, list):
+ for d in defs:
+ target[name].values.add(d)
+ else:
+ target[name].values.add(defs)
+ if 'enum' in p:
+ for v in p['enum']:
+ target[name].values.add(v)
+ if req and len(target[name].values) == 0:
+ print(f"FAIL: No default values in swagger for required parameter {name} in {self.url}")
+ for code, schema in spec['responses'].items():
+ if code[0] == "2" and 'schema' in schema:
+ self.success = schema['schema']
+ elif code[0] == "2":
+ L.error(f"2xx response with no schema in {self.url}")
+ else:
+ self.failures[code] = schema
+
+ def generate_success(self, host):
+ url_args = "&".join([f"{p.name}={some(p.values)}" for p in self.req_params.values()])
+ base_url = urllib.parse.urljoin(host, self.url)
+ test_url = f"{base_url}?{url_args}"
+ if url_filter.match(test_url):
+ try:
+ resp = requests.get(url=test_url, verify=(not args.tls_no_verify))
+ self.validate(test_url, resp, True)
+ except Exception as e:
+ L.error(f"Network failure in test {e}")
+ else:
+ L.debug(f"url_filter skips {test_url}")
+
+ def generate_failure(self, host):
+ all_params = list(self.req_params.values()) + list(self.opt_params.values())
+ bad_param = ''.join([random.choice(string.ascii_lowercase) for _ in range(5)])
+ while bad_param in all_params:
+ bad_param = ''.join([random.choice(string.ascii_lowercase) for _ in range(5)])
+ all_params.append(Param(bad_param, "query", "string"))
+ url_args = "&".join([f"{p.name}={not_some(p.values)}" for p in all_params])
+ base_url = urllib.parse.urljoin(host, self.url)
+ test_url = f"{base_url}?{url_args}"
+ if url_filter.match(test_url):
+ try:
+ resp = requests.get(url=test_url, verify=(not args.tls_no_verify))
+ self.validate(test_url, resp, False)
+ except Exception as e:
+ L.error(f"Network failure in test {e}")
+
+ def validate(self, test_url, resp, expect_success):
+ try:
+ resp_json = json.loads(resp.text)
+        except json.decoder.JSONDecodeError:
+ L.error(f"Non-json response from {test_url}")
+ return
+ success_code = resp.status_code >= 200 and resp.status_code < 300
+ if success_code and expect_success:
+ if self.success is not None:
+ if does_response_fit_schema(posixpath.join(self.url, str(resp.status_code)), self.success, resp_json):
+ L.info(f"tested {test_url}")
+ else:
+ L.error(f"tested {test_url}")
+ else:
+ L.error(f"Missing schema {test_url}")
+ elif not success_code and not expect_success:
+ schema = self.failures.get(str(resp.status_code), None)
+ if schema is not None:
+ if does_response_fit_schema(posixpath.join(self.url, str(resp.status_code)), schema, resp_json):
+ L.info(f"tested {test_url}")
+ else:
+ L.error(f"tested {test_url}")
+ else:
+                L.error(f"Missing schema for {resp.status_code} from {test_url}")
+ else:
+ L.error(f"Received incorrect status code {resp.status_code} against {test_url}")
+
+
+def get_the_spec(url):
+ if url[:7] == "file://":
+ with open(url[7:]) as f:
+ return f.read()
+ return requests.get(url=url).text
+
+
+# Swagger paths look absolute but they are relative to the base.
+def not_absolute(path):
+ return path[1:] if path[0] == '/' else path
+
+
+def find_ref(spec, path):
+ if len(path) > 0 and path[0] == '#':
+ return find_ref(spec, path[1:])
+ if len(path) == 1:
+ return spec[path[0]]
+ return find_ref(spec[path[0]], path[1:])
+
+
+def resolve_refs(spec, spec_root=None):
+ '''Find all "$ref" keys in the swagger spec and inline their target schemas.
+
+ As with all inliners this will break if a definition recursively links to itself, but this should not
+ happen in swagger as embedding a structure inside itself would produce a record of infinite size.'''
+ if spec_root is None:
+ spec_root = spec
+ newspec = {}
+ for k, v in spec.items():
+ if k == "$ref":
+ path = v.split('/')
+ target = find_ref(spec_root, path)
+ # Unfold one level of the tree and erase the $ref if possible.
+ if isinstance(target, dict):
+ for kk, vv in resolve_refs(target, spec_root).items():
+ newspec[kk] = vv
+ else:
+ newspec[k] = target
+ elif isinstance(v, dict):
+ newspec[k] = resolve_refs(v, spec_root)
+ else:
+ newspec[k] = v
+    # This is an artifact of inlining the $refs: when they appear inside a properties key their children
+    # should be pushed up into the parent dictionary. They must be merged (union) rather than replaced, as
+    # we use this to implement polymorphism in the data-model.
+ if 'properties' in newspec and isinstance(newspec['properties'], dict) and \
+ 'properties' in newspec['properties']:
+ sub = newspec['properties']['properties']
+ del newspec['properties']['properties']
+ if 'type' in newspec['properties']:
+ del newspec['properties']['type']
+ for k, v in sub.items():
+ newspec['properties'][k] = v
+ return newspec
+
+
+#######################################################################################################################
+# Initialization
+
+random.seed(7) # Default is reproducible sequences
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--url', type=str,
+ default='https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.json',
+ help='The URL of the API definition in swagger. The default will pull the latest version '
+ 'from the main branch.')
+parser.add_argument('--host', type=str,
+ help='The URL of the target host to fuzz. The default will read the host from the swagger '
+ 'definition.')
+parser.add_argument('--reseed', action='store_true',
+ help="Pick a random seed for the PRNG. The default uses a constant seed for reproducibility.")
+parser.add_argument('--passes', action='store_true',
+ help="Log information about tests that pass")
+parser.add_argument('--detail', action='store_true',
+ help="Log information about the response/schema comparisons during each test")
+parser.add_argument('--filter', type=str,
+ default=".*",
+ help="Supply a regex used to filter the testing URLs generated")
+parser.add_argument('--tls-no-verify', action='store_true',
+                    help="Disable TLS certificate verification to allow connection to hosts that use "
+                         "self-signed certificates")
+parser.add_argument('--dump-inlined', action='store_true',
+ help='Dump the inlined swagger spec instead of fuzzing. For "reasons".')
+
+args = parser.parse_args()
+if args.reseed:
+ random.seed()
+
+spec = json.loads(get_the_spec(args.url))
+inlined_spec = resolve_refs(spec)
+if args.dump_inlined:
+ print(json.dumps(inlined_spec, indent=2))
+ sys.exit(-1)
+
+logging.addLevelName(40, "FAIL")
+logging.addLevelName(20, "PASS")
+logging.addLevelName(10, "DETAIL")
+L = logging.getLogger()
+handler = logging.StreamHandler(sys.stdout)
+if not args.passes and not args.detail:
+ L.setLevel(logging.ERROR)
+elif args.passes and not args.detail:
+ L.setLevel(logging.INFO)
+elif args.detail:
+ L.setLevel(logging.DEBUG)
+handler.setFormatter(logging.Formatter(fmt="%(levelname)s %(message)s"))
+L.addHandler(handler)
+
+url_filter = re.compile(args.filter)
+
+if spec['swagger'] != '2.0':
+    L.error(f"Unexpected swagger version {spec['swagger']} (expected 2.0)")
+ sys.exit(-1)
+L.info(f"Fuzzing {spec['info']['title']} / {spec['info']['version']}")
+
+host, base_url = build_url(args.host or spec['host'], inlined_spec['basePath'])
+
+L.info(f"Target host is {base_url}")
+paths = []
+for name, p in inlined_spec['paths'].items():
+ if 'get' in p:
+ name = not_absolute(name)
+ paths.append(GetPath(posixpath.join(base_url, name), p['get']))
+ elif 'put' in p:
+        L.error(f"Generation of PUT methods (for {name}) is unimplemented")
+
+for s in inlined_spec['schemes']:
+ for p in paths:
+        p.generate_success(s + "://" + host)
+        p.generate_failure(s + "://" + host)
diff --git a/tests/backends/prometheus-avg-oldunits.txt b/tests/backends/prometheus-avg-oldunits.txt
new file mode 100644
index 0000000..53ee8ff
--- /dev/null
+++ b/tests/backends/prometheus-avg-oldunits.txt
@@ -0,0 +1,148 @@
+nd_apps_cpu_percent_average
+nd_apps_cpu_system_percent_average
+nd_apps_cpu_user_percent_average
+nd_apps_files_open_files_average
+nd_apps_lreads_kilobytes_persec_average
+nd_apps_lwrites_kilobytes_persec_average
+nd_apps_major_faults_page_faults_persec_average
+nd_apps_mem_MB_average
+nd_apps_minor_faults_page_faults_persec_average
+nd_apps_pipes_open_pipes_average
+nd_apps_preads_kilobytes_persec_average
+nd_apps_processes_processes_average
+nd_apps_pwrites_kilobytes_persec_average
+nd_apps_sockets_open_sockets_average
+nd_apps_swap_MB_average
+nd_apps_threads_threads_average
+nd_apps_vmem_MB_average
+nd_cpu_core_throttling_events_persec_average
+nd_cpu_cpu_percent_average
+nd_cpu_interrupts_interrupts_persec_average
+nd_cpu_softirqs_softirqs_persec_average
+nd_cpu_softnet_stat_events_persec_average
+nd_disk_avgsz_kilobytes_per_operation_average
+nd_disk_await_ms_per_operation_average
+nd_disk_backlog_milliseconds_average
+nd_disk_inodes_Inodes_average
+nd_disk_io_kilobytes_persec_average
+nd_disk_iotime_milliseconds_persec_average
+nd_disk_mops_merged_operations_persec_average
+nd_disk_ops_operations_persec_average
+nd_disk_space_GB_average
+nd_disk_svctm_ms_per_operation_average
+nd_disk_util___of_time_working_average
+nd_ip_bcast_kilobits_persec_average
+nd_ip_bcastpkts_packets_persec_average
+nd_ip_ecnpkts_packets_persec_average
+nd_ip_inerrors_packets_persec_average
+nd_ip_mcast_kilobits_persec_average
+nd_ip_mcastpkts_packets_persec_average
+nd_ip_tcp_accept_queue_packets_persec_average
+nd_ip_tcpconnaborts_connections_persec_average
+nd_ip_tcpofo_packets_persec_average
+nd_ip_tcpreorders_packets_persec_average
+nd_ipv4_errors_packets_persec_average
+nd_ipv4_icmp_errors_packets_persec_average
+nd_ipv4_icmpmsg_packets_persec_average
+nd_ipv4_icmp_packets_persec_average
+nd_ipv4_packets_packets_persec_average
+nd_ipv4_sockstat_sockets_sockets_average
+nd_ipv4_sockstat_tcp_mem_KB_average
+nd_ipv4_sockstat_tcp_sockets_sockets_average
+nd_ipv4_sockstat_udp_mem_KB_average
+nd_ipv4_sockstat_udp_sockets_sockets_average
+nd_ipv4_tcperrors_packets_persec_average
+nd_ipv4_tcphandshake_events_persec_average
+nd_ipv4_tcpopens_connections_persec_average
+nd_ipv4_tcppackets_packets_persec_average
+nd_ipv4_tcpsock_active_connections_average
+nd_ipv4_udperrors_events_persec_average
+nd_ipv4_udppackets_packets_persec_average
+nd_ipv6_ect_packets_persec_average
+nd_ipv6_errors_packets_persec_average
+nd_ipv6_icmperrors_errors_persec_average
+nd_ipv6_icmp_messages_persec_average
+nd_ipv6_icmpmldv2_reports_persec_average
+nd_ipv6_icmpneighbor_messages_persec_average
+nd_ipv6_icmprouter_messages_persec_average
+nd_ipv6_icmptypes_messages_persec_average
+nd_ipv6_mcast_kilobits_persec_average
+nd_ipv6_mcastpkts_packets_persec_average
+nd_ipv6_packets_packets_persec_average
+nd_ipv6_sockstat6_raw_sockets_sockets_average
+nd_ipv6_sockstat6_tcp_sockets_sockets_average
+nd_ipv6_sockstat6_udp_sockets_sockets_average
+nd_ipv6_udperrors_events_persec_average
+nd_ipv6_udppackets_packets_persec_average
+nd_mem_available_MB_average
+nd_mem_committed_MB_average
+nd_mem_kernel_MB_average
+nd_mem_pgfaults_page_faults_persec_average
+nd_mem_slab_MB_average
+nd_mem_transparent_hugepages_MB_average
+nd_mem_writeback_MB_average
+nd_netdata_apps_children_fix_percent_average
+nd_netdata_apps_cpu_milliseconds_persec_average
+nd_netdata_apps_fix_percent_average
+nd_netdata_apps_sizes_files_persec_average
+nd_netdata_clients_connected_clients_average
+nd_netdata_compression_ratio_percent_average
+nd_netdata_go_plugin_execution_time_ms_average
+nd_netdata_net_kilobits_persec_average
+nd_netdata_plugin_cgroups_cpu_milliseconds_persec_average
+nd_netdata_plugin_diskspace_dt_milliseconds_run_average
+nd_netdata_plugin_diskspace_milliseconds_persec_average
+nd_netdata_plugin_proc_cpu_milliseconds_persec_average
+nd_netdata_plugin_proc_modules_milliseconds_run_average
+nd_netdata_plugin_tc_cpu_milliseconds_persec_average
+nd_netdata_plugin_tc_time_milliseconds_run_average
+nd_netdata_private_charts_charts_average
+nd_netdata_pythond_runtime_ms_average
+nd_netdata_requests_requests_persec_average
+nd_netdata_response_time_milliseconds_request_average
+nd_netdata_server_cpu_milliseconds_persec_average
+nd_netdata_statsd_bytes_kilobits_persec_average
+nd_netdata_statsd_cpu_milliseconds_persec_average
+nd_netdata_statsd_events_events_persec_average
+nd_netdata_statsd_metrics_metrics_average
+nd_netdata_statsd_packets_packets_persec_average
+nd_netdata_statsd_reads_reads_persec_average
+nd_netdata_statsd_useful_metrics_metrics_average
+nd_netdata_tcp_connected_sockets_average
+nd_netdata_tcp_connects_events_average
+nd_netdata_web_cpu_milliseconds_persec_average
+nd_net_drops_drops_persec_average
+nd_net_net_kilobits_persec_average
+nd_net_packets_packets_persec_average
+nd_services_cpu_percent_average
+nd_services_mem_usage_MB_average
+nd_services_swap_usage_MB_average
+nd_services_throttle_io_ops_read_operations_persec_average
+nd_services_throttle_io_ops_write_operations_persec_average
+nd_services_throttle_io_read_kilobytes_persec_average
+nd_services_throttle_io_write_kilobytes_persec_average
+nd_system_active_processes_processes_average
+nd_system_cpu_percent_average
+nd_system_ctxt_context_switches_persec_average
+nd_system_entropy_entropy_average
+nd_system_forks_processes_persec_average
+nd_system_idlejitter_microseconds_lost_persec_average
+nd_system_interrupts_interrupts_persec_average
+nd_system_intr_interrupts_persec_average
+nd_system_io_kilobytes_persec_average
+nd_system_ipc_semaphore_arrays_arrays_average
+nd_system_ipc_semaphores_semaphores_average
+nd_system_ip_kilobits_persec_average
+nd_system_ipv6_kilobits_persec_average
+nd_system_load_load_average
+nd_system_net_kilobits_persec_average
+nd_system_pgpgio_kilobytes_persec_average
+nd_system_processes_processes_average
+nd_system_ram_MB_average
+nd_system_shared_memory_bytes_bytes_average
+nd_system_shared_memory_segments_segments_average
+nd_system_softirqs_softirqs_persec_average
+nd_system_softnet_stat_events_persec_average
+nd_system_swapio_kilobytes_persec_average
+nd_system_swap_MB_average
+nd_system_uptime_seconds_average
diff --git a/tests/backends/prometheus-avg.txt b/tests/backends/prometheus-avg.txt
new file mode 100644
index 0000000..1aedff2
--- /dev/null
+++ b/tests/backends/prometheus-avg.txt
@@ -0,0 +1,148 @@
+nd_apps_cpu_percentage_average
+nd_apps_cpu_system_percentage_average
+nd_apps_cpu_user_percentage_average
+nd_apps_files_open_files_average
+nd_apps_lreads_KiB_persec_average
+nd_apps_lwrites_KiB_persec_average
+nd_apps_major_faults_page_faults_persec_average
+nd_apps_mem_MiB_average
+nd_apps_minor_faults_page_faults_persec_average
+nd_apps_pipes_open_pipes_average
+nd_apps_preads_KiB_persec_average
+nd_apps_processes_processes_average
+nd_apps_pwrites_KiB_persec_average
+nd_apps_sockets_open_sockets_average
+nd_apps_swap_MiB_average
+nd_apps_threads_threads_average
+nd_apps_vmem_MiB_average
+nd_cpu_core_throttling_events_persec_average
+nd_cpu_cpu_percentage_average
+nd_cpu_interrupts_interrupts_persec_average
+nd_cpu_softirqs_softirqs_persec_average
+nd_cpu_softnet_stat_events_persec_average
+nd_disk_avgsz_KiB_operation_average
+nd_disk_await_milliseconds_operation_average
+nd_disk_backlog_milliseconds_average
+nd_disk_inodes_inodes_average
+nd_disk_io_KiB_persec_average
+nd_disk_iotime_milliseconds_persec_average
+nd_disk_mops_merged_operations_persec_average
+nd_disk_ops_operations_persec_average
+nd_disk_space_GiB_average
+nd_disk_svctm_milliseconds_operation_average
+nd_disk_util___of_time_working_average
+nd_ip_bcast_kilobits_persec_average
+nd_ip_bcastpkts_packets_persec_average
+nd_ip_ecnpkts_packets_persec_average
+nd_ip_inerrors_packets_persec_average
+nd_ip_mcast_kilobits_persec_average
+nd_ip_mcastpkts_packets_persec_average
+nd_ip_tcp_accept_queue_packets_persec_average
+nd_ip_tcpconnaborts_connections_persec_average
+nd_ip_tcpofo_packets_persec_average
+nd_ip_tcpreorders_packets_persec_average
+nd_ipv4_errors_packets_persec_average
+nd_ipv4_icmp_errors_packets_persec_average
+nd_ipv4_icmpmsg_packets_persec_average
+nd_ipv4_icmp_packets_persec_average
+nd_ipv4_packets_packets_persec_average
+nd_ipv4_sockstat_sockets_sockets_average
+nd_ipv4_sockstat_tcp_mem_KiB_average
+nd_ipv4_sockstat_tcp_sockets_sockets_average
+nd_ipv4_sockstat_udp_mem_KiB_average
+nd_ipv4_sockstat_udp_sockets_sockets_average
+nd_ipv4_tcperrors_packets_persec_average
+nd_ipv4_tcphandshake_events_persec_average
+nd_ipv4_tcpopens_connections_persec_average
+nd_ipv4_tcppackets_packets_persec_average
+nd_ipv4_tcpsock_active_connections_average
+nd_ipv4_udperrors_events_persec_average
+nd_ipv4_udppackets_packets_persec_average
+nd_ipv6_ect_packets_persec_average
+nd_ipv6_errors_packets_persec_average
+nd_ipv6_icmperrors_errors_persec_average
+nd_ipv6_icmp_messages_persec_average
+nd_ipv6_icmpmldv2_reports_persec_average
+nd_ipv6_icmpneighbor_messages_persec_average
+nd_ipv6_icmprouter_messages_persec_average
+nd_ipv6_icmptypes_messages_persec_average
+nd_ipv6_mcast_kilobits_persec_average
+nd_ipv6_mcastpkts_packets_persec_average
+nd_ipv6_packets_packets_persec_average
+nd_ipv6_sockstat6_raw_sockets_sockets_average
+nd_ipv6_sockstat6_tcp_sockets_sockets_average
+nd_ipv6_sockstat6_udp_sockets_sockets_average
+nd_ipv6_udperrors_events_persec_average
+nd_ipv6_udppackets_packets_persec_average
+nd_mem_available_MiB_average
+nd_mem_committed_MiB_average
+nd_mem_kernel_MiB_average
+nd_mem_pgfaults_faults_persec_average
+nd_mem_slab_MiB_average
+nd_mem_transparent_hugepages_MiB_average
+nd_mem_writeback_MiB_average
+nd_netdata_apps_children_fix_percentage_average
+nd_netdata_apps_cpu_milliseconds_persec_average
+nd_netdata_apps_fix_percentage_average
+nd_netdata_apps_sizes_files_persec_average
+nd_netdata_clients_connected_clients_average
+nd_netdata_compression_ratio_percentage_average
+nd_netdata_go_plugin_execution_time_ms_average
+nd_netdata_net_kilobits_persec_average
+nd_netdata_plugin_cgroups_cpu_milliseconds_persec_average
+nd_netdata_plugin_diskspace_dt_milliseconds_run_average
+nd_netdata_plugin_diskspace_milliseconds_persec_average
+nd_netdata_plugin_proc_cpu_milliseconds_persec_average
+nd_netdata_plugin_proc_modules_milliseconds_run_average
+nd_netdata_plugin_tc_cpu_milliseconds_persec_average
+nd_netdata_plugin_tc_time_milliseconds_run_average
+nd_netdata_private_charts_charts_average
+nd_netdata_pythond_runtime_ms_average
+nd_netdata_requests_requests_persec_average
+nd_netdata_response_time_milliseconds_request_average
+nd_netdata_server_cpu_milliseconds_persec_average
+nd_netdata_statsd_bytes_kilobits_persec_average
+nd_netdata_statsd_cpu_milliseconds_persec_average
+nd_netdata_statsd_events_events_persec_average
+nd_netdata_statsd_metrics_metrics_average
+nd_netdata_statsd_packets_packets_persec_average
+nd_netdata_statsd_reads_reads_persec_average
+nd_netdata_statsd_useful_metrics_metrics_average
+nd_netdata_tcp_connected_sockets_average
+nd_netdata_tcp_connects_events_average
+nd_netdata_web_cpu_milliseconds_persec_average
+nd_net_drops_drops_persec_average
+nd_net_net_kilobits_persec_average
+nd_net_packets_packets_persec_average
+nd_services_cpu_percentage_average
+nd_services_mem_usage_MiB_average
+nd_services_swap_usage_MiB_average
+nd_services_throttle_io_ops_read_operations_persec_average
+nd_services_throttle_io_ops_write_operations_persec_average
+nd_services_throttle_io_read_KiB_persec_average
+nd_services_throttle_io_write_KiB_persec_average
+nd_system_active_processes_processes_average
+nd_system_cpu_percentage_average
+nd_system_ctxt_context_switches_persec_average
+nd_system_entropy_entropy_average
+nd_system_forks_processes_persec_average
+nd_system_idlejitter_microseconds_lost_persec_average
+nd_system_interrupts_interrupts_persec_average
+nd_system_intr_interrupts_persec_average
+nd_system_io_KiB_persec_average
+nd_system_ipc_semaphore_arrays_arrays_average
+nd_system_ipc_semaphores_semaphores_average
+nd_system_ip_kilobits_persec_average
+nd_system_ipv6_kilobits_persec_average
+nd_system_load_load_average
+nd_system_net_kilobits_persec_average
+nd_system_pgpgio_KiB_persec_average
+nd_system_processes_processes_average
+nd_system_ram_MiB_average
+nd_system_shared_memory_bytes_bytes_average
+nd_system_shared_memory_segments_segments_average
+nd_system_softirqs_softirqs_persec_average
+nd_system_softnet_stat_events_persec_average
+nd_system_swapio_KiB_persec_average
+nd_system_swap_MiB_average
+nd_system_uptime_seconds_average
diff --git a/tests/backends/prometheus-raw.txt b/tests/backends/prometheus-raw.txt
new file mode 100644
index 0000000..2ac4c2c
--- /dev/null
+++ b/tests/backends/prometheus-raw.txt
@@ -0,0 +1,156 @@
+nd_apps_cpu
+nd_apps_cpu_system
+nd_apps_cpu_user
+nd_apps_files
+nd_apps_lreads
+nd_apps_lwrites
+nd_apps_major_faults
+nd_apps_mem
+nd_apps_minor_faults
+nd_apps_pipes
+nd_apps_preads
+nd_apps_processes
+nd_apps_pwrites
+nd_apps_sockets
+nd_apps_swap
+nd_apps_threads
+nd_apps_vmem
+nd_cpu_core_throttling_total
+nd_cpu_cpu_total
+nd_cpu_interrupts_total
+nd_cpu_softirqs_total
+nd_cpu_softnet_stat_total
+nd_disk_avgsz
+nd_disk_await
+nd_disk_backlog_total
+nd_disk_inodes
+nd_disk_iotime_total
+nd_disk_io_total
+nd_disk_mops_total
+nd_disk_ops_total
+nd_disk_space
+nd_disk_svctm
+nd_disk_util_total
+nd_ip_bcastpkts_total
+nd_ip_bcast_total
+nd_ip_ecnpkts_total
+nd_ip_inerrors_total
+nd_ip_mcastpkts_total
+nd_ip_mcast_total
+nd_ip_tcp_accept_queue_total
+nd_ip_tcpconnaborts_total
+nd_ip_tcpofo_total
+nd_ip_tcpreorders_total
+nd_ipv4_errors_total
+nd_ipv4_icmp_errors_total
+nd_ipv4_icmpmsg_total
+nd_ipv4_icmp_total
+nd_ipv4_packets_total
+nd_ipv4_sockstat_sockets
+nd_ipv4_sockstat_tcp_mem
+nd_ipv4_sockstat_tcp_sockets
+nd_ipv4_sockstat_udp_mem
+nd_ipv4_sockstat_udp_sockets
+nd_ipv4_tcperrors_total
+nd_ipv4_tcphandshake_total
+nd_ipv4_tcpopens_total
+nd_ipv4_tcppackets_total
+nd_ipv4_tcpsock
+nd_ipv4_udperrors_total
+nd_ipv4_udppackets_total
+nd_ipv6_ect_total
+nd_ipv6_errors_total
+nd_ipv6_icmperrors_total
+nd_ipv6_icmpmldv2_total
+nd_ipv6_icmpneighbor_total
+nd_ipv6_icmprouter_total
+nd_ipv6_icmp_total
+nd_ipv6_icmptypes_total
+nd_ipv6_mcastpkts_total
+nd_ipv6_mcast_total
+nd_ipv6_packets_total
+nd_ipv6_sockstat6_raw_sockets
+nd_ipv6_sockstat6_tcp_sockets
+nd_ipv6_sockstat6_udp_sockets
+nd_ipv6_udperrors_total
+nd_ipv6_udppackets_total
+nd_mem_available
+nd_mem_committed
+nd_mem_kernel
+nd_mem_pgfaults_total
+nd_mem_slab
+nd_mem_transparent_hugepages
+nd_mem_writeback
+nd_netdata_apps_children_fix
+nd_netdata_apps_cpu_total
+nd_netdata_apps_fix
+nd_netdata_apps_sizes_calls_total
+nd_netdata_apps_sizes_fds
+nd_netdata_apps_sizes_filenames_total
+nd_netdata_apps_sizes_files_total
+nd_netdata_apps_sizes_inode_changes_total
+nd_netdata_apps_sizes_link_changes_total
+nd_netdata_apps_sizes_new_pids_total
+nd_netdata_apps_sizes_pids
+nd_netdata_apps_sizes_targets
+nd_netdata_clients
+nd_netdata_compression_ratio
+nd_netdata_go_plugin_execution_time
+nd_netdata_net_total
+nd_netdata_plugin_cgroups_cpu_total
+nd_netdata_plugin_diskspace_dt
+nd_netdata_plugin_diskspace_total
+nd_netdata_plugin_proc_cpu_total
+nd_netdata_plugin_proc_modules
+nd_netdata_plugin_tc_cpu_total
+nd_netdata_plugin_tc_time
+nd_netdata_private_charts
+nd_netdata_pythond_runtime
+nd_netdata_requests_total
+nd_netdata_response_time
+nd_netdata_server_cpu_total
+nd_netdata_statsd_bytes_total
+nd_netdata_statsd_cpu_total
+nd_netdata_statsd_events_total
+nd_netdata_statsd_metrics
+nd_netdata_statsd_packets_total
+nd_netdata_statsd_reads_total
+nd_netdata_statsd_useful_metrics
+nd_netdata_tcp_connected
+nd_netdata_tcp_connects_total
+nd_netdata_web_cpu_total
+nd_net_drops_total
+nd_net_net_total
+nd_net_packets_total
+nd_services_cpu_total
+nd_services_mem_usage
+nd_services_swap_usage
+nd_services_throttle_io_ops_read_total
+nd_services_throttle_io_ops_write_total
+nd_services_throttle_io_read_total
+nd_services_throttle_io_write_total
+nd_system_active_processes
+nd_system_cpu_total
+nd_system_ctxt_total
+nd_system_entropy
+nd_system_forks_total
+nd_system_idlejitter
+nd_system_interrupts_total
+nd_system_intr_total
+nd_system_io_total
+nd_system_ipc_semaphore_arrays
+nd_system_ipc_semaphores
+nd_system_ip_total
+nd_system_ipv6_total
+nd_system_load
+nd_system_net_total
+nd_system_pgpgio_total
+nd_system_processes
+nd_system_ram
+nd_system_shared_memory_bytes
+nd_system_shared_memory_segments
+nd_system_softirqs_total
+nd_system_softnet_stat_total
+nd_system_swap
+nd_system_swapio_total
+nd_system_uptime
diff --git a/tests/backends/prometheus.bats b/tests/backends/prometheus.bats
new file mode 100755
index 0000000..d52f39d
--- /dev/null
+++ b/tests/backends/prometheus.bats
@@ -0,0 +1,31 @@
+#!/usr/bin/env bats
+
+validate_metrics() {
+ fname="${1}"
+ params="${2}"
+
+ curl -sS "http://localhost:19999/api/v1/allmetrics?format=prometheus&prefix=nd&timestamps=no${params}" |
+	grep -E 'nd_system_|nd_cpu_|nd_net_|nd_disk_|nd_ip_|nd_ipv4_|nd_ipv6_|nd_mem_|nd_netdata_|nd_apps_|nd_services_' |
+ sed -ne 's/{.*//p' | sort | uniq > tests/backends/new-${fname}
+ diff tests/backends/${fname} tests/backends/new-${fname}
+ rm tests/backends/new-${fname}
+}
+
+
+if [ ! -f .gitignore ]; then
+ echo "Need to run as ./tests/backends/$(basename "$0") from top level directory of git repository" >&2
+ exit 1
+fi
+
+
+@test "prometheus raw" {
+ validate_metrics prometheus-raw.txt "&data=raw"
+}
+
+@test "prometheus avg" {
+ validate_metrics prometheus-avg.txt ""
+}
+
+@test "prometheus avg oldunits" {
+ validate_metrics prometheus-avg-oldunits.txt "&oldunits=yes"
+}
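
Note: the suite above compares live agent output against the metric-name fixtures in this directory. A minimal invocation, assuming `bats` is installed and an agent is listening on localhost:19999, run from the repository top level:

    # regenerate and diff the exported prometheus metric names
    bats tests/backends/prometheus.bats
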
diff --git a/tests/health_mgmtapi/README.md b/tests/health_mgmtapi/README.md
new file mode 100644
index 0000000..1a4b2b1
--- /dev/null
+++ b/tests/health_mgmtapi/README.md
@@ -0,0 +1,15 @@
+<!--
+title: "Health command API tester"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/tests/health_mgmtapi/README.md
+-->
+
+# Health command API tester
+
+The directory `tests/health_mgmtapi` contains the test script `health-cmdapi-test.sh` for the [health command API](/web/api/health/README.md).
+
+The script can be executed with options to prepare the system for the tests, run them and restore the system to its previous state.
+
+It requires the management API to be accessible on localhost:19999 and the `api/v1/alarms?all` endpoint to return valid responses.
+It also requires read access to the management API key that is usually under `/var/lib/netdata/netdata.api.key` (`@varlibdir_POST@/netdata.api.key`).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2Fhealth_mgmtapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
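
For orientation, the requests the tester issues reduce to authenticated GETs against the management endpoint exercised later in this patch. A hedged sketch, assuming the default API key location noted above:

    # silence all alarm notifications, then restore the default state
    TOKEN="$(cat /var/lib/netdata/netdata.api.key)"
    curl -s "http://localhost:19999/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: ${TOKEN}"
    curl -s "http://localhost:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: ${TOKEN}"
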
diff --git a/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json b/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json
new file mode 100644
index 0000000..9f05efe
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_iowait" }, { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json b/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json
new file mode 100644
index 0000000..dbf8799
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger", "context": "system.cpu" }, { "alarm": "*10min_cpu_usage *load_trigger", "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json b/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json
new file mode 100644
index 0000000..a267cfd
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "DISABLE", "silencers": [ { "context": "system.cpu" }, { "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE-list.json b/tests/health_mgmtapi/expected_list/DISABLE-list.json
new file mode 100644
index 0000000..c2c7781
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/DISABLE-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "DISABLE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json b/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json
new file mode 100644
index 0000000..bbc3f4f
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json
@@ -0,0 +1 @@
+{ "all": true, "type": "DISABLE", "silencers": [] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json b/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json
new file mode 100644
index 0000000..e8aee17
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json
@@ -0,0 +1 @@
+Auth Error
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json b/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json
new file mode 100644
index 0000000..a7fc1cb
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "DISABLE", "silencers": [ { "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json b/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json
new file mode 100644
index 0000000..50119f7
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "None", "silencers": [ { "families": "load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/HOSTS-list.json b/tests/health_mgmtapi/expected_list/HOSTS-list.json
new file mode 100644
index 0000000..9db21b6
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/HOSTS-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "hosts": "*" } ] }
diff --git a/tests/health_mgmtapi/expected_list/RESET-list.json b/tests/health_mgmtapi/expected_list/RESET-list.json
new file mode 100644
index 0000000..2d3f09d
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/RESET-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "None", "silencers": [] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE-list.json b/tests/health_mgmtapi/expected_list/SILENCE-list.json
new file mode 100644
index 0000000..d157f2d
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_2-list.json b/tests/health_mgmtapi/expected_list/SILENCE_2-list.json
new file mode 100644
index 0000000..d5e6fa2
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_2-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "families": "load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_3-list.json b/tests/health_mgmtapi/expected_list/SILENCE_3-list.json
new file mode 100644
index 0000000..69e98cc
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_3-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [] } WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json
new file mode 100644
index 0000000..dd789cd
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger", "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json
new file mode 100644
index 0000000..d157f2d
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json
new file mode 100644
index 0000000..c88ef9f
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json
@@ -0,0 +1 @@
+{ "all": true, "type": "SILENCE", "silencers": [] }
diff --git a/tests/health_mgmtapi/health-cmdapi-test.sh.in b/tests/health_mgmtapi/health-cmdapi-test.sh.in
new file mode 100755
index 0000000..5abf2b1
--- /dev/null
+++ b/tests/health_mgmtapi/health-cmdapi-test.sh.in
@@ -0,0 +1,226 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1117,SC2034,SC2059,SC2086,SC2181
+
+NETDATA_VARLIB_DIR="@varlibdir_POST@"
+
+check () {
+ sec=1
+ echo -e " ${GRAY}Check: '${1}' in $sec sec"
+ sleep $sec
+ number=$RANDOM
+ resp=$(curl -s "http://$URL/api/v1/alarms?all&$number")
+ r=$(echo "${resp}" | \
+ python3 -c "import sys, json; d=json.load(sys.stdin); \
+ print(\
+ d['alarms']['system.cpu.10min_cpu_usage']['disabled'], \
+ d['alarms']['system.cpu.10min_cpu_usage']['silenced'] , \
+ d['alarms']['system.cpu.10min_cpu_iowait']['disabled'], \
+ d['alarms']['system.cpu.10min_cpu_iowait']['silenced'], \
+ d['alarms']['system.load.load_trigger']['disabled'], \
+ d['alarms']['system.load.load_trigger']['silenced'], \
+ );" 2>&1)
+ if [ $? -ne 0 ] ; then
+ echo -e " ${RED}ERROR: Unexpected response stored in /tmp/resp-$number.json"
+ echo "$resp" > /tmp/resp-$number.json
+ err=$((err+1))
+ iter=0
+ elif [ "${r}" != "${2}" ] ; then
+		echo -e "  ${GRAY}WARNING: Got '${r}'. Expected '${2}'"
+ iter=$((iter+1))
+ if [ $iter -lt 10 ] ; then
+ echo -e " ${GRAY}Repeating test "
+ check "$1" "$2"
+ else
+			echo -e "  ${RED}ERROR: Got '${r}'. Expected '${2}'"
+ iter=0
+ err=$((err+1))
+ fi
+ else
+ echo -e " ${GREEN}Success"
+ iter=0
+ fi
+}
+
+cmd () {
+ echo -e "${WHITE}Cmd '${1}'"
+ echo -en " ${GRAY}Expecting '${2}' : "
+ RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?${1}" -H "X-Auth-Token: $TOKEN" 2>&1)
+ if [ "${RESPONSE}" != "${2}" ] ; then
+ echo -e "${RED}ERROR: Response '${RESPONSE}'"
+ err=$((err+1))
+ else
+ echo -e "${GREEN}Success"
+ fi
+}
+
+check_list() {
+ RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: $TOKEN" 2>&1)
+
+ NAME="$1-list.json"
+ echo $RESPONSE > $NAME
+ diff $NAME expected_list/$NAME 1>/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo -e "${GREEN}Success: The list command got the correct answer for $NAME!"
+ else
+		echo -e "${RED}ERROR: the files $NAME and expected_list/$NAME do not match."
+ exit 1
+ fi
+}
+
+WHITE='\033[0;37m'
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+GRAY='\033[0;37m'
+
+SETUP=0
+RESTART=0
+CLEANUP=0
+TEST=0
+URL="localhost:19999"
+
+err=0
+
+
+ HEALTH_CMDAPI_MSG_AUTHERROR="Auth Error"
+ HEALTH_CMDAPI_MSG_SILENCEALL="All alarm notifications are silenced"
+ HEALTH_CMDAPI_MSG_DISABLEALL="All health checks are disabled"
+ HEALTH_CMDAPI_MSG_RESET="All health checks and notifications are enabled"
+ HEALTH_CMDAPI_MSG_DISABLE="Health checks disabled for alarms matching the selectors"
+ HEALTH_CMDAPI_MSG_SILENCE="Alarm notifications silenced for alarms matching the selectors"
+ HEALTH_CMDAPI_MSG_ADDED="Alarm selector added"
+ HEALTH_CMDAPI_MSG_INVALID_KEY="Invalid key. Ignoring it."
+ HEALTH_CMDAPI_MSG_STYPEWARNING="WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command."
+ HEALTH_CMDAPI_MSG_NOSELECTORWARNING="WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors."
+
+ if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r CORRECT_TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+ else
+ echo "${NETDATA_VARLIB_DIR}/netdata.api.key not found"
+ exit 1
+ fi
+ # Set correct token
+ TOKEN="${CORRECT_TOKEN}"
+
+ # Test default state
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Test auth failure
+ TOKEN="Wrong token"
+ cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_AUTHERROR"
+ check "Default State" "False False False False False False"
+ check_list "DISABLE_ALL_ERROR"
+
+ # Set correct token
+ TOKEN="${CORRECT_TOKEN}"
+
+ # Test disable
+ cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_DISABLEALL"
+ check "All disabled" "True False True False True False"
+ check_list "DISABLE_ALL"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Test silence
+ cmd "cmd=SILENCE ALL" "$HEALTH_CMDAPI_MSG_SILENCEALL"
+ check "All silenced" "False True False True False True"
+ check_list "SILENCE_ALL"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Add silencer by name
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=SILENCE&alarm=*10min_cpu_usage *load_trigger" "${resp}"
+ check "Silence notifications for alarm1 and load_trigger" "False True False False False True"
+ check_list "SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER"
+
+ # Convert to disable health checks
+ cmd "cmd=DISABLE" "$HEALTH_CMDAPI_MSG_DISABLE"
+ check "Disable notifications for alarm1 and load_trigger" "True False False False True False"
+ check_list "DISABLE"
+
+ # Convert back to silence notifications
+ cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE"
+ check "Silence notifications for alarm1 and load_trigger" "False True False False False True"
+ check_list "SILENCE"
+
+ # Add second silencer by name
+ cmd "alarm=*10min_cpu_iowait" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence notifications for alarm1,alarm2 and load_trigger" "False True False True False True"
+ check_list "ALARM_CPU_IOWAIT"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add silencer by chart
+ printf -v resp "$HEALTH_CMDAPI_MSG_DISABLE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=DISABLE&chart=system.load" "${resp}"
+ check "Disabled system.load" "False False False False True False"
+ check_list "DISABLE_SYSTEM_LOAD"
+
+ # Add silencer by context
+ cmd "context=system.cpu" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Disabled system.cpu and system.load" "True False True False True False"
+ check_list "CONTEXT_SYSTEM_CPU"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add second condition to a selector (AND)
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=SILENCE&alarm=*10min_cpu_usage *load_trigger&chart=system.load" "${resp}"
+ check "Silence notifications for load_trigger" "False False False False False True"
+ check_list "SILENCE_ALARM_CPU_USAGE"
+
+ # Add second selector with two conditions
+ cmd "alarm=*10min_cpu_usage *load_trigger&context=system.cpu" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence notifications for load_trigger" "False True False False False True"
+ check_list "ALARM_CPU_USAGE"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add silencer without a command to disable or silence alarms
+ printf -v resp "$HEALTH_CMDAPI_MSG_ADDED\n$HEALTH_CMDAPI_MSG_STYPEWARNING"
+ cmd "families=load" "${resp}"
+ check "Family selector with no command" "False False False False False False"
+ check_list "FAMILIES_LOAD"
+
+ # Add silence command
+ cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE"
+ check "Silence family load" "False False False False False True"
+ check_list "SILENCE_2"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add command without silencers
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_NOSELECTORWARNING"
+ cmd "cmd=SILENCE" "${resp}"
+ check "Command with no selector" "False False False False False False"
+ check_list "SILENCE_3"
+
+ # Add hosts silencer
+ cmd "hosts=*" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence all hosts" "False True False True False True"
+ check_list "HOSTS"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+if [ $err -gt 0 ] ; then
+ echo "$err error(s) found"
+ exit 1
+fi
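
Since this is a `.sh.in` template, `@varlibdir_POST@` is normally expanded by the build system. A sketch for an ad-hoc run, assuming the default varlib path:

    # expand the placeholder by hand and run the suite against a local agent
    sed 's|@varlibdir_POST@|/var/lib/netdata|' tests/health_mgmtapi/health-cmdapi-test.sh.in > /tmp/health-cmdapi-test.sh
    bash /tmp/health-cmdapi-test.sh
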
diff --git a/tests/installer/checksums.sh b/tests/installer/checksums.sh
new file mode 100755
index 0000000..3fbbfa3
--- /dev/null
+++ b/tests/installer/checksums.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#
+# Mechanism to validate kickstart files integrity status
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pawel Krupa (pawel@netdata.cloud)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+# Author : Austin S. Hemmelgarn (austin@netdata.cloud)
+set -e
+
+# Fail unless we are at the top level directory of the netdata git repository
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel 2>/dev/null || echo "")")
+CWD="$(git rev-parse --show-cdup 2>/dev/null || echo "")"
+if [ -n "$CWD" ] || [ "${TOP_LEVEL}" != "netdata" ]; then
+ echo "Run as ./tests/installer/$(basename "$0") from top level directory of netdata git repository"
+ echo "Kickstart validation process aborted"
+ exit 1
+fi
+
+check_file() {
+ README_MD5=$(grep "$1" "$2" | grep md5sum | grep curl | cut -d '"' -f2)
+ KICKSTART_URL="https://my-netdata.io/$1"
+ KICKSTART="packaging/installer/$1"
+ KICKSTART_MD5="$(md5sum "${KICKSTART}" | cut -d' ' -f1)"
+ CALCULATED_MD5="$(curl -Ss "${KICKSTART_URL}" | md5sum | cut -d ' ' -f 1)"
+
+ # Conditionally run the website validation
+ if [ -z "${LOCAL_ONLY}" ]; then
+ echo "Validating ${KICKSTART_URL} against local file ${KICKSTART} with MD5 ${KICKSTART_MD5}.."
+ if [ "$KICKSTART_MD5" = "$CALCULATED_MD5" ]; then
+ echo "${KICKSTART_URL} looks fine"
+ else
+ echo "${KICKSTART_URL} md5sum does not match local file, it needs to be updated"
+ exit 2
+ fi
+ fi
+
+ echo "Validating documentation for $1"
+ if [ "$KICKSTART_MD5" != "$README_MD5" ]; then
+ echo "Invalid checksum for $1 in $2."
+ echo "checksum in docs: $README_MD5"
+ echo "current checksum: $KICKSTART_MD5"
+ exit 2
+ else
+ echo "$1 MD5Sum is well documented"
+ fi
+}
+
+check_file kickstart.sh packaging/installer/methods/kickstart.md
+check_file kickstart-static64.sh packaging/installer/methods/kickstart-64.md
+
+echo "No problems found, exiting succesfully!"
diff --git a/tests/installer/slack.sh b/tests/installer/slack.sh
new file mode 100755
index 0000000..3f3eff6
--- /dev/null
+++ b/tests/installer/slack.sh
@@ -0,0 +1,65 @@
+# No shebang necessary: this file is a bash library meant to be sourced.
+# BASH Lib: Simple incoming webhook for slack integration.
+#
+# The script expects the following parameters to be defined by the upper layer:
+# SLACK_NOTIFY_WEBHOOK_URL
+# SLACK_BOT_NAME
+# SLACK_CHANNEL
+#
+# Copyright:
+#
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+post_message() {
+ TYPE="$1"
+ MESSAGE="$2"
+ CUSTOM_CHANNEL="$3"
+
+ case "$TYPE" in
+ "PLAIN_MESSAGE")
+ curl -X POST --data-urlencode "payload={\"channel\": \"${SLACK_CHANNEL}\", \"username\": \"${SLACK_BOT_NAME}\", \"text\": \"${MESSAGE}\", \"icon_emoji\": \":space_invader:\"}" "${SLACK_NOTIFY_WEBHOOK_URL}"
+ ;;
+ "TRAVIS_MESSAGE")
+ if [ "${TRAVIS_EVENT_TYPE}" == "pull_request" ] || [ "${TRAVIS_BRANCH}" != "master" ] ; then
+ echo "Skipping notification due to build type."
+ return 0
+ fi
+
+ if [ -n "${CUSTOM_CHANNEL}" ]; then
+ echo "Sending travis message to custom channel ${CUSTOM_CHANNEL}"
+ OPTIONAL_CHANNEL_INFO="\"channel\": \"${CUSTOM_CHANNEL}\","
+ fi
+
+ POST_MESSAGE="{
+ ${OPTIONAL_CHANNEL_INFO}
+ \"text\": \"${TRAVIS_REPO_SLUG}, ${MESSAGE}\",
+ \"attachments\": [{
+ \"text\": \"${TRAVIS_JOB_NUMBER}: Event type '${TRAVIS_EVENT_TYPE}', on '${TRAVIS_OS_NAME}' \",
+ \"fallback\": \"I could not determine the build\",
+ \"callback_id\": \"\",
+ \"color\": \"#3AA3E3\",
+ \"attachment_type\": \"default\",
+ \"actions\": [
+ {
+ \"name\": \"${TRAVIS_BUILD_NUMBER}\",
+ \"text\": \"Build #${TRAVIS_BUILD_NUMBER}\",
+ \"type\": \"button\",
+ \"url\": \"${TRAVIS_BUILD_WEB_URL}\"
+ },
+ {
+ \"name\": \"${TRAVIS_JOB_NUMBER}\",
+ \"text\": \"Job #${TRAVIS_JOB_NUMBER}\",
+ \"type\": \"button\",
+ \"url\": \"${TRAVIS_JOB_WEB_URL}\"
+ }]
+ }]
+ }"
+ echo "Sending ${POST_MESSAGE}"
+ curl -X POST --data-urlencode "payload=${POST_MESSAGE}" "${SLACK_NOTIFY_WEBHOOK_URL}"
+ ;;
+ *)
+ echo "Unrecognized message type \"$TYPE\" was given"
+ return 1
+ ;;
+ esac
+}
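
As the header notes, the library expects its configuration in the caller's environment. A hedged usage sketch, with placeholder values for the webhook URL, bot name and channel:

    # source the library and post a plain message
    export SLACK_NOTIFY_WEBHOOK_URL="https://hooks.slack.com/services/XXX/YYY/ZZZ"
    export SLACK_BOT_NAME="netdata-ci"
    export SLACK_CHANNEL="#builds"
    . tests/installer/slack.sh
    post_message "PLAIN_MESSAGE" "kickstart checksums verified"
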
diff --git a/tests/k6/data.js b/tests/k6/data.js
new file mode 100644
index 0000000..fb4e087
--- /dev/null
+++ b/tests/k6/data.js
@@ -0,0 +1,67 @@
+import http from "k6/http";
+import { log, check, group, sleep } from "k6";
+import { Rate } from "k6/metrics";
+
+// A custom metric to track failure rates
+var failureRate = new Rate("check_failure_rate");
+
+// Options
+export let options = {
+ stages: [
+ // Linearly ramp up from 1 to 20 VUs during first 30s
+ { target: 20, duration: "30s" },
+    // Hold at 20 VUs for the next 1 minute
+    { target: 20, duration: "1m" },
+    // Linearly ramp down from 20 to 0 VUs over the last 10 seconds
+    { target: 0, duration: "10s" }
+ ],
+ thresholds: {
+ // We want the 95th percentile of all HTTP request durations to be less than 500ms
+ "http_req_duration": ["p(95)<500"],
+ // Requests with the fast tag should finish even faster
+ "http_req_duration{fast:yes}": ["p(99)<250"],
+ // Thresholds based on the custom metric we defined and use to track application failures
+ "check_failure_rate": [
+ // Global failure rate should be less than 1%
+ "rate<0.01",
+ // Abort the test early if it climbs over 5%
+ { threshold: "rate<=0.05", abortOnFail: true },
+ ],
+ },
+};
+
+function rnd(min, max) {
+ min = Math.ceil(min);
+ max = Math.floor(max);
+    return Math.floor(Math.random() * (max - min)) + min; // the maximum is exclusive and the minimum is inclusive
+}
+
+// Main function
+export default function () {
+ // Control what the data request asks for
+ let charts = [ "example.random" ]
+ let chartmin = 0;
+ let chartmax = charts.length - 1;
+ let aftermin = 60;
+ let aftermax = 3600;
+ let beforemin = 3503600;
+ let beforemax = 3590000;
+ let pointsmin = 300;
+ let pointsmax = 3600;
+
+ group("Requests", function () {
+        // Execute multiple requests in parallel, as a browser would, to fetch data for the charts. The data request takes the longest.
+ let resps = http.batch([
+ ["GET", "http://localhost:19999/api/v1/info", { tags: { fast: "yes" } }],
+ ["GET", "http://localhost:19999/api/v1/charts", { tags: { fast: "yes" } }],
+ ["GET", "http://localhost:19999/api/v1/data?chart="+charts[rnd(chartmin,chartmax)]+"&before=-"+rnd(beforemin,beforemax)+"&after=-"+rnd(aftermin,aftermax)+"&points="+rnd(pointsmin,pointsmax)+"&format=json&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&_="+rnd(1,1000000000000), { }],
+ ["GET", "http://localhost:19999/api/v1/alarms", { tags: { fast: "yes" } }]
+ ]);
+ // Combine check() call with failure tracking
+ failureRate.add(!check(resps, {
+ "status is 200": (r) => r[0].status === 200 && r[1].status === 200
+ }));
+ });
+
+ sleep(Math.random() * 2 + 1); // Random sleep between 1s and 3s
+}
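
The script is a standard k6 scenario, so it runs directly with the k6 CLI, assuming k6 is installed and a local agent exposes the `example.random` chart:

    # ramp up to 20 virtual users against the local agent
    k6 run tests/k6/data.js
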
diff --git a/tests/lifecycle.bats b/tests/lifecycle.bats
new file mode 100755
index 0000000..728c570
--- /dev/null
+++ b/tests/lifecycle.bats
@@ -0,0 +1,61 @@
+#!/usr/bin/env bats
+#
+# Netdata installation lifecycle testing script.
+# This is to validate the install, update and uninstall of netdata
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author  : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+#
+
+INSTALLATION="$BATS_TMPDIR/installation"
+ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
+# list of files which need to be checked. Path cannot start from '/'
+FILES="usr/libexec/netdata/plugins.d/go.d.plugin
+ usr/libexec/netdata/plugins.d/charts.d.plugin
+ usr/libexec/netdata/plugins.d/python.d.plugin
+ usr/libexec/netdata/plugins.d/node.d.plugin"
+
+DIRS="usr/sbin/netdata
+ etc/netdata
+ usr/share/netdata
+ usr/libexec/netdata
+ var/cache/netdata
+ var/lib/netdata
+ var/log/netdata"
+
+setup() {
+	# Fail unless we are at the top level directory of the netdata git repository
+ TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+ CWD=$(git rev-parse --show-cdup || echo "")
+ if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as ./tests/lifecycle/$(basename "$0") from top level directory of git repository"
+ exit 1
+ fi
+}
+
+@test "install netdata" {
+ ./netdata-installer.sh --dont-wait --dont-start-it --auto-update --install "${INSTALLATION}"
+
+ # Validate particular files
+ for file in $FILES; do
+ [ ! -f "$BATS_TMPDIR/$file" ]
+ done
+
+ # Validate particular directories
+ for a_dir in $DIRS; do
+ [ ! -d "$BATS_TMPDIR/$a_dir" ]
+ done
+}
+
+@test "update netdata" {
+ export ENVIRONMENT_FILE="${ENV}"
+ ${INSTALLATION}/netdata/usr/libexec/netdata/netdata-updater.sh --not-running-from-cron
+ ! grep "new_installation" "${ENV}"
+}
+
+@test "uninstall netdata" {
+ ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}"
+ [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ]
+ [ ! -f "/etc/cron.daily/netdata-updater" ]
+}
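
Like the backend suite, this file is driven by `bats`. The install test builds into `$BATS_TMPDIR`, so the agent's build dependencies must be present:

    # full install -> update -> uninstall cycle, run from the repository top level
    bats tests/lifecycle.bats
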
diff --git a/tests/node.d/fronius.chart.spec.js b/tests/node.d/fronius.chart.spec.js
new file mode 100644
index 0000000..92e88d2
--- /dev/null
+++ b/tests/node.d/fronius.chart.spec.js
@@ -0,0 +1,162 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "chart",
+ module: this
+});
+
+describe("fronius chart creation", function () {
+
+ var chartPrefix = "fronius_chart.";
+
+ beforeAll(function () {
+ // change this to enable debug log
+ netdata.options.DEBUG = false;
+ });
+
+ afterAll(function () {
+ deleteProperties(subject.charts)
+ });
+
+ it("should return a basic chart dimension", function () {
+ var result = subject.createBasicDimension("id", "name", 2);
+
+ expect(result.divisor).toBe(2);
+ expect(result.id).toBe("id");
+ expect(result.algorithm).toEqual(netdata.chartAlgorithms.absolute);
+ expect(result.multiplier).toBe(1);
+ });
+
+ it("should return the power chart definition", function () {
+ var suffix = "power";
+ var result = subject.getSitePowerChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("power");
+ expect(result.context).toBe("fronius.power");
+ expect(result.dimensions[subject.powerGridId].name).toBe("grid");
+ expect(result.dimensions[subject.powerPvId].name).toBe("photovoltaics");
+ expect(result.dimensions[subject.powerAccuId].name).toBe("accumulator");
+ expect(Object.keys(result.dimensions).length).toBe(3);
+ });
+
+ it("should return the consumption chart definition", function () {
+ var suffix = "Load";
+ var result = subject.getSiteConsumptionChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("consumption");
+ expect(result.context).toBe("fronius.consumption");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.consumptionLoadId].name).toBe("load");
+ });
+
+ it("should return the autonomy chart definition", function () {
+ var suffix = "Autonomy";
+ var result = subject.getSiteAutonomyChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("%");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("autonomy");
+ expect(result.context).toBe("fronius.autonomy");
+ expect(Object.keys(result.dimensions).length).toBe(3);
+ expect(result.dimensions[subject.autonomyId].name).toBe("autonomy");
+ expect(result.dimensions[subject.consumptionSelfId].name).toBe("self_consumption");
+ });
+
+ it("should return the energy today chart definition", function () {
+ var suffix = "Energy today";
+ var result = subject.getSiteEnergyTodayChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("kWh");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("energy");
+ expect(result.context).toBe("fronius.energy.today");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.energyTodayId].name).toBe("today");
+ });
+
+ it("should return the energy year chart definition", function () {
+ var suffix = "Energy year";
+ var result = subject.getSiteEnergyYearChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("kWh");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("energy");
+ expect(result.context).toBe("fronius.energy.year");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.energyYearId].name).toBe("year");
+ });
+
+ it("should return the inverter chart definition with a single numerical inverter", function () {
+ var inverters = {
+ "1": {}
+ };
+ var suffix = "numerical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions["1"].name).toBe("inverter_1");
+ });
+
+ it("should return the inverter chart definition with a single alphabetical inverter", function () {
+ var key = "Cellar";
+ var inverters = {
+ "Cellar": {}
+ };
+ var suffix = "alphabetical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[key].name).toBe(key);
+ });
+
+    it("should return the inverter chart definition with multiple alphanumerical inverters", function () {
+ var alpha = "Cellar";
+ var numerical = 1;
+ var inverters = {
+ "Cellar": {},
+ "1": {}
+ };
+ var suffix = "alphanumerical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(2);
+ expect(result.dimensions[alpha].name).toBe(alpha);
+ expect(result.dimensions[numerical].name).toBe("inverter_" + numerical);
+ });
+
+ it("should return the same chart definition on second call for lazy loading", function () {
+ var first = subject.getSitePowerChart(service, "id");
+ var second = subject.getSitePowerChart(service, "id");
+
+ expect(first).toBe(second);
+ });
+});
diff --git a/tests/node.d/fronius.parse.spec.js b/tests/node.d/fronius.parse.spec.js
new file mode 100644
index 0000000..e6f308f
--- /dev/null
+++ b/tests/node.d/fronius.parse.spec.js
@@ -0,0 +1,333 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "parse",
+ module: this
+});
+
+var root = {
+ "Body": {
+ "Data": {
+ "Site": {},
+ "Inverters": {}
+ }
+ }
+};
+
+describe("fronius parsing for power chart", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 3000 for P_Grid when rounded", function () {
+ site.P_Grid = 2999.501;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(3000);
+ });
+
+ it("should return -3000 for P_Grid", function () {
+ site.P_Grid = -3000;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(-3000);
+ });
+
+ it("should return 0 for P_Grid if it is null", function () {
+ site.P_Grid = null;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Grid if it is zero", function () {
+ site.P_Grid = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return -100 for P_Akku", function () {
+        // it is unclear whether negative values are possible for P_Akku (couldn't test, and no API docs were found).
+ site.P_Akku = -100;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(-100);
+ });
+
+ it("should return 0 for P_Akku if it is null", function () {
+ site.P_Akku = null;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Akku if it is zero", function () {
+ site.P_Akku = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 100 for P_PV", function () {
+ site.P_PV = 100;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for P_PV if it is zero", function () {
+ site.P_PV = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_PV if it is null", function () {
+ site.P_PV = null;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_PV if it is negative", function () {
+ // solar panels shouldn't consume anything, only produce.
+ site.P_PV = -1;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+});
+
+describe("fronius parsing for consumption", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 1000 for P_Load when rounded", function () {
+ site.P_Load = 1000.499;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(1000);
+ });
+
+ it("should return absolute value for P_Load when negative", function () {
+ /*
+        With firmware 3.7.4 the API sometimes returns negative values for P_Load,
+        which makes no sense: some device always consumes electricity around the clock.
+        The best we can do is take the absolute value, since 0 wouldn't make much sense either.
+        This workaround holds up: no strange peaks were observed during long-term testing.
+ */
+ site.P_Load = -50;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(50);
+ });
+
+ it("should return 0 for P_Load if it is null", function () {
+ site.P_Load = null;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Load if it is zero", function () {
+ site.P_Load = 0;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(0);
+ });
+
+});
+
+describe("fronius parsing for autonomy", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 100 for rel_Autonomy", function () {
+ site.rel_Autonomy = 100;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for rel_Autonomy if it is zero", function () {
+ site.rel_Autonomy = 0;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for rel_Autonomy if it is null", function () {
+ site.rel_Autonomy = null;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 20 for rel_Autonomy if it is 20", function () {
+ site.rel_Autonomy = 20.1;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(20);
+ });
+
+ it("should return 20 for rel_SelfConsumption if it is 19.5", function () {
+ site.rel_SelfConsumption = 19.5;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(20);
+ });
+
+ it("should return 100 for rel_SelfConsumption if it is null", function () {
+ /*
+ During testing it could be observed that the API is delivering null if the solar panels
+ do not produce enough energy to supply the local load. But in this case it should be 100, since all
+ the produced energy is directly consumed.
+ */
+ site.rel_SelfConsumption = null;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for rel_SelfConsumption if it is zero", function () {
+ site.rel_SelfConsumption = 0;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for solarConsumption if PV is null", function () {
+ site.P_PV = null;
+ site.P_Load = -1000;
+ var result = subject.parseAutonomyChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.solarConsumptionId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 100 for solarConsumption if Load is higher than solar power", function () {
+ site.P_PV = 500;
+ site.P_Load = -1500;
+ var result = subject.parseAutonomyChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.solarConsumptionId);
+ expect(result.value).toBe(100);
+ });
+
+    it("should return 50 for solarConsumption if Load is half the solar power", function () {
+ site.P_PV = 3000;
+ site.P_Load = -1500;
+ var result = subject.parseAutonomyChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.solarConsumptionId);
+ expect(result.value).toBe(50);
+ });
+});
+
+describe("fronius parsing for energy", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 10000 for E_Day", function () {
+ site.E_Day = 10000;
+ var result = subject.parseEnergyTodayChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyTodayId);
+ expect(result.value).toBe(10000);
+ });
+
+ it("should return 0 for E_Day if it is negative", function () {
+ /*
+ The solar panels can't produce negative energy, really. It would be a fault of the API.
+ */
+ site.E_Day = -0.4;
+ var result = subject.parseEnergyTodayChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyTodayId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 100'000 for E_Year", function () {
+ site.E_Year = 100000.4;
+ var result = subject.parseEnergyYearChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyYearId);
+ expect(result.value).toBe(100000);
+ });
+
+ it("should return 0 for E_Year if it is negative", function () {
+ /*
+        A return value of 0 only makes sense on New Year's Eve anyway, when the counter is reset.
+        A negative value, however, is a fault of the API. It wouldn't make sense.
+ */
+ site.E_Year = -1;
+ var result = subject.parseEnergyYearChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyYearId);
+ expect(result.value).toBe(0);
+ });
+});
+
+describe("fronius parsing for inverters", function () {
+
+ var inverters = root.Body.Data.Inverters;
+
+ afterEach(function () {
+ deleteProperties(inverters);
+ });
+
+ it("should return 1000 for P for inverter with name", function () {
+ inverters["cellar"] = {
+ P: 1000
+ };
+ var result = subject.parseInverterChart(service, inverters).dimensions[0];
+
+ expect(result.name).toBe("cellar");
+ expect(result.value).toBe(1000);
+ });
+
+});
diff --git a/tests/node.d/fronius.process.spec.js b/tests/node.d/fronius.process.spec.js
new file mode 100644
index 0000000..141fa8a
--- /dev/null
+++ b/tests/node.d/fronius.process.spec.js
@@ -0,0 +1,75 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "process",
+ module: this
+});
+
+var exampleResponse = {
+ "Body": {
+ "Data": {
+ "Site": {
+ "Mode": "meter",
+ "P_Grid": -3430.729923,
+ "P_Load": -910.270077,
+ "P_Akku": null,
+ "P_PV": 4341,
+ "rel_SelfConsumption": 20.969133,
+ "rel_Autonomy": 100,
+ "E_Day": 57230,
+ "E_Year": 6425915.5,
+ "E_Total": 15388710,
+ "Meter_Location": "grid"
+ },
+ "Inverters": {
+ "1": {
+ "DT": 123,
+ "P": 4341,
+ "E_Day": 57230,
+ "E_Year": 6425915.5,
+ "E_Total": 15388710
+ }
+ }
+ }
+ }
+};
+
+describe("fronius main processing", function () {
+
+ beforeAll(function () {
+ // change this to enable debug log
+ netdata.options.DEBUG = false;
+ });
+
+ beforeEach(function () {
+ deleteProperties(subject.charts);
+ });
+
+ it("should send parsed values to netdata", function () {
+ netdata.send = jasmine.createSpy("send");
+
+ subject.processResponse(service, exampleResponse);
+
+ expect(netdata.send.calls.count()).toBe(6);
+
+ // check if some parsed values were sent.
+ var powerChart = netdata.send.calls.argsFor(5)[0];
+
+ expect(powerChart).toContain("SET p_grid = -3431");
+ expect(powerChart).toContain("SET p_pv = 4341");
+
+ var inverterChart = netdata.send.calls.argsFor(0)[0];
+
+ expect(inverterChart).toContain("SET 1 = 4341");
+
+ var autonomyChart = netdata.send.calls.argsFor(3)[0];
+ expect(autonomyChart).toContain("SET rel_selfconsumption = 21");
+ });
+
+
+});
diff --git a/tests/node.d/fronius.validation.spec.js b/tests/node.d/fronius.validation.spec.js
new file mode 100644
index 0000000..b7938d5
--- /dev/null
+++ b/tests/node.d/fronius.validation.spec.js
@@ -0,0 +1,155 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "validation",
+ module: this
+});
+
+describe("fronius response validation", function () {
+
+ it("should do nothing if response is null", function () {
+ netdata.send = jasmine.createSpy("send");
+
+ subject.processResponse(service, null);
+ var result = netdata.send.calls.count();
+
+ expect(result).toBe(0);
+ });
+
+ it("should return null if response is null", function () {
+ var result = subject.convertToJson(null);
+
+ expect(result).toBeNull();
+ });
+
+ it("should return null and log error if response cannot be parsed", function () {
+ netdata.error = jasmine.createSpy("error");
+
+ // trailing commas are enough to create syntax exceptions
+ var result = subject.convertToJson("{name,}");
+
+ expect(result).toBeNull();
+ expect(netdata.error.calls.count()).toBe(1);
+ });
+
+ it("should return true if response is valid", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Site": {
+ "Mode": "meter"
+ },
+ "Inverters": {
+ "1": {}
+ }
+ }
+ }
+ });
+
+ expect(result).toBeTruthy();
+ });
+
+ it("should return false if response is missing data", function () {
+ var result = subject.isResponseValid({
+ "Body": {}
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+ it("should return false if response is missing inverter", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Site": {}
+ }
+ }
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+    it("should return false if response is missing site", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Inverters": {}
+ }
+ }
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+});
+
+describe("fronius configuration validation", function () {
+
+ it("should return 0 if there are no servers configured", function () {
+ var result = subject.configure({});
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0 if the servers array is empty", function () {
+ var result = subject.configure({
+ "servers": []
+ });
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0 if there is one server configured incorrectly", function () {
+ var result = subject.configure({
+ "servers": [{}]
+ });
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 1 if there is one server configured", function () {
+ subject.serviceExecute = jasmine.createSpy("serviceExecute");
+ var name = "solar1";
+ var result = subject.configure({
+ "servers": [{
+ "name": name,
+ "api_path": "/api/",
+ "hostname": "solar1.local"
+ }]
+ });
+
+ expect(result).toBe(1);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name, "solar1.local/api/", 5);
+ });
+
+ it("should return 2 if there are two servers configured", function () {
+ subject.serviceExecute = jasmine.createSpy("serviceExecute");
+ var name1 = "solar 1";
+ var name2 = "solar 2";
+ var result = subject.configure({
+ "servers": [
+ {
+ "name": name1,
+ "api_path": "/",
+ "hostname": "solar1.local"
+ },
+ {
+ "name": name2,
+ "api_path": "/",
+ "hostname": "solar2.local",
+ "update_every": 3
+ }
+ ]
+ });
+
+ expect(result).toBe(2);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name1, "solar1.local/", 5);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name2, "solar2.local/", 3);
+ });
+
+});
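
These specs use plain Jasmine globals, plus a `deleteProperties` helper expected to come from a spec helper file. A hedged way to run them, assuming a local Jasmine installation and the node.d sources at `../../node.d` relative to the specs:

    # run all fronius specs
    npx jasmine tests/node.d/*.spec.js
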
diff --git a/tests/profile/Makefile b/tests/profile/Makefile
new file mode 100644
index 0000000..9348a48
--- /dev/null
+++ b/tests/profile/Makefile
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+COMMON_CFLAGS = -I ../../ -DTARGET_OS=1 -Wall -Wextra
+PROFILE_CFLAGS = -O1 -ggdb $(COMMON_CFLAGS)
+PERFORMANCE_CFLAGS = -O2 $(COMMON_CFLAGS)
+
+CFLAGS = $(PERFORMANCE_CFLAGS)
+
+LIBNETDATA_FILES = \
+ ../../libnetdata/popen/popen.o \
+ ../../libnetdata/storage_number/storage_number.o \
+ ../../libnetdata/avl/avl.o \
+ ../../libnetdata/socket/socket.o \
+ ../../libnetdata/os.o \
+ ../../libnetdata/clocks/clocks.o \
+ ../../libnetdata/procfile/procfile.o \
+ ../../libnetdata/statistical/statistical.o \
+ ../../libnetdata/eval/eval.o \
+ ../../libnetdata/threads/threads.o \
+ ../../libnetdata/dictionary/dictionary.o \
+ ../../libnetdata/simple_pattern/simple_pattern.o \
+ ../../libnetdata/url/url.o \
+ ../../libnetdata/config/appconfig.o \
+ ../../libnetdata/libnetdata.o \
+ ../../libnetdata/buffer/buffer.o \
+ ../../libnetdata/adaptive_resortable_list/adaptive_resortable_list.o \
+ ../../libnetdata/locks/locks.o \
+ ../../libnetdata/log/log.o \
+ $(NULL)
+
+COMMON_LDFLAGS = $(LIBNETDATA_FILES) -pthread -lm
+
+all: statsd-stress benchmark-procfile-parser test-eval benchmark-dictionary benchmark-value-pairs
+
+benchmark-procfile-parser: benchmark-procfile-parser.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+benchmark-dictionary: benchmark-dictionary.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+benchmark-value-pairs: benchmark-value-pairs.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+statsd-stress: statsd-stress.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+test-eval: test-eval.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+clean:
+ rm -f benchmark-procfile-parser statsd-stress test-eval benchmark-dictionary benchmark-value-pairs
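
The Makefile links against prebuilt libnetdata objects, so netdata must be built first. To switch from the default performance flags to the debug-friendly profile flags defined above:

    # rebuild the benchmarks with -O1 -ggdb instead of -O2
    make clean
    make CFLAGS="-O1 -ggdb -I ../../ -DTARGET_OS=1 -Wall -Wextra"
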
diff --git a/tests/profile/benchmark-dictionary.c b/tests/profile/benchmark-dictionary.c
new file mode 100644
index 0000000..30c098d
--- /dev/null
+++ b/tests/profile/benchmark-dictionary.c
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+/*
+ * 1. build netdata (as normally)
+ * 2. cd tests/profile/
+ * 3. compile with:
+ * gcc -O3 -Wall -Wextra -I ../../src/ -I ../../ -o benchmark-dictionary benchmark-dictionary.c ../../src/dictionary.o ../../src/log.o ../../src/avl.o ../../src/common.o -pthread
+ *
+ */
+
+#include "config.h"
+#include "libnetdata/libnetdata.h"
+
+struct myvalue {
+ int i;
+};
+
+void netdata_cleanup_and_exit(int ret) { exit(ret); }
+
+int main(int argc, char **argv) {
+ if(argc || argv) {;}
+
+// DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED|DICTIONARY_FLAG_WITH_STATISTICS);
+ DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_WITH_STATISTICS);
+ if(!dict) fatal("Cannot create dictionary.");
+
+ struct rusage start, end;
+ unsigned long long dt;
+ char buf[100 + 1];
+ struct myvalue value, *v;
+ int i, max = 30000000, max2;
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Inserting %d entries in the dictionary\n", max);
+ for(i = 0; i < max; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ dictionary_set(dict, buf, &value, sizeof(struct myvalue));
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+    fprintf(stderr, "Added %d entries in %llu microseconds: %llu inserts per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Retrieving %d entries from the dictionary\n", max);
+ for(i = 0; i < max; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ v = dictionary_get(dict, buf);
+ if(!v)
+ fprintf(stderr, "ERROR: cannot get value %d from the dictionary\n", i);
+ else if(v->i != i)
+ fprintf(stderr, "ERROR: expected %d but got %d\n", i, v->i);
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+    fprintf(stderr, "Read %d entries in %llu microseconds: %llu searches per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Resetting %d entries in the dictionary\n", max);
+ for(i = 0; i < max; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ dictionary_set(dict, buf, &value, sizeof(struct myvalue));
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+    fprintf(stderr, "Reset %d entries in %llu microseconds: %llu resets per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Searching %d non-existing entries in the dictionary\n", max);
+ max2 = max * 2;
+ for(i = max; i < max2; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ v = dictionary_get(dict, buf);
+ if(v)
+            fprintf(stderr, "ERROR: got non-existing value %d from the dictionary\n", i);
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+    fprintf(stderr, "Searched %d non-existing entries in %llu microseconds: %llu not-found searches per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Deleting %d entries from the dictionary\n", max);
+ for(i = 0; i < max; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ dictionary_del(dict, buf);
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+    fprintf(stderr, "Deleted %d entries in %llu microseconds: %llu deletes per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Destroying dictionary\n");
+ dictionary_destroy(dict);
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+    fprintf(stderr, "Destroyed in %llu microseconds\n", dt);
+
+ return 0;
+}
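
A typical session, run after a normal netdata build so the libnetdata objects referenced by the Makefile exist; all timing output goes to stderr:

    # build and run the dictionary benchmark from tests/profile/
    make benchmark-dictionary
    ./benchmark-dictionary
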
diff --git a/tests/profile/benchmark-line-parsing.c b/tests/profile/benchmark-line-parsing.c
new file mode 100644
index 0000000..1d47cc8
--- /dev/null
+++ b/tests/profile/benchmark-line-parsing.c
@@ -0,0 +1,702 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+#include <stdio.h>
+#include <inttypes.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <sys/time.h>
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+#define simple_hash(name) ({ \
+ register unsigned char *__hash_source = (unsigned char *)(name); \
+ register uint32_t __hash_value = 0x811c9dc5; \
+ while (*__hash_source) { \
+ __hash_value *= 16777619; \
+ __hash_value ^= (uint32_t) *__hash_source++; \
+ } \
+ __hash_value; \
+})
+
+static inline uint32_t simple_hash2(const char *name) {
+ register unsigned char *s = (unsigned char *)name;
+ register uint32_t hval = 0x811c9dc5;
+ while (*s) {
+ hval *= 16777619;
+ hval ^= (uint32_t) *s++;
+ }
+ return hval;
+}
+
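+// minimal base-10 parser: no base detection, no overflow or errno
+// handling - this is what makes it cheaper than the system strtoull()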
+static inline unsigned long long fast_strtoull(const char *s) {
+ register unsigned long long n = 0;
+ register char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ // n = (n << 1) + (n << 3) + (c - '0');
+ }
+ return n;
+}
+
+static uint32_t cache_hash = 0;
+static uint32_t rss_hash = 0;
+static uint32_t rss_huge_hash = 0;
+static uint32_t mapped_file_hash = 0;
+static uint32_t writeback_hash = 0;
+static uint32_t dirty_hash = 0;
+static uint32_t swap_hash = 0;
+static uint32_t pgpgin_hash = 0;
+static uint32_t pgpgout_hash = 0;
+static uint32_t pgfault_hash = 0;
+static uint32_t pgmajfault_hash = 0;
+static uint32_t inactive_anon_hash = 0;
+static uint32_t active_anon_hash = 0;
+static uint32_t inactive_file_hash = 0;
+static uint32_t active_file_hash = 0;
+static uint32_t unevictable_hash = 0;
+static uint32_t hierarchical_memory_limit_hash = 0;
+static uint32_t total_cache_hash = 0;
+static uint32_t total_rss_hash = 0;
+static uint32_t total_rss_huge_hash = 0;
+static uint32_t total_mapped_file_hash = 0;
+static uint32_t total_writeback_hash = 0;
+static uint32_t total_dirty_hash = 0;
+static uint32_t total_swap_hash = 0;
+static uint32_t total_pgpgin_hash = 0;
+static uint32_t total_pgpgout_hash = 0;
+static uint32_t total_pgfault_hash = 0;
+static uint32_t total_pgmajfault_hash = 0;
+static uint32_t total_inactive_anon_hash = 0;
+static uint32_t total_active_anon_hash = 0;
+static uint32_t total_inactive_file_hash = 0;
+static uint32_t total_active_file_hash = 0;
+static uint32_t total_unevictable_hash = 0;
+
+char *strings[] = {
+ "cache",
+ "rss",
+ "rss_huge",
+ "mapped_file",
+ "writeback",
+ "dirty",
+ "swap",
+ "pgpgin",
+ "pgpgout",
+ "pgfault",
+ "pgmajfault",
+ "inactive_anon",
+ "active_anon",
+ "inactive_file",
+ "active_file",
+ "unevictable",
+ "hierarchical_memory_limit",
+ "total_cache",
+ "total_rss",
+ "total_rss_huge",
+ "total_mapped_file",
+ "total_writeback",
+ "total_dirty",
+ "total_swap",
+ "total_pgpgin",
+ "total_pgpgout",
+ "total_pgfault",
+ "total_pgmajfault",
+ "total_inactive_anon",
+ "total_active_anon",
+ "total_inactive_file",
+ "total_active_file",
+ "total_unevictable",
+ NULL
+};
+
+unsigned long long values1[12] = { 0 };
+unsigned long long values2[12] = { 0 };
+unsigned long long values3[12] = { 0 };
+unsigned long long values4[12] = { 0 };
+unsigned long long values5[12] = { 0 };
+unsigned long long values6[12] = { 0 };
+
+#define NUMBER1 "12345678901234"
+#define NUMBER2 "23456789012345"
+#define NUMBER3 "34567890123456"
+#define NUMBER4 "45678901234567"
+#define NUMBER5 "56789012345678"
+#define NUMBER6 "67890123456789"
+#define NUMBER7 "78901234567890"
+#define NUMBER8 "89012345678901"
+#define NUMBER9 "90123456789012"
+#define NUMBER10 "12345678901234"
+#define NUMBER11 "23456789012345"
+
+// simple system strcmp()
+void test1() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+
+ if(unlikely(!strcmp(s, "cache")))
+ values1[i] = strtoull(NUMBER1, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "rss")))
+ values1[i] = strtoull(NUMBER2, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "rss_huge")))
+ values1[i] = strtoull(NUMBER3, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "mapped_file")))
+ values1[i] = strtoull(NUMBER4, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "writeback")))
+ values1[i] = strtoull(NUMBER5, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "dirty")))
+ values1[i] = strtoull(NUMBER6, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "swap")))
+ values1[i] = strtoull(NUMBER7, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgpgin")))
+ values1[i] = strtoull(NUMBER8, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgpgout")))
+ values1[i] = strtoull(NUMBER9, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgfault")))
+ values1[i] = strtoull(NUMBER10, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgmajfault")))
+ values1[i] = strtoull(NUMBER11, NULL, 10);
+ }
+}
+
+// inline simple_hash() with system strtoull()
+void test2() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values2[i] = strtoull(NUMBER1, NULL, 10);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values2[i] = strtoull(NUMBER2, NULL, 10);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values2[i] = strtoull(NUMBER3, NULL, 10);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values2[i] = strtoull(NUMBER4, NULL, 10);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values2[i] = strtoull(NUMBER5, NULL, 10);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values2[i] = strtoull(NUMBER6, NULL, 10);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values2[i] = strtoull(NUMBER7, NULL, 10);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values2[i] = strtoull(NUMBER8, NULL, 10);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values2[i] = strtoull(NUMBER9, NULL, 10);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values2[i] = strtoull(NUMBER10, NULL, 10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values2[i] = strtoull(NUMBER11, NULL, 10);
+ }
+}
+
+// statement expression simple_hash(), system strtoull()
+void test3() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+ uint32_t hash = simple_hash(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values3[i] = strtoull(NUMBER1, NULL, 10);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values3[i] = strtoull(NUMBER2, NULL, 10);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values3[i] = strtoull(NUMBER3, NULL, 10);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values3[i] = strtoull(NUMBER4, NULL, 10);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values3[i] = strtoull(NUMBER5, NULL, 10);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values3[i] = strtoull(NUMBER6, NULL, 10);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values3[i] = strtoull(NUMBER7, NULL, 10);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values3[i] = strtoull(NUMBER8, NULL, 10);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values3[i] = strtoull(NUMBER9, NULL, 10);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values3[i] = strtoull(NUMBER10, NULL, 10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values3[i] = strtoull(NUMBER11, NULL, 10);
+ }
+}
+
+
+// inline simple_hash(), if-continue checks
+void test4() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) {
+ values4[i] = strtoull(NUMBER1, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) {
+ values4[i] = strtoull(NUMBER2, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) {
+ values4[i] = strtoull(NUMBER3, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) {
+ values4[i] = strtoull(NUMBER4, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) {
+ values4[i] = strtoull(NUMBER5, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) {
+ values4[i] = strtoull(NUMBER6, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) {
+ values4[i] = strtoull(NUMBER7, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) {
+ values4[i] = strtoull(NUMBER8, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) {
+ values4[i] = strtoull(NUMBER9, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) {
+ values4[i] = strtoull(NUMBER10, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) {
+ values4[i] = strtoull(NUMBER11, NULL, 0);
+ continue;
+ }
+ }
+}
+
+// inline simple_hash(), if-else-if-else-if, custom fast_strtoull() (netdata default prior to ARL)
+void test5() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values5[i] = fast_strtoull(NUMBER1);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values5[i] = fast_strtoull(NUMBER2);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values5[i] = fast_strtoull(NUMBER3);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values5[i] = fast_strtoull(NUMBER4);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values5[i] = fast_strtoull(NUMBER5);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values5[i] = fast_strtoull(NUMBER6);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values5[i] = fast_strtoull(NUMBER7);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values5[i] = fast_strtoull(NUMBER8);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values5[i] = fast_strtoull(NUMBER9);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values5[i] = fast_strtoull(NUMBER10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values5[i] = fast_strtoull(NUMBER11);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
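+// Prototype of an adaptive re-sortable list (ARL): entries are kept in a
+// doubly-linked list ordered by the sequence they were matched in the
+// previous iteration. When the input keeps the same order (as /proc and
+// cgroup files usually do), check() matches each incoming word against
+// base->last with a single strcmp(); only out-of-order or unknown words
+// fall back to a linear search and get re-linked at the observed position.
+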
+struct entry {
+ char *name;
+ uint32_t hash;
+ int found;
+ void (*func)(void *data1, void *data2);
+ void *data1;
+ void *data2;
+ struct entry *prev, *next;
+};
+
+struct base {
+ int iteration;
+ int registered;
+ int wanted;
+ int found;
+ struct entry *entries, *last;
+};
+
+static inline void callback(void *data1, void *data2) {
+ char *string = data1;
+ unsigned long long *value = data2;
+ *value = fast_strtoull(string);
+}
+
+static inline void callback_system_strtoull(void *data1, void *data2) {
+ char *string = data1;
+ unsigned long long *value = data2;
+ *value = strtoull(string, NULL, 10);
+}
+
+
+static inline struct base *entry(struct base *base, const char *name, void *data1, void *data2, void (*func)(void *, void *)) {
+ if(!base)
+ base = calloc(1, sizeof(struct base));
+
+ struct entry *e = malloc(sizeof(struct entry));
+ e->name = strdup(name);
+ e->hash = simple_hash2(e->name);
+ e->data1 = data1;
+ e->data2 = data2;
+ e->func = func;
+ e->prev = NULL;
+ e->next = base->entries;
+
+ if(base->entries) base->entries->prev = e;
+ else base->last = e;
+
+ base->entries = e;
+ base->registered++;
+ base->wanted = base->registered;
+
+ return base;
+}
+
+static inline int check(struct base *base, const char *s) {
+ uint32_t hash = simple_hash2(s);
+
+ if(likely(!strcmp(s, base->last->name))) {
+ base->last->found = 1;
+ base->found++;
+ if(base->last->func) base->last->func(base->last->data1, base->last->data2);
+ base->last = base->last->next;
+
+ if(!base->last)
+ base->last = base->entries;
+
+ if(base->found == base->registered)
+ return 1;
+
+ return 0;
+ }
+
+ // find it
+ struct entry *e;
+ for(e = base->entries; e ; e = e->next)
+ if(e->hash == hash && !strcmp(e->name, s))
+ break;
+
+ if(e == base->last) {
+ printf("ERROR\n");
+ exit(1);
+ }
+
+ if(e) {
+ // found
+
+ // run it
+ if(e->func) e->func(e->data1, e->data2);
+
+ // unlink it
+ if(e->next) e->next->prev = e->prev;
+ if(e->prev) e->prev->next = e->next;
+
+ if(base->entries == e)
+ base->entries = e->next;
+ }
+ else {
+ // not found
+
+ // create it
+ e = calloc(1, sizeof(struct entry));
+ e->name = strdup(s);
+ e->hash = hash;
+ }
+
+ // link it here
+ e->next = base->last;
+ if(base->last) {
+ e->prev = base->last->prev;
+ base->last->prev = e;
+
+ if(base->entries == base->last)
+ base->entries = e;
+ }
+ else
+ e->prev = NULL;
+
+ if(e->prev)
+ e->prev->next = e;
+
+ base->last = e->next;
+ if(!base->last)
+ base->last = base->entries;
+
+ e->found = 1;
+ base->found++;
+
+ if(base->found == base->registered)
+ return 1;
+
+ printf("relinked '%s' after '%s' and before '%s': ", e->name, e->prev?e->prev->name:"NONE", e->next?e->next->name:"NONE");
+ for(e = base->entries; e ; e = e->next) printf("%s ", e->name);
+ printf("\n");
+
+ return 0;
+}
+
+static inline void begin(struct base *base) {
+
+ if(unlikely((base->iteration % 60) == 1)) {
+ base->wanted = 0;
+ struct entry *e;
+ for(e = base->entries; e ; e = e->next)
+ if(e->found) base->wanted++;
+ }
+
+ base->iteration++;
+ base->last = base->entries;
+ base->found = 0;
+}
+
+void test6() {
+
+ static struct base *base = NULL;
+
+ if(unlikely(!base)) {
+ base = entry(base, "cache", NUMBER1, &values6[0], callback_system_strtoull);
+ base = entry(base, "rss", NUMBER2, &values6[1], callback_system_strtoull);
+ base = entry(base, "rss_huge", NUMBER3, &values6[2], callback_system_strtoull);
+ base = entry(base, "mapped_file", NUMBER4, &values6[3], callback_system_strtoull);
+ base = entry(base, "writeback", NUMBER5, &values6[4], callback_system_strtoull);
+ base = entry(base, "dirty", NUMBER6, &values6[5], callback_system_strtoull);
+ base = entry(base, "swap", NUMBER7, &values6[6], callback_system_strtoull);
+ base = entry(base, "pgpgin", NUMBER8, &values6[7], callback_system_strtoull);
+ base = entry(base, "pgpgout", NUMBER9, &values6[8], callback_system_strtoull);
+ base = entry(base, "pgfault", NUMBER10, &values6[9], callback_system_strtoull);
+ base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback_system_strtoull);
+ }
+
+ begin(base);
+
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ if(check(base, strings[i]))
+ break;
+ }
+}
+
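+// same as test6(), but the values are parsed with the custom
+// fast_strtoull(); note it reuses values6[] - both tests parse identical
+// inputs, so the verification output below stays comparable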
+void test7() {
+
+ static struct base *base = NULL;
+
+ if(unlikely(!base)) {
+ base = entry(base, "cache", NUMBER1, &values6[0], callback);
+ base = entry(base, "rss", NUMBER2, &values6[1], callback);
+ base = entry(base, "rss_huge", NUMBER3, &values6[2], callback);
+ base = entry(base, "mapped_file", NUMBER4, &values6[3], callback);
+ base = entry(base, "writeback", NUMBER5, &values6[4], callback);
+ base = entry(base, "dirty", NUMBER6, &values6[5], callback);
+ base = entry(base, "swap", NUMBER7, &values6[6], callback);
+ base = entry(base, "pgpgin", NUMBER8, &values6[7], callback);
+ base = entry(base, "pgpgout", NUMBER9, &values6[8], callback);
+ base = entry(base, "pgfault", NUMBER10, &values6[9], callback);
+ base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback);
+ }
+
+ begin(base);
+
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ if(check(base, strings[i]))
+ break;
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+
+// ==============
+// --- Poor man's cycle counting.
+static unsigned long tsc;
+
+static void begin_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
+ tsc = ((unsigned long)d << 32) | (unsigned long)a;
+}
+
+static unsigned long end_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
+ return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
+}
+// ===============
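+// note: the rdtsc/rdtscp helpers above assume an x86-64 target and a
+// 64-bit unsigned long; they are left in as an alternative timer, but
+// this benchmark times everything with the gettimeofday() helpers below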
+
+static unsigned long long clk;
+
+static void begin_clock() {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1))
+ return;
+ clk = tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+static unsigned long long end_clock() {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1))
+ return -1;
+ return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk;
+}
+
+int main(void)
+{
+ cache_hash = simple_hash("cache");
+ rss_hash = simple_hash("rss");
+ rss_huge_hash = simple_hash("rss_huge");
+ mapped_file_hash = simple_hash("mapped_file");
+ writeback_hash = simple_hash("writeback");
+ dirty_hash = simple_hash("dirty");
+ swap_hash = simple_hash("swap");
+ pgpgin_hash = simple_hash("pgpgin");
+ pgpgout_hash = simple_hash("pgpgout");
+ pgfault_hash = simple_hash("pgfault");
+ pgmajfault_hash = simple_hash("pgmajfault");
+ inactive_anon_hash = simple_hash("inactive_anon");
+ active_anon_hash = simple_hash("active_anon");
+ inactive_file_hash = simple_hash("inactive_file");
+ active_file_hash = simple_hash("active_file");
+ unevictable_hash = simple_hash("unevictable");
+ hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit");
+ total_cache_hash = simple_hash("total_cache");
+ total_rss_hash = simple_hash("total_rss");
+ total_rss_huge_hash = simple_hash("total_rss_huge");
+ total_mapped_file_hash = simple_hash("total_mapped_file");
+ total_writeback_hash = simple_hash("total_writeback");
+ total_dirty_hash = simple_hash("total_dirty");
+ total_swap_hash = simple_hash("total_swap");
+ total_pgpgin_hash = simple_hash("total_pgpgin");
+ total_pgpgout_hash = simple_hash("total_pgpgout");
+ total_pgfault_hash = simple_hash("total_pgfault");
+ total_pgmajfault_hash = simple_hash("total_pgmajfault");
+ total_inactive_anon_hash = simple_hash("total_inactive_anon");
+ total_active_anon_hash = simple_hash("total_active_anon");
+ total_inactive_file_hash = simple_hash("total_inactive_file");
+ total_active_file_hash = simple_hash("total_active_file");
+ total_unevictable_hash = simple_hash("total_unevictable");
+
+ unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7;
+ unsigned long max = 1000000;
+
+ // let the processor get up to speed
+ begin_clock();
+ for(i = 0; i <= max ;i++) test1();
+ c1 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test1();
+ c1 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test2();
+ c2 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test3();
+ c3 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test4();
+ c4 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test5();
+ c5 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test6();
+ c6 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test7();
+ c7 = end_clock();
+
+ for(i = 0; i < 11 ; i++)
+ printf("value %lu: %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i]);
+
+ printf("\n\nRESULTS\n");
+ printf("test1() in %lu usecs: if-else-if-else-if, simple strcmp() with system strtoull().\n"
+ "test2() in %lu usecs: inline simple_hash() if-else-if-else-if, with system strtoull().\n"
+ "test3() in %lu usecs: statement expression simple_hash(), system strtoull().\n"
+ "test4() in %lu usecs: inline simple_hash(), if-continue checks, system strtoull().\n"
+ "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if, custom strtoull() (netdata default prior to ARL).\n"
+ "test6() in %lu usecs: adaptive re-sortable list, system strtoull() (wow!)\n"
+ "test7() in %lu usecs: adaptive re-sortable list, custom strtoull() (wow!)\n"
+ , c1
+ , c2
+ , c3
+ , c4
+ , c5
+ , c6
+ , c7
+ );
+
+ return 0;
+}
diff --git a/tests/profile/benchmark-procfile-parser.c b/tests/profile/benchmark-procfile-parser.c
new file mode 100644
index 0000000..991e2df
--- /dev/null
+++ b/tests/profile/benchmark-procfile-parser.c
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+
+#include "config.h"
+#include "libnetdata/libnetdata.h"
+
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+#define PF_PREFIX "PROCFILE"
+#define PFWORDS_INCREASE_STEP 200
+#define PFLINES_INCREASE_STEP 10
+#define PROCFILE_INCREMENT_BUFFER 512
+extern size_t procfile_max_lines;
+extern size_t procfile_max_words;
+extern size_t procfile_max_allocation;
+
+
+static inline void pflines_reset(pflines *fl) {
+ // debug(D_PROCFILE, PF_PREFIX ": reseting lines");
+
+ fl->len = 0;
+}
+
+static inline void pflines_free(pflines *fl) {
+ // debug(D_PROCFILE, PF_PREFIX ": freeing lines");
+
+ freez(fl);
+}
+
+static inline void pfwords_reset(pfwords *fw) {
+ // debug(D_PROCFILE, PF_PREFIX ": reseting words");
+ fw->len = 0;
+}
+
+
+static inline void pfwords_add(procfile *ff, char *str) {
+ // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str);
+
+ pfwords *fw = ff->words;
+ if(unlikely(fw->len == fw->size)) {
+ // debug(D_PROCFILE, PF_PREFIX ": expanding words");
+
+ ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *));
+ fw->size += PFWORDS_INCREASE_STEP;
+ }
+
+ fw->words[fw->len++] = str;
+}
+
+NEVERNULL
+static inline size_t *pflines_add(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word);
+
+ pflines *fl = ff->lines;
+ if(unlikely(fl->len == fl->size)) {
+ // debug(D_PROCFILE, PF_PREFIX ": expanding lines");
+
+ ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline));
+ fl->size += PFLINES_INCREASE_STEP;
+ }
+
+ ffline *ffl = &fl->lines[fl->len++];
+ ffl->words = 0;
+ ffl->first = ff->words->len;
+
+ return &ffl->words;
+}
+
+
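+// single-pass, in-place tokenizer: each byte is classified through the
+// ff->separators lookup table; words are split on separators, while
+// quoted strings and balanced parentheses are kept as single words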
+NOINLINE
+static void procfile_parser(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename);
+
+ char *s = ff->data // our current position
+ , *e = &ff->data[ff->len] // the terminating null
+ , *t = ff->data; // the first character of a word (or quoted / parenthesized string)
+
+ // the look up array to find our type of character
+ PF_CHAR_TYPE *separators = ff->separators;
+
+ char quote = 0; // the quote character - only when in quoted string
+ size_t opened = 0; // counts the number of open parenthesis
+
+ size_t *line_words = pflines_add(ff);
+
+ while(s < e) {
+ PF_CHAR_TYPE ct = separators[(unsigned char)(*s)];
+
+ // this is faster than a switch()
+ // read more here: http://lazarenko.me/switch/
+ switch(ct) {
+ case PF_CHAR_IS_SEPARATOR:
+ if(!quote && !opened) {
+ if (s != t) {
+ // separator, but we have word before it
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ }
+ t = s + 1;
+ }
+ // fallthrough
+
+ case PF_CHAR_IS_WORD:
+ s++;
+ break;
+
+
+ case PF_CHAR_IS_NEWLINE:
+ // end of line
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+
+ // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words);
+
+ line_words = pflines_add(ff);
+ break;
+
+ case PF_CHAR_IS_QUOTE:
+ if(unlikely(!quote && s == t)) {
+ // quote opened at the beginning
+ quote = *s;
+ t = ++s;
+ }
+ else if(unlikely(quote && quote == *s)) {
+ // quote closed
+ quote = 0;
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else
+ s++;
+ break;
+
+ case PF_CHAR_IS_OPEN:
+ if(s == t) {
+ opened++;
+ t = ++s;
+ }
+ else if(opened) {
+ opened++;
+ s++;
+ }
+ else
+ s++;
+ break;
+
+ case PF_CHAR_IS_CLOSE:
+ if(opened) {
+ opened--;
+
+ if(!opened) {
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else
+ s++;
+ }
+ else
+ s++;
+ break;
+
+ default:
+ fatal("Internal Error: procfile_readall() does not handle all the cases.");
+ }
+ }
+
+ if(likely(s > t && t < e)) {
+ // the last word
+ if(unlikely(ff->len >= ff->size)) {
+ // we are going to lose the last byte
+ s = &ff->data[ff->size - 1];
+ }
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ // t = ++s;
+ }
+}
+
+
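+// local copy of the library's procfile_readall(), wired to the parser
+// above, so it can be benchmarked head-to-head against the in-tree
+// implementation (see test_netdata_internal() vs test_method1() below)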
+procfile *procfile_readall1(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename);
+
+ ff->len = 0; // zero the used size
+ ssize_t r = 1; // read at least once
+ while(r > 0) {
+ ssize_t s = ff->len;
+ ssize_t x = ff->size - s;
+
+ if(unlikely(!x)) {
+ debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff));
+ ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER);
+ ff->size += PROCFILE_INCREMENT_BUFFER;
+ }
+
+ debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
+ r = read(ff->fd, &ff->data[s], ff->size - s);
+ if(unlikely(r == -1)) {
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
+ procfile_close(ff);
+ return NULL;
+ }
+
+ ff->len += r;
+ }
+
+ // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
+ if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
+ procfile_close(ff);
+ return NULL;
+ }
+
+ pflines_reset(ff->lines);
+ pfwords_reset(ff->words);
+ procfile_parser(ff);
+
+ if(unlikely(procfile_adaptive_initial_allocation)) {
+ if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len;
+ if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len;
+ if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len;
+ }
+
+ // debug(D_PROCFILE, "File '%s' updated.", ff->filename);
+ return ff;
+}
+
+
+
+
+
+
+
+
+// ==============
+// --- Poor man's cycle counting.
+static unsigned long tsc;
+
+void begin_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
+ tsc = ((unsigned long)d << 32) | (unsigned long)a;
+}
+
+unsigned long end_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
+ return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
+}
+// ==============
+
+
+unsigned long test_netdata_internal(void) {
+ static procfile *ff = NULL;
+
+ ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(!ff) {
+ fprintf(stderr, "Failed to open filename\n");
+ exit(1);
+ }
+
+ begin_tsc();
+ ff = procfile_readall(ff);
+ unsigned long c = end_tsc();
+
+ if(!ff) {
+ fprintf(stderr, "Failed to read filename\n");
+ exit(1);
+ }
+
+ return c;
+}
+
+unsigned long test_method1(void) {
+ static procfile *ff = NULL;
+
+ ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(!ff) {
+ fprintf(stderr, "Failed to open filename\n");
+ exit(1);
+ }
+
+ begin_tsc();
+ ff = procfile_readall1(ff);
+ unsigned long c = end_tsc();
+
+ if(!ff) {
+ fprintf(stderr, "Failed to read filename\n");
+ exit(1);
+ }
+
+ return c;
+}
+
+//--- Test
+int main(int argc, char **argv)
+{
+ (void)argc; (void)argv;
+
+ int i, max = 1000000;
+
+ unsigned long c1 = 0;
+ test_netdata_internal();
+ for(i = 0; i < max ; i++)
+ c1 += test_netdata_internal();
+
+ unsigned long c2 = 0;
+ test_method1();
+ for(i = 0; i < max ; i++)
+ c2 += test_method1();
+
+ printf("netdata internal: completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c1, c1 / max, (float)c1 * 100.0 / (float)c1);
+ printf("method1 : completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c2, c2 / max, (float)c2 * 100.0 / (float)c1);
+
+ return 0;
+}
diff --git a/tests/profile/benchmark-registry.c b/tests/profile/benchmark-registry.c
new file mode 100644
index 0000000..cfed6d7
--- /dev/null
+++ b/tests/profile/benchmark-registry.c
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+
+/*
+ * compile with
+ * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o benchmark-registry benchmark-registry.c ../src/dictionary.o ../src/log.o ../src/avl.o ../src/common.o ../src/appconfig.o ../src/web_buffer.o ../src/storage_number.o ../src/rrd.o ../src/health.o -pthread -luuid -lm -DHAVE_CONFIG_H -DVARLIB_DIR="\"/tmp\""
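+ * (the object file paths above reflect the source layout at the time this
+ * benchmark was written - adjust them to the current tree when rebuilding)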
+ */
+
+char *hostname = "me";
+
+#include "../src/registry.c"
+
+void netdata_cleanup_and_exit(int ret) { exit(ret); }
+
+// ----------------------------------------------------------------------------
+// TESTS
+
+int test1(int argc, char **argv) {
+
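+ // note: print_stats() below is a nested function - a GCC extension, not portable C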
+ void print_stats(uint32_t requests, unsigned long long start, unsigned long long end) {
+ fprintf(stderr, " > SPEED: %u requests served in %0.2f seconds ( >>> %llu per second <<< )\n",
+ requests, (end-start) / 1000000.0, (unsigned long long)requests * 1000000ULL / (end-start));
+
+ fprintf(stderr, " > DB : persons %llu, machines %llu, unique URLs %llu, accesses %llu, URLs: for persons %llu, for machines %llu\n",
+ registry.persons_count, registry.machines_count, registry.urls_count, registry.usages_count,
+ registry.persons_urls_count, registry.machines_urls_count);
+ }
+
+ (void) argc;
+ (void) argv;
+
+ uint32_t u, users = 1000000;
+ uint32_t m, machines = 200000;
+ uint32_t machines2 = machines * 2;
+
+ char **users_guids = malloc(users * sizeof(char *));
+ char **machines_guids = malloc(machines2 * sizeof(char *));
+ char **machines_urls = malloc(machines2 * sizeof(char *));
+ unsigned long long start;
+
+ registry_init();
+
+ fprintf(stderr, "Generating %u machine guids\n", machines2);
+ for(m = 0; m < machines2 ;m++) {
+ uuid_t uuid;
+ machines_guids[m] = malloc(36+1);
+ uuid_generate(uuid);
+ uuid_unparse(uuid, machines_guids[m]);
+
+ char buf[FILENAME_MAX + 1];
+ snprintfz(buf, FILENAME_MAX, "http://%u.netdata.rocks/", m+1);
+ machines_urls[m] = strdup(buf);
+
+ // fprintf(stderr, "\tmachine %u: '%s', url: '%s'\n", m + 1, machines_guids[m], machines_urls[m]);
+ }
+
+ start = timems();
+ fprintf(stderr, "\nGenerating %u users accessing %u machines\n", users, machines);
+ m = 0;
+ time_t now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ if(++m == machines) m = 0;
+
+ PERSON *p = registry_request_access(NULL, machines_guids[m], machines_urls[m], "test", now);
+ users_guids[u] = p->guid;
+ }
+ print_stats(u, start, timems());
+
+ start = timems();
+ fprintf(stderr, "\nAll %u users accessing again the same %u servers\n", users, machines);
+ m = 0;
+ now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ if(++m == machines) m = 0;
+
+ PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now);
+
+ if(p->guid != users_guids[u])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[u], p->guid);
+ }
+ print_stats(u, start, timems());
+
+ start = timems();
+ fprintf(stderr, "\nAll %u users accessing a new server, out of the %u servers\n", users, machines);
+ m = 1;
+ now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ if(++m == machines) m = 0;
+
+ PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now);
+
+ if(p->guid != users_guids[u])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[u], p->guid);
+ }
+ print_stats(u, start, timems());
+
+ start = timems();
+ fprintf(stderr, "\n%u random users accessing a random server, out of the %u servers\n", users, machines);
+ now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ uint32_t tu = random() * users / RAND_MAX;
+ uint32_t tm = random() * machines / RAND_MAX;
+
+ PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now);
+
+ if(p->guid != users_guids[tu])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
+ }
+ print_stats(u, start, timems());
+
+ start = timems();
+ fprintf(stderr, "\n%u random users accessing a random server, out of %u servers\n", users, machines2);
+ now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ uint32_t tu = random() * users / RAND_MAX;
+ uint32_t tm = random() * machines2 / RAND_MAX;
+
+ PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now);
+
+ if(p->guid != users_guids[tu])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
+ }
+ print_stats(u, start, timems());
+
+ for(m = 0; m < 10; m++) {
+ start = timems();
+ fprintf(stderr,
+ "\n%u random user accesses to a random server, out of %u servers,\n > using 1/10000 with a random url, 1/1000 with a mismatched url\n",
+ users * 2, machines2);
+ now = time(NULL);
+ for (u = 0; u < users * 2; u++) {
+ uint32_t tu = random() * users / RAND_MAX;
+ uint32_t tm = random() * machines2 / RAND_MAX;
+
+ char *url = machines_urls[tm];
+ char buf[FILENAME_MAX + 1];
+ if (random() % 10000 == 1234) {
+ snprintfz(buf, FILENAME_MAX, "http://random.%ld.netdata.rocks/", random());
+ url = buf;
+ }
+ else if (random() % 1000 == 123)
+ url = machines_urls[random() * machines2 / RAND_MAX];
+
+ PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], url, "test", now);
+
+ if (p->guid != users_guids[tu])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
+ }
+ print_stats(u, start, timems());
+ }
+
+ fprintf(stderr, "\n\nSAVE\n");
+ start = timems();
+ registry_save();
+ print_stats(registry.persons_count, start, timems());
+
+ fprintf(stderr, "\n\nCLEANUP\n");
+ start = timems();
+ registry_free();
+ print_stats(registry.persons_count, start, timems());
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// TESTING
+
+int main(int argc, char **argv) {
+ config_set_boolean("registry", "enabled", 1);
+
+ //debug_flags = 0xFFFFFFFF;
+ test1(argc, argv);
+ exit(0);
+
+ (void)argc;
+ (void)argv;
+
+
+ PERSON *p1, *p2;
+
+ fprintf(stderr, "\n\nINITIALIZATION\n");
+
+ registry_init();
+
+ int i = 2;
+
+ fprintf(stderr, "\n\nADDING ENTRY\n");
+ p1 = registry_request_access("2c95abd0-1542-11e6-8c66-00508db7e9c9", "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL));
+
+ if(0)
+ while(i--) {
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ENTRY\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p1 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER URL\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://127.0.0.1:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER URL\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER PERSON\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p2 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p2 = registry_request_access(p2->guid, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL));
+ }
+
+ fprintf(stderr, "\n\nSAVE\n");
+ registry_save();
+
+ fprintf(stderr, "\n\nCLEANUP\n");
+ registry_free();
+ return 0;
+}
diff --git a/tests/profile/benchmark-value-pairs.c b/tests/profile/benchmark-value-pairs.c
new file mode 100644
index 0000000..ae4f53c
--- /dev/null
+++ b/tests/profile/benchmark-value-pairs.c
@@ -0,0 +1,623 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+
+#include "config.h"
+#include "libnetdata/libnetdata.h"
+
+#ifdef simple_hash
+#undef simple_hash
+#endif
+
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+#define simple_hash(name) ({ \
+ register unsigned char *__hash_source = (unsigned char *)(name); \
+ register uint32_t __hash_value = 0x811c9dc5; \
+ while (*__hash_source) { \
+ __hash_value *= 16777619; \
+ __hash_value ^= (uint32_t) *__hash_source++; \
+ } \
+ __hash_value; \
+})
+
+static inline uint32_t simple_hash2(const char *name) {
+ register unsigned char *s = (unsigned char *)name;
+ register uint32_t hval = 0x811c9dc5;
+ while (*s) {
+ hval *= 16777619;
+ hval ^= (uint32_t) *s++;
+ }
+ return hval;
+}
+
+static inline unsigned long long fast_strtoull(const char *s) {
+ register unsigned long long n = 0;
+ register char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ // n = (n << 1) + (n << 3) + (c - '0');
+ }
+ return n;
+}
+
+static uint32_t cache_hash = 0;
+static uint32_t rss_hash = 0;
+static uint32_t rss_huge_hash = 0;
+static uint32_t mapped_file_hash = 0;
+static uint32_t writeback_hash = 0;
+static uint32_t dirty_hash = 0;
+static uint32_t swap_hash = 0;
+static uint32_t pgpgin_hash = 0;
+static uint32_t pgpgout_hash = 0;
+static uint32_t pgfault_hash = 0;
+static uint32_t pgmajfault_hash = 0;
+static uint32_t inactive_anon_hash = 0;
+static uint32_t active_anon_hash = 0;
+static uint32_t inactive_file_hash = 0;
+static uint32_t active_file_hash = 0;
+static uint32_t unevictable_hash = 0;
+static uint32_t hierarchical_memory_limit_hash = 0;
+static uint32_t total_cache_hash = 0;
+static uint32_t total_rss_hash = 0;
+static uint32_t total_rss_huge_hash = 0;
+static uint32_t total_mapped_file_hash = 0;
+static uint32_t total_writeback_hash = 0;
+static uint32_t total_dirty_hash = 0;
+static uint32_t total_swap_hash = 0;
+static uint32_t total_pgpgin_hash = 0;
+static uint32_t total_pgpgout_hash = 0;
+static uint32_t total_pgfault_hash = 0;
+static uint32_t total_pgmajfault_hash = 0;
+static uint32_t total_inactive_anon_hash = 0;
+static uint32_t total_active_anon_hash = 0;
+static uint32_t total_inactive_file_hash = 0;
+static uint32_t total_active_file_hash = 0;
+static uint32_t total_unevictable_hash = 0;
+
+unsigned long long values1[50] = { 0 };
+unsigned long long values2[50] = { 0 };
+unsigned long long values3[50] = { 0 };
+unsigned long long values4[50] = { 0 };
+unsigned long long values5[50] = { 0 };
+unsigned long long values6[50] = { 0 };
+unsigned long long values7[50] = { 0 };
+unsigned long long values8[50] = { 0 };
+unsigned long long values9[50] = { 0 };
+
+struct pair {
+ const char *name;
+ const char *value;
+ uint32_t hash;
+ unsigned long long *collected8;
+ unsigned long long *collected9;
+} pairs[] = {
+ { "cache", "12345678901234", 0, &values8[0] ,&values9[0] },
+ { "rss", "23456789012345", 0, &values8[1] ,&values9[1] },
+ { "rss_huge", "34567890123456", 0, &values8[2] ,&values9[2] },
+ { "mapped_file", "45678901234567", 0, &values8[3] ,&values9[3] },
+ { "writeback", "56789012345678", 0, &values8[4] ,&values9[4] },
+ { "dirty", "67890123456789", 0, &values8[5] ,&values9[5] },
+ { "swap", "78901234567890", 0, &values8[6] ,&values9[6] },
+ { "pgpgin", "89012345678901", 0, &values8[7] ,&values9[7] },
+ { "pgpgout", "90123456789012", 0, &values8[8] ,&values9[8] },
+ { "pgfault", "10345678901234", 0, &values8[9] ,&values9[9] },
+ { "pgmajfault", "11456789012345", 0, &values8[10] ,&values9[10] },
+ { "inactive_anon", "12000000000000", 0, &values8[11] ,&values9[11] },
+ { "active_anon", "13345678901234", 0, &values8[12] ,&values9[12] },
+ { "inactive_file", "14345678901234", 0, &values8[13] ,&values9[13] },
+ { "active_file", "15345678901234", 0, &values8[14] ,&values9[14] },
+ { "unevictable", "16345678901234", 0, &values8[15] ,&values9[15] },
+ { "hierarchical_memory_limit", "17345678901234", 0, &values8[16] ,&values9[16] },
+ { "total_cache", "18345678901234", 0, &values8[17] ,&values9[17] },
+ { "total_rss", "19345678901234", 0, &values8[18] ,&values9[18] },
+ { "total_rss_huge", "20345678901234", 0, &values8[19] ,&values9[19] },
+ { "total_mapped_file", "21345678901234", 0, &values8[20] ,&values9[20] },
+ { "total_writeback", "22345678901234", 0, &values8[21] ,&values9[21] },
+ { "total_dirty", "23000000000000", 0, &values8[22] ,&values9[22] },
+ { "total_swap", "24345678901234", 0, &values8[23] ,&values9[23] },
+ { "total_pgpgin", "25345678901234", 0, &values8[24] ,&values9[24] },
+ { "total_pgpgout", "26345678901234", 0, &values8[25] ,&values9[25] },
+ { "total_pgfault", "27345678901234", 0, &values8[26] ,&values9[26] },
+ { "total_pgmajfault", "28345678901234", 0, &values8[27] ,&values9[27] },
+ { "total_inactive_anon", "29345678901234", 0, &values8[28] ,&values9[28] },
+ { "total_active_anon", "30345678901234", 0, &values8[29] ,&values9[29] },
+ { "total_inactive_file", "31345678901234", 0, &values8[30] ,&values9[30] },
+ { "total_active_file", "32345678901234", 0, &values8[31] ,&values9[31] },
+ { "total_unevictable", "33345678901234", 0, &values8[32] ,&values9[32] },
+ { NULL, NULL , 0, NULL ,NULL }
+};
+
+// simple system strcmp()
+void test1() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ if(unlikely(!strcmp(s, "cache")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "rss")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "rss_huge")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "mapped_file")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "writeback")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "dirty")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "swap")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgpgin")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgpgout")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgfault")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgmajfault")))
+ values1[i] = strtoull(v, NULL, 10);
+ }
+}
+
+// inline simple_hash() with system strtoull()
+void test2() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values2[i] = strtoull(v, NULL, 10);
+ }
+}
+
+// statement expression simple_hash(), system strtoull()
+void test3() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ uint32_t hash = simple_hash(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values3[i] = strtoull(v, NULL, 10);
+ }
+}
+
+
+// inline simple_hash(), if-continue checks
+void test4() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+ }
+}
+
+// inline simple_hash(), if-else-if-else-if, custom fast_strtoull() (netdata default prior to ARL)
+void test5() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values5[i] = fast_strtoull(v);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+void arl_strtoull(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register unsigned long long *d = dst;
+ *d = strtoull(value, NULL, 10);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
+}
+
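+// the same benchmark through the in-tree ARL implementation
+// (arl_create()/arl_begin()/arl_check() from libnetdata), parsing the
+// values with the system strtoull()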
+void test6() {
+ static ARL_BASE *base = NULL;
+
+ if(unlikely(!base)) {
+ base = arl_create("test6", arl_strtoull, 60);
+ arl_expect_custom(base, "cache", NULL, &values6[0]);
+ arl_expect_custom(base, "rss", NULL, &values6[1]);
+ arl_expect_custom(base, "rss_huge", NULL, &values6[2]);
+ arl_expect_custom(base, "mapped_file", NULL, &values6[3]);
+ arl_expect_custom(base, "writeback", NULL, &values6[4]);
+ arl_expect_custom(base, "dirty", NULL, &values6[5]);
+ arl_expect_custom(base, "swap", NULL, &values6[6]);
+ arl_expect_custom(base, "pgpgin", NULL, &values6[7]);
+ arl_expect_custom(base, "pgpgout", NULL, &values6[8]);
+ arl_expect_custom(base, "pgfault", NULL, &values6[9]);
+ arl_expect_custom(base, "pgmajfault", NULL, &values6[10]);
+ }
+
+ arl_begin(base);
+
+ int i;
+ for(i = 0; pairs[i].name ; i++)
+ if(arl_check(base, pairs[i].name, pairs[i].value)) break;
+}
+
+void arl_str2ull(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register unsigned long long *d = dst;
+ *d = str2ull(value);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
+}
+
+void test7() {
+ static ARL_BASE *base = NULL;
+
+ if(unlikely(!base)) {
+ base = arl_create("test7", arl_str2ull, 60);
+ arl_expect_custom(base, "cache", NULL, &values7[0]);
+ arl_expect_custom(base, "rss", NULL, &values7[1]);
+ arl_expect_custom(base, "rss_huge", NULL, &values7[2]);
+ arl_expect_custom(base, "mapped_file", NULL, &values7[3]);
+ arl_expect_custom(base, "writeback", NULL, &values7[4]);
+ arl_expect_custom(base, "dirty", NULL, &values7[5]);
+ arl_expect_custom(base, "swap", NULL, &values7[6]);
+ arl_expect_custom(base, "pgpgin", NULL, &values7[7]);
+ arl_expect_custom(base, "pgpgout", NULL, &values7[8]);
+ arl_expect_custom(base, "pgfault", NULL, &values7[9]);
+ arl_expect_custom(base, "pgmajfault", NULL, &values7[10]);
+ }
+
+ arl_begin(base);
+
+ int i;
+ for(i = 0; pairs[i].name ; i++)
+ if(arl_check(base, pairs[i].name, pairs[i].value)) break;
+}
+
+void test8() {
+ int i;
+ for(i = 0; pairs[i].name; i++) {
+ uint32_t hash = simple_hash(pairs[i].name);
+
+ int j;
+ for(j = 0; pairs[j].name; j++) {
+ if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) {
+ *pairs[j].collected8 = strtoull(pairs[i].value, NULL, 10);
+ break;
+ }
+ }
+ }
+}
+
+void test9() {
+ int i;
+ for(i = 0; pairs[i].name; i++) {
+ uint32_t hash = simple_hash(pairs[i].name);
+
+ int j;
+ for(j = 0; pairs[j].name; j++) {
+ if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) {
+ *pairs[j].collected9 = str2ull(pairs[i].value);
+ break;
+ }
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+/*
+// ==============
+// --- Poor man's cycle counting.
+static unsigned long tsc;
+
+static void begin_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
+ tsc = ((unsigned long)d << 32) | (unsigned long)a;
+}
+
+static unsigned long end_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
+ return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
+}
+// ===============
+*/
+
+static unsigned long long clk;
+
+static void begin_clock() {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1))
+ return;
+ clk = tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+static unsigned long long end_clock() {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1))
+ return -1;
+ return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk;
+}
+
+int main(void)
+{
+ {
+ int i;
+ for(i = 0; pairs[i].name; i++)
+ pairs[i].hash = simple_hash(pairs[i].name);
+ }
+
+ cache_hash = simple_hash("cache");
+ rss_hash = simple_hash("rss");
+ rss_huge_hash = simple_hash("rss_huge");
+ mapped_file_hash = simple_hash("mapped_file");
+ writeback_hash = simple_hash("writeback");
+ dirty_hash = simple_hash("dirty");
+ swap_hash = simple_hash("swap");
+ pgpgin_hash = simple_hash("pgpgin");
+ pgpgout_hash = simple_hash("pgpgout");
+ pgfault_hash = simple_hash("pgfault");
+ pgmajfault_hash = simple_hash("pgmajfault");
+ inactive_anon_hash = simple_hash("inactive_anon");
+ active_anon_hash = simple_hash("active_anon");
+ inactive_file_hash = simple_hash("inactive_file");
+ active_file_hash = simple_hash("active_file");
+ unevictable_hash = simple_hash("unevictable");
+ hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit");
+ total_cache_hash = simple_hash("total_cache");
+ total_rss_hash = simple_hash("total_rss");
+ total_rss_huge_hash = simple_hash("total_rss_huge");
+ total_mapped_file_hash = simple_hash("total_mapped_file");
+ total_writeback_hash = simple_hash("total_writeback");
+ total_dirty_hash = simple_hash("total_dirty");
+ total_swap_hash = simple_hash("total_swap");
+ total_pgpgin_hash = simple_hash("total_pgpgin");
+ total_pgpgout_hash = simple_hash("total_pgpgout");
+ total_pgfault_hash = simple_hash("total_pgfault");
+ total_pgmajfault_hash = simple_hash("total_pgmajfault");
+ total_inactive_anon_hash = simple_hash("total_inactive_anon");
+ total_active_anon_hash = simple_hash("total_active_anon");
+ total_inactive_file_hash = simple_hash("total_inactive_file");
+ total_active_file_hash = simple_hash("total_active_file");
+ total_unevictable_hash = simple_hash("total_unevictable");
+
+ // warm up the caches for the functions under test
+ (void)simple_hash2("hello world");
+ (void)strcmp("1", "2");
+ (void)strtoull("123", NULL, 0);
+
+ unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7 = 0, c8 = 0, c9 = 0;
+ unsigned long max = 1000000;
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test1();
+ c1 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test2();
+ c2 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test3();
+ c3 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test4();
+ c4 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test5();
+ c5 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test6();
+ c6 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test7();
+ c7 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test8();
+ c8 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test9();
+ c9 = end_clock();
+
+ for(i = 0; i < 11 ; i++)
+ printf("value %lu: %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i], values7[i], values8[i], values9[i]);
+
+ printf("\n\nRESULTS\n");
+ printf("test1() [1] in %lu usecs: simple system strcmp().\n"
+ "test2() [4] in %lu usecs: inline simple_hash() with system strtoull().\n"
+ "test3() [5] in %lu usecs: statement expression simple_hash(), system strtoull().\n"
+ "test4() [6] in %lu usecs: inline simple_hash(), if-continue checks.\n"
+ "test5() [7] in %lu usecs: inline simple_hash(), if-else-if-else-if (netdata default prior to ARL).\n"
+ "test6() [8] in %lu usecs: adaptive re-sortable array with strtoull() (wow!)\n"
+ "test7() [9] in %lu usecs: adaptive re-sortable array with str2ull() (wow!)\n"
+ "test8() [2] in %lu usecs: nested loop with strtoull()\n"
+ "test9() [3] in %lu usecs: nested loop with str2ull()\n"
+ , c1
+ , c2
+ , c3
+ , c4
+ , c5
+ , c6
+ , c7
+ , c8
+ , c9
+ );
+
+ return 0;
+}
diff --git a/tests/profile/statsd-stress.c b/tests/profile/statsd-stress.c
new file mode 100644
index 0000000..435d58d
--- /dev/null
+++ b/tests/profile/statsd-stress.c
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+#include <stdlib.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <string.h>
+#include <time.h>
+#include <pthread.h>
+
+void diep(char *s)
+{
+ perror(s);
+ exit(1);
+}
+
+size_t run_threads = 1;
+size_t metrics = 1024;
+
+#define SERVER_IP "127.0.0.1"
+#define PORT 8125
+
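+// crude helper: sums repeated rand() calls so ranges larger than RAND_MAX
+// stay reachable; the distribution bias is acceptable for stress testing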
+size_t myrand(size_t max) {
+ size_t loops = max / RAND_MAX;
+ size_t i;
+
+ size_t ret = rand();
+ for(i = 0; i < loops ;i++)
+ ret += rand();
+
+ return ret % max;
+}
+
+struct thread_data {
+ size_t id;
+ struct sockaddr_in *si_other;
+ int slen;
+ size_t counter;
+};
+
+static void *report_thread(void *__data) {
+ struct thread_data *data = (struct thread_data *)__data;
+
+ size_t last = 0;
+ for (;;) {
+ size_t i;
+ size_t total = 0;
+ for(i = 0; i < run_threads ;i++)
+ total += data[i].counter;
+
+ printf("%zu metrics/s\n", total-last);
+ last = total;
+
+ sleep(1);
+ printf("\033[F\033[J");
+ }
+
+ return NULL;
+}
+
+char *types[] = {"g", "c", "m", "ms", "h", "s", NULL};
+// char *types[] = {"g", "c", "C", "h", "ms", NULL}; // brubeck compatible
+
+static void *spam_thread(void *__data) {
+ struct thread_data *data = (struct thread_data *)__data;
+
+ int s;
+ char packet[1024];
+
+ if ((s = socket(AF_INET, SOCK_DGRAM, 0))==-1)
+ diep("socket");
+
+ char **packets = malloc(sizeof(char *) * metrics);
+ size_t i, *lengths = malloc(sizeof(size_t) * metrics);
+ size_t t;
+
+ for(i = 0, t = 0; i < metrics ;i++, t++) {
+ if(!types[t]) t = 0;
+ char *type = types[t];
+
+ lengths[i] = sprintf(packet, "stress.%s.t%zu.m%zu:%zu|%s", type, data->id, i, myrand(metrics), type);
+ packets[i] = strdup(packet);
+ // printf("packet %zu, of length %zu: '%s'\n", i, lengths[i], packets[i]);
+ }
+ //printf("\n");
+
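+	// the packets were pre-rendered above, so this loop measures raw
+	// sendto() throughput rather than formatting cost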
+ for (;;) {
+ for(i = 0; i < metrics ;i++) {
+ if (sendto(s, packets[i], lengths[i], 0, (void *)data->si_other, data->slen) < 0) {
+ printf("C ==> DROPPED\n");
+ return NULL;
+ }
+ data->counter++;
+ }
+ }
+
+ free(packets);
+ free(lengths);
+ close(s);
+ return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+ if (argc != 5) {
+ fprintf(stderr, "Usage: '%s THREADS METRICS IP PORT'\n", argv[0]);
+ exit(-1);
+ }
+
+ run_threads = atoi(argv[1]);
+ metrics = atoi(argv[2]);
+ char *ip = argv[3];
+ int port = atoi(argv[4]);
+
+ struct thread_data data[run_threads];
+ struct sockaddr_in si_other;
+ pthread_t threads[run_threads], report;
+ size_t i;
+
+ srand(time(NULL));
+
+ memset(&si_other, 0, sizeof(si_other));
+ si_other.sin_family = AF_INET;
+ si_other.sin_port = htons(port);
+ if (inet_aton(ip, &si_other.sin_addr)==0) {
+ fprintf(stderr, "inet_aton() of ip '%s' failed\n", ip);
+ exit(1);
+ }
+
+ for (i = 0; i < run_threads; ++i) {
+ data[i].id = i;
+ data[i].si_other = &si_other;
+ data[i].slen = sizeof(si_other);
+ data[i].counter = 0;
+ pthread_create(&threads[i], NULL, spam_thread, &data[i]);
+ }
+
+ printf("\n");
+ printf("THREADS : %zu\n", run_threads);
+ printf("METRICS : %zu\n", metrics);
+ printf("DESTINATION : %s:%d\n", ip, port);
+ printf("\n");
+	pthread_create(&report, NULL, report_thread, data);
+
+ for (i =0; i < run_threads; ++i)
+ pthread_join(threads[i], NULL);
+
+ return 0;
+}
diff --git a/tests/profile/test-eval.c b/tests/profile/test-eval.c
new file mode 100644
index 0000000..144381c
--- /dev/null
+++ b/tests/profile/test-eval.c
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+
+/*
+ * 1. build netdata (as normally)
+ * 2. cd profile/
+ * 3. compile with:
+ * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o test-eval test-eval.c ../src/log.o ../src/eval.o ../src/common.o ../src/clocks.o ../src/web_buffer.o ../src/storage_number.o -pthread -lm
+ */
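+/*
+ * Example run (hypothetical expression):
+ *   ./test-eval '(1 + 2) * 3 > 5'
+ * prints the parsing status, the normalized expression and the evaluated result.
+ */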
+
+#include "config.h"
+#include "libnetdata/libnetdata.h"
+#include "database/rrdcalc.h"
+
+void netdata_cleanup_and_exit(int ret) { exit(ret); }
+
+/*
+void indent(int level, int show) {
+ int i = level;
+ while(i--) printf(" | ");
+ if(show) printf(" \\_ ");
+ else printf(" \\_ ");
+}
+
+void print_node(EVAL_NODE *op, int level);
+
+void print_value(EVAL_VALUE *v, int level) {
+ indent(level, 0);
+
+ switch(v->type) {
+ case EVAL_VALUE_INVALID:
+ printf("value (NOP)\n");
+ break;
+
+ case EVAL_VALUE_NUMBER:
+ printf("value %Lf (NUMBER)\n", v->number);
+ break;
+
+ case EVAL_VALUE_EXPRESSION:
+ printf("value (SUB-EXPRESSION)\n");
+ print_node(v->expression, level+1);
+ break;
+
+ default:
+ printf("value (INVALID type %d)\n", v->type);
+ break;
+
+ }
+}
+
+void print_node(EVAL_NODE *op, int level) {
+
+// if(op->operator != EVAL_OPERATOR_NOP) {
+ indent(level, 1);
+ if(op->operator) printf("%c (node %d, precedence: %d)\n", op->operator, op->id, op->precedence);
+ else printf("NOP (node %d, precedence: %d)\n", op->id, op->precedence);
+// }
+
+ int i = op->count;
+ while(i--) print_value(&op->ops[i], level + 1);
+}
+
+calculated_number evaluate(EVAL_NODE *op, int depth);
+
+calculated_number evaluate_value(EVAL_VALUE *v, int depth) {
+ switch(v->type) {
+ case EVAL_VALUE_NUMBER:
+ return v->number;
+
+ case EVAL_VALUE_EXPRESSION:
+ return evaluate(v->expression, depth);
+
+ default:
+ fatal("I don't know how to handle EVAL_VALUE type %d", v->type);
+ }
+}
+
+void print_depth(int depth) {
+ static int count = 0;
+
+ printf("%d. ", ++count);
+ while(depth--) printf(" ");
+}
+
+calculated_number evaluate(EVAL_NODE *op, int depth) {
+ calculated_number n1, n2, r;
+
+ switch(op->operator) {
+ case EVAL_OPERATOR_SIGN_PLUS:
+ r = evaluate_value(&op->ops[0], depth);
+ break;
+
+ case EVAL_OPERATOR_SIGN_MINUS:
+ r = -evaluate_value(&op->ops[0], depth);
+ break;
+
+ case EVAL_OPERATOR_PLUS:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 + n2;
+ print_depth(depth);
+ printf("%Lf = %Lf + %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_MINUS:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 - n2;
+ print_depth(depth);
+ printf("%Lf = %Lf - %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_MULTIPLY:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 * n2;
+ print_depth(depth);
+ printf("%Lf = %Lf * %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_DIVIDE:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 / n2;
+ print_depth(depth);
+ printf("%Lf = %Lf / %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_NOT:
+ n1 = evaluate_value(&op->ops[0], depth);
+ r = !n1;
+ print_depth(depth);
+ printf("%Lf = NOT %Lf\n", r, n1);
+ break;
+
+ case EVAL_OPERATOR_AND:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 && n2;
+ print_depth(depth);
+ printf("%Lf = %Lf AND %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_OR:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 || n2;
+ print_depth(depth);
+ printf("%Lf = %Lf OR %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_GREATER_THAN_OR_EQUAL:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 >= n2;
+ print_depth(depth);
+ printf("%Lf = %Lf >= %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_LESS_THAN_OR_EQUAL:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 <= n2;
+ print_depth(depth);
+ printf("%Lf = %Lf <= %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_GREATER:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 > n2;
+ print_depth(depth);
+ printf("%Lf = %Lf > %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_LESS:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 < n2;
+ print_depth(depth);
+ printf("%Lf = %Lf < %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_NOT_EQUAL:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 != n2;
+ print_depth(depth);
+ printf("%Lf = %Lf <> %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_EQUAL:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 == n2;
+ print_depth(depth);
+ printf("%Lf = %Lf == %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_EXPRESSION_OPEN:
+ printf("BEGIN SUB-EXPRESSION\n");
+ r = evaluate_value(&op->ops[0], depth + 1);
+ printf("END SUB-EXPRESSION\n");
+ break;
+
+ case EVAL_OPERATOR_NOP:
+ case EVAL_OPERATOR_VALUE:
+ r = evaluate_value(&op->ops[0], depth);
+ break;
+
+ default:
+ error("I don't know how to handle operator '%c'", op->operator);
+ r = 0;
+ break;
+ }
+
+ return r;
+}
+
+
+void print_expression(EVAL_NODE *op, const char *failed_at, int error) {
+ if(op) {
+ printf("expression tree:\n");
+ print_node(op, 0);
+
+ printf("\nevaluation steps:\n");
+ evaluate(op, 0);
+
+ int error;
+ calculated_number ret = expression_evaluate(op, &error);
+ printf("\ninternal evaluator:\nSTATUS: %d, RESULT = %Lf\n", error, ret);
+
+ expression_free(op);
+ }
+ else {
+ printf("error: %d, failed_at: '%s'\n", error, (failed_at)?failed_at:"<NONE>");
+ }
+}
+*/
+
+int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ if(argc != 2) {
+		fprintf(stderr, "I need an expression (enclose it in single quotes (') as a single parameter)\n");
+ exit(1);
+ }
+
+ const char *failed_at = NULL;
+ int error;
+
+ EVAL_EXPRESSION *exp = expression_parse(argv[1], &failed_at, &error);
+ if(!exp)
+ printf("\nPARSING FAILED\nExpression: '%s'\nParsing stopped at: '%s'\nParsing error code: %d (%s)\n", argv[1], (failed_at)?((*failed_at)?failed_at:"<END OF EXPRESSION>"):"<NONE>", error, expression_strerror(error));
+
+ else {
+ printf("\nPARSING OK\nExpression: '%s'\nParsed as : '%s'\nParsing error code: %d (%s)\n", argv[1], exp->parsed_as, error, expression_strerror(error));
+
+ if(expression_evaluate(exp)) {
+ printf("\nEvaluates to: %Lf\n\n", exp->result);
+ }
+ else {
+ printf("\nEvaluation failed with code %d and message: %s\n\n", exp->error, buffer_tostring(exp->error_msg));
+ }
+ expression_free(exp);
+ }
+
+ return 0;
+}
diff --git a/tests/run-unit-tests.sh b/tests/run-unit-tests.sh
new file mode 100755
index 0000000..70d618a
--- /dev/null
+++ b/tests/run-unit-tests.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+#
+# Unit-testing script
+#
+# This script does the following:
+# 1. Check whether any files were modified that would necessitate unit testing (using the `TRAVIS_COMMIT_RANGE` environment variable).
+# 2. If there are no changed files that require unit testing, exit successfully.
+# 3. Otherwise, run all the unit tests.
+#
+# We do things this way because our unit testing takes a rather long
+# time (average 18-19 minutes as of the original creation of this script),
+# so skipping it when we don't actually need it can significantly speed
+# up the CI process.
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Austin S. Hemmelgarn <austin@netdata.cloud>
+#
+# shellcheck disable=SC2230
+
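+# NOTE: run this from the top of the netdata source tree, e.g.:
+#   ./tests/run-unit-tests.sh
+# since it invokes ./netdata-installer.sh relative to the current directory.
+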
+install_netdata() {
+ echo "Installing Netdata"
+ fakeroot ./netdata-installer.sh \
+ --install "$HOME" \
+ --dont-wait \
+ --dont-start-it \
+ --enable-plugin-nfacct \
+ --enable-plugin-freeipmi \
+ --disable-lto
+}
+
+c_unit_tests() {
+ echo "Running C code unit tests"
+ "$HOME"/netdata/usr/sbin/netdata -W unittest
+}
+
+install_netdata || exit 1
+
+c_unit_tests || exit 1
diff --git a/tests/stress.sh b/tests/stress.sh
new file mode 100755
index 0000000..97cced0
--- /dev/null
+++ b/tests/stress.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+if ! hash curl 2>/dev/null
+then
+ 1>&2 echo "'curl' not found on system. Please install 'curl'."
+ exit 1
+fi
+
+# set the host to connect to
+if [ -n "$1" ]
+then
+ host="$1"
+else
+ host="http://127.0.0.1:19999"
+fi
+echo "using netdata server at: $host"
+
+# shellcheck disable=SC2207 disable=SC1117
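+# Extract the chart section names from netdata.conf, skipping the [global]
+# and [plugin*] sections.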
+charts=($(curl -k "$host/netdata.conf" 2>/dev/null | grep "^\[" | cut -d '[' -f 2 | cut -d ']' -f 1 | grep -v ^global$ | grep -v "^plugin" | sort -u))
+if [ "${#charts[@]}" -eq 0 ]
+then
+ echo "Cannot download charts from server: $host"
+ exit 1
+fi
+
+update_every="$(curl -k "$host/netdata.conf" 2>/dev/null | grep "update every = " | head -n 1 | cut -d '=' -f 2)"
+[ $(( update_every + 1 - 1)) -eq 0 ] && update_every=1
+
+entries="$(curl -k "$host/netdata.conf" 2>/dev/null | grep "history = " | head -n 1 | cut -d '=' -f 2)"
+[ $(( entries + 1 - 1)) -eq 0 ] && entries=3600
+
+# to compare equal things, set the entries to 3600 max
+[ $entries -gt 3600 ] && entries=3600
+
+if [ $entries -ne 3600 ]
+then
+ echo >&2 "You are running a test for a history of $entries entries."
+fi
+
+modes=("average" "max")
+formats=("jsonp" "json" "ssv" "csv" "datatable" "datasource" "tsv" "ssvcomma" "html" "array")
+options="flip|jsonwrap"
+
+now=$(date +%s)
+first=$((now - (entries * update_every)))
+duration=$((now - first))
+
+file="$(mktemp /tmp/netdata-stress-XXXXXXXX)"
+cleanup() {
+ echo "cleanup"
+ [ -f "$file" ] && rm "$file"
+}
+trap cleanup EXIT
+
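+# Note: $RANDOM is uniform in 0..32767, so "RANDOM * N / 32767" scales it
+# into the range 0..N.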
+while true
+do
+ echo "curl -k --compressed --keepalive-time 120 --header \"Connection: keep-alive\" \\" >"$file"
+ # shellcheck disable=SC2034
+ for x in {1..100}
+ do
+ dt=$((RANDOM * duration / 32767))
+ st=$((RANDOM * duration / 32767))
+ et=$(( st + dt ))
+ [ $et -gt "$now" ] && st=$(( now - dt ))
+
+ points=$((RANDOM * 2000 / 32767 + 2))
+ st=$((first + st))
+ et=$((first + et))
+
+ mode=$((RANDOM * ${#modes[@]} / 32767))
+ mode="${modes[$mode]}"
+
+ chart=$((RANDOM * ${#charts[@]} / 32767))
+ chart="${charts[$chart]}"
+
+ format=$((RANDOM * ${#formats[@]} / 32767))
+ format="${formats[$format]}"
+
+ echo "--url \"$host/api/v1/data?chart=$chart&mode=$mode&format=$format&options=$options&after=$st&before=$et&points=$points\" \\"
+ done >>"$file"
+ bash "$file" >/dev/null
+done
diff --git a/tests/template_dimension/system_cpu.conf.alarm_foreach b/tests/template_dimension/system_cpu.conf.alarm_foreach
new file mode 100644
index 0000000..21a8cbb
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.alarm_foreach
@@ -0,0 +1,8 @@
+ alarm: dev_dim_template
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage foreach system,user,nice
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.alarm_foreach_sp b/tests/template_dimension/system_cpu.conf.alarm_foreach_sp
new file mode 100644
index 0000000..fdd19e8
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.alarm_foreach_sp
@@ -0,0 +1,8 @@
+ alarm: dev_dim_template
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage foreach *
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.template_alarm b/tests/template_dimension/system_cpu.conf.template_alarm
new file mode 100644
index 0000000..2bd12a1
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.template_alarm
@@ -0,0 +1,26 @@
+template: dev_dim_template_system
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage of system
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
+
+template: dev_dim_template_user
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage of user
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
+
+template: dev_dim_template_nice
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage of nice
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.template_foreach b/tests/template_dimension/system_cpu.conf.template_foreach
new file mode 100644
index 0000000..c75c15b
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.template_foreach
@@ -0,0 +1,8 @@
+template: dev_dim_template
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage foreach system,user,nice
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.template_foreach_sp b/tests/template_dimension/system_cpu.conf.template_foreach_sp
new file mode 100644
index 0000000..f50a832
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.template_foreach_sp
@@ -0,0 +1,8 @@
+ template: dev_dim_template
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage foreach *
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.unique_alarm b/tests/template_dimension/system_cpu.conf.unique_alarm
new file mode 100644
index 0000000..0f38b6e
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.unique_alarm
@@ -0,0 +1,26 @@
+ alarm: dev_dim_template_system
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage of system
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
+
+ alarm: dev_dim_template_user
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage of user
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
+
+ alarm: dev_dim_template_nice
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage of nice
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/template_dim.sh.in b/tests/template_dimension/template_dim.sh.in
new file mode 100644
index 0000000..88978fd
--- /dev/null
+++ b/tests/template_dimension/template_dim.sh.in
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+# The health directory where the alarm configuration files are installed
+HEALTHDIR="@configdir_POST@/health.d/"
+
+# The output directory and the alarms API endpoint
+OUTDIR="alarms"
+QUERY="/api/v1/alarms?all"
+MURL="http://localhost:19999$QUERY"
+
+#error messages
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NOCOLOR='\033[0m'
+
+ALARMTEST="dev_dim_template"
+
+change_alarm_file() {
+ if [ -f "$1" ]; then
+ rm "$1"
+ fi
+
+ #copy keeping the permissions
+ cp -a "$2" "$3"
+}
+
+netdata_test_download() {
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST="$?"
+ if [ "$TEST" -ne "0" ]; then
+		echo -e "${RED} Failed to fetch the alarm log. ${NOCOLOR}"
+ exit 1
+ fi
+
+ TOTALARM=$(grep "$ALARMTEST" "$2" | grep name | cut -d: -f2 | grep -c "$ALARMTEST")
+
+ if [ "$TOTALARM" -ne "$3" ]; then
+		echo -e "${RED} The number of active alarms with the name $ALARMTEST is wrong ${NOCOLOR}"
+ exit 1
+ fi
+}
+
+get_the_logs() {
+ curl -v -k --create-dirs -o "$OUTDIR/$1.out" "$MURL" 2> "$OUTDIR/$1.err"
+ netdata_test_download "$OUTDIR/$1.err" "$OUTDIR/$1.out" "$2"
+}
+
+process_data() {
+ netdata -D &
+ NETDATAPID=$!
+	echo -e "${NOCOLOR}Sleeping for 15 seconds to let the alarms trigger"
+ sleep 15
+ kill $NETDATAPID
+ get_the_logs "$1" "$2"
+}
+
+mkdir "$OUTDIR"
+CREATEDIR="$?"
+if [ "$CREATEDIR" -ne "0" ]; then
+ echo -e "${RED}Cannot create the output directory, it already exists. The test will overwrite previous results. ${NOCOLOR}"
+fi
+
+if [ -n "$1" ]; then
+ MURL="$1$QUERY"
+fi
+
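+# Each step below installs one alarm configuration, starts a temporary
+# netdata instance and verifies the expected number of active alarms:
+# 3 for explicit dimension lists, 10 for the 'foreach *' wildcard.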
+change_alarm_file "./0" "system_cpu.conf.unique_alarm" "$HEALTHDIR/dim_double_without_template.conf"
+process_data "double_without_template" 3 "$HEALTHDIR/dim_double_without_template.conf"
+
+change_alarm_file "$HEALTHDIR/dim_double_without_template.conf" "system_cpu.conf.alarm_foreach" "$HEALTHDIR/dim_foreach_without_template.conf"
+process_data "foreach_without_template" 3 "$HEALTHDIR/dim_foreach_without_template.conf"
+
+change_alarm_file "$HEALTHDIR/dim_foreach_without_template.conf" "system_cpu.conf.alarm_foreach_sp" "$HEALTHDIR/dim_foreach_without_template_sp.conf"
+process_data "foreach_without_template" 10 "$HEALTHDIR/dim_foreach_without_template_sp.conf"
+
+change_alarm_file "$HEALTHDIR/dim_foreach_without_template_sp.conf" "system_cpu.conf.template_alarm" "$HEALTHDIR/dim_double_with_template.conf"
+process_data "double_with_template" 3 "$HEALTHDIR/dim_double_with_template.conf"
+
+change_alarm_file "$HEALTHDIR/dim_double_with_template.conf" "system_cpu.conf.template_foreach" "$HEALTHDIR/dim_foreach_with_template.conf"
+process_data "foreach_with_template" 3 "$HEALTHDIR/dim_foreach_with_template.conf"
+
+change_alarm_file "$HEALTHDIR/dim_foreach_with_template.conf" "system_cpu.conf.template_foreach_sp" "$HEALTHDIR/dim_foreach_with_template_sp.conf"
+process_data "foreach_with_template" 10 "$HEALTHDIR/dim_foreach_with_template_sp.conf"
+
+rm "$HEALTHDIR/dim_foreach_with_template_sp.conf"
+rm -rf "$OUTDIR"
+
+echo -e "${GREEN} all the tests were sucessful ${NOCOLOR}"
diff --git a/tests/updater_checks.bats b/tests/updater_checks.bats
new file mode 100755
index 0000000..930cea9
--- /dev/null
+++ b/tests/updater_checks.bats
@@ -0,0 +1,66 @@
+#!/usr/bin/env bats
+#
+# This script is responsible for validating
+# updater capabilities after a change
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+#
+
+INSTALLATION="$BATS_TMPDIR/installation"
+ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
+# list of files which need to be checked. Paths must not start with '/'
+FILES="usr/libexec/netdata/plugins.d/go.d.plugin
+ usr/libexec/netdata/plugins.d/charts.d.plugin
+ usr/libexec/netdata/plugins.d/python.d.plugin
+ usr/libexec/netdata/plugins.d/node.d.plugin"
+
+DIRS="usr/sbin/netdata
+ etc/netdata
+ usr/share/netdata
+ usr/libexec/netdata
+ var/cache/netdata
+ var/lib/netdata
+ var/log/netdata"
+
+setup() {
+ # If we are not in netdata git repo, at the top level directory, fail
+ TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+ CWD=$(git rev-parse --show-cdup || echo "")
+ if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as ./tests/$(basename "$0") from top level directory of git repository"
+ exit 1
+ fi
+}
+
+@test "install stable netdata using kickstart" {
+ ./packaging/installer/kickstart.sh --dont-wait --dont-start-it --auto-update --install ${INSTALLATION}
+
+ # Validate particular files
+ for file in $FILES; do
+ [ ! -f "$BATS_TMPDIR/$file" ]
+ done
+
+ # Validate particular directories
+ for a_dir in $DIRS; do
+ [ ! -d "$BATS_TMPDIR/$a_dir" ]
+ done
+
+ # Cleanup
+ rm -rf ${kickstart_file}
+}
+
+@test "update netdata using the new updater" {
+ export ENVIRONMENT_FILE="${ENV}"
+ # Run the updater, with the override so that it uses the local repo we have at hand
+ export NETDATA_LOCAL_TARBAL_OVERRIDE="${PWD}"
+ ${INSTALLATION}/netdata/usr/libexec/netdata/netdata-updater.sh --not-running-from-cron
+ ! grep "new_installation" "${ENV}"
+}
+
+@test "uninstall netdata using latest uninstaller" {
+ ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}"
+ [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ]
+ [ ! -f "/etc/cron.daily/netdata-updater" ]
+}
diff --git a/tests/updater_checks.sh b/tests/updater_checks.sh
new file mode 100755
index 0000000..dff87a6
--- /dev/null
+++ b/tests/updater_checks.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env sh
+#
+# Wrapper script that installs the required dependencies
+# for the BATS script to run successfully
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+#
+
+echo "Syncing/updating repository.."
+
+blind_arch_grep_install() {
+ # There is a peculiar docker case with arch, where grep is not available
+ # This method will have to be triggered blindly, to inject grep so that we can process
+ # It starts to become a chicken-egg situation with all the distros..
+ echo "* * Workaround hack * *"
+ echo "Attempting blind install for archlinux case"
+
+ if command -v pacman > /dev/null 2>&1; then
+ echo "Executing grep installation"
+ pacman -Sy
+ pacman --noconfirm --needed -S grep
+ fi
+}
+blind_arch_grep_install || echo "Workaround failed, proceed as usual"
+
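+# derive the distribution ID from /etc/os-release, e.g. ID="ubuntu" -> running_os=ubuntu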
+running_os="$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | sed -e 's/"//g')"
+
+case "${running_os}" in
+"centos"|"fedora"|"CentOS")
+ echo "Running on CentOS, updating YUM repository.."
+ yum clean all
+ yum update -y
+
+ echo "Installing extra dependencies.."
+ yum install -y epel-release
+ yum install -y bats curl
+ ;;
+"debian"|"ubuntu")
+ echo "Running ${running_os}, updating APT repository"
+ apt-get update -y
+ apt-get install -y bats curl
+ ;;
+"opensuse-leap"|"opensuse-tumbleweed")
+ zypper update -y
+ zypper install -y bats curl
+
+ # Fixes curl: (60) SSL certificate problem: unable to get local issuer certificate
+ # https://travis-ci.com/netdata/netdata/jobs/267573805
+ update-ca-certificates
+ ;;
+"arch")
+ pacman --noconfirm -Syu
+ pacman --noconfirm --needed -S bash-bats curl libffi
+ ;;
+"alpine")
+ apk update
+ apk add bash curl bats
+ ;;
+*)
+ echo "Running on ${running_os}, no repository preparation done"
+ ;;
+esac
+
+# Run dependency scriptlet, before anything else
+#
+./packaging/installer/install-required-packages.sh --non-interactive netdata
+
+echo "Running BATS file.."
+bats --tap tests/updater_checks.bats
diff --git a/tests/urls/request.sh.in b/tests/urls/request.sh.in
new file mode 100644
index 0000000..ebdfc09
--- /dev/null
+++ b/tests/urls/request.sh.in
@@ -0,0 +1,307 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+################################################################################################
+#### ####
+#### GLOBAL VARIABLES ####
+#### ####
+################################################################################################
+
+# The current time
+CT=$(date +'%s')
+
+# The previous time
+PT=$((CT - 30))
+
+# The output directory where we will store the results and error
+OUTDIR="tests"
+OUTEDIR="encoded_tests"
+OUTOPTDIR="options"
+ERRDIR="etests"
+NOCOLOR='\033[0m'
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+
+################################################################################################
+#### ####
+#### FUNCTIONS ####
+#### ####
+################################################################################################
+
+# Print error message and close script
+netdata_print_error(){
+	echo "${RED} Closing due to error \"$1\" code \"$2\" ${NOCOLOR}"
+ exit 1
+}
+
+# Print the header message of the function
+netdata_print_header() {
+ echo "$1"
+}
+
+# Create the main directory where the results will be stored
+netdata_create_directory() {
+ netdata_print_header "Creating directory $1"
+ if [ ! -d "$1" ]; then
+ mkdir "$1"
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+			netdata_print_error "Cannot create directory $1" "$TEST"
+ fi
+ else
+ echo "Working with directory $OUTDIR"
+ fi
+}
+
+# Check that the download succeeded
+netdata_test_download(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+		netdata_print_error "Cannot download the page $2" "$TEST"
+ exit 1
+ fi
+}
+
+# Check that the download fails, as expected for an invalid request
+netdata_error_test(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -eq 0 ]; then
+ netdata_print_error "The page $2 did not answer with an error" $?
+ exit 1
+ fi
+}
+
+
+# Download information from Netdata
+netdata_download_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$OUTDIR/$3.out" "$1/$2" 2> "$OUTDIR/$3.err"
+ netdata_test_download "$OUTDIR/$3.err" "$1/$2"
+}
+
+netdata_download_various_with_options() {
+ netdata_print_header "Getting options for $2"
+ curl -X OPTIONS -v -k --create-dirs -o "$OUTOPTDIR/$3.out" "$1/$2" 2> "$OUTOPTDIR/$3.err"
+ netdata_test_download "$OUTOPTDIR/$3.err" "$1/$2"
+}
+
+# Download information from Netdata
+netdata_wrong_request_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$ERRDIR/$3.out" "$1/$2" 2> "$ERRDIR/$3.err"
+ netdata_error_test "$ERRDIR/$3.err" "$1/$2"
+}
+
+# Download charts from Netdata
+netdata_download_charts() {
+ curl -v -k --create-dirs -o "$OUTDIR/charts.out" "$1/$2" 2> "$OUTDIR/charts.err"
+ netdata_test_download "$OUTDIR/charts.err" "$1/$2"
+
+ #Rewrite the next
+ grep -w "id" tests/charts.out| cut -d: -f2 | grep "\"," | sed s/,//g | sort
+}
+
+#Test options for a specific chart
+netdata_download_chart() {
+ SEPARATOR="&"
+ EQUAL="="
+ OUTD=$OUTDIR
+ ENCODED=" "
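+	# Two passes: the first uses literal '&'/'=' separators, the second their
+	# percent-encoded forms (%26/%3D), writing results to separate directories.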
+ for I in $(seq 0 1); do
+ if [ "$I" -eq "1" ] ; then
+ SEPARATOR="%26"
+ EQUAL="%3D"
+ OUTD=$OUTEDIR
+ ENCODED="encoded"
+ fi
+
+ NAME=${3//\"/}
+ netdata_print_header "Getting data for $NAME using $4 $ENCODED"
+
+ LDIR=$OUTD"/"$4
+
+ LURL="$1/$2$EQUAL$NAME"
+
+ NAME=$NAME"_$4"
+
+ curl -v -k --create-dirs -o "$LDIR/$NAME.out" "$LURL" 2> "$LDIR/$NAME.err"
+ netdata_test_download "$LDIR/$NAME.err" "$LURL"
+
+ UFILES=( "points" "before" "after" )
+ COUNTER=0
+ for OPT in "points=100" "before=$PT" "after=$CT" ;
+ do
+ LURL="$LURL$SEPARATOR$OPT"
+ LFILE=$NAME"_${UFILES[$COUNTER]}";
+
+ curl -v -k --create-dirs -o "$LDIR/$LFILE.out" "$LURL" 2> "$LDIR/$LFILE.err"
+ netdata_test_download "$LDIR/$LFILE.err" "$LURL"
+
+ COUNTER=$((COUNTER + 1))
+ done
+
+ LURL="$LURL&group$EQUAL"
+ for OPT in "min" "max" "sum" "median" "stddev" "cv" "ses" "des" "incremental_sum" "average";
+ do
+ TURL=$LURL$OPT
+ TFILE=$NAME"_$OPT";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ for MORE in "jsonp" "json" "ssv" "csv" "datatable" "datasource" "tsv" "ssvcomma" "html" "array";
+ do
+ TURL=$TURL"&format="$MORE
+ TFILE=$NAME"_$OPT""_$MORE";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ done
+ done
+
+	# complete the dangling "&group=" with the last group option, then chain extra parameters
+	LURL="$LURL$OPT&gtime=60"
+	NFILE=$NAME"_gtime"
+	curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$LURL" 2> "$LDIR/$NFILE.err"
+	netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+	LURL="$LURL&options=percentage"
+	NFILE=$NAME"_percentage"
+	curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$LURL" 2> "$LDIR/$NFILE.err"
+	netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+	LURL="$LURL&dimensions=system%7Cnice"
+	NFILE=$NAME"_dimension"
+	curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$LURL" 2> "$LDIR/$NFILE.err"
+	netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+	LURL="$LURL&label=testing"
+	NFILE=$NAME"_label"
+	curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$LURL" 2> "$LDIR/$NFILE.err"
+	netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+ done
+}
+
+# Download information from Netdata
+netdata_download_allmetrics() {
+ netdata_print_header "Getting All metrics"
+ LURL="$1/api/v1/allmetrics?format="
+ for FMT in "shell" "prometheus" "prometheus_all_hosts" "json" ;
+ do
+ TURL=$LURL$FMT
+ for OPT in "yes" "no";
+ do
+ if [ "$FMT" == "prometheus" ]; then
+ TURL="$TURL&help=$OPT&types=$OPT&timestamps=$OPT"
+ fi
+ TURL="$TURL&names=$OPT&oldunits=$OPT&hideunits=$OPT&prefix=ND"
+
+ NAME="allmetrics_$FMT"
+ echo "$OUTDIR/$2/$NAME.out"
+ curl -v -k --create-dirs -o "$OUTDIR/$2/$NAME.out" "$TURL" 2> "$OUTDIR/$2/$NAME.err"
+ netdata_test_download "$OUTDIR/$2/$NAME.err" "$TURL"
+ done
+ done
+}
+
+
+####################################################
+#### ####
+#### MAIN ROUTINE ####
+#### ####
+####################################################
+MURL="http://127.0.0.1:19999"
+
+if [ -n "$1" ]; then
+ MURL="$1"
+fi
+
+netdata_create_directory $OUTDIR
+netdata_create_directory $OUTEDIR
+netdata_create_directory $OUTOPTDIR
+netdata_create_directory $ERRDIR
+
+wget --no-check-certificate --execute="robots = off" --mirror --convert-links --no-parent "$MURL"
+TEST=$?
+if [ $TEST -ne "0" ] ; then
+ echo "Cannot connect to Netdata"
+ exit 1
+fi
+
+netdata_download_various "$MURL" "netdata.conf" "netdata.conf"
+
+netdata_download_various_with_options "$MURL" "netdata.conf" "netdata.conf"
+
+netdata_wrong_request_various "$MURL" "api/v15/info?this%20could%20not%20be%20here" "err_version"
+
+netdata_wrong_request_various "$MURL" "api/v1/\(*@&$\!$%%5E\)\!$*%&\)\!$*%%5E*\!%5E%\!%5E$%\!%5E%\(\!*%5E*%5E%\(*@&$%5E%\(\!%5E#*&\!^#$*&\!^%\)@\($%^\)\!*&^\(\!*&^#$&#$\)\!$%^\)\!$*%&\)#$\!^#*$^\!\(*#^#\)\!%^\!\)$*%&\!\(*&$\!^#$*&^\!*#^$\!*^\)%\(\!*&$%\)\(\!&#$\!^*#&$^\!*^%\)\!$%\)\!\(&#$\!^#*&^$" "err_version2"
+
+netdata_download_various "$MURL" "api/v1/info" "info"
+netdata_download_various_with_options "$MURL" "api/v1/info" "info"
+netdata_download_various "$MURL" "api/v1/info?this%20could%20not%20be%20here" "err_info"
+
+netdata_print_header "Getting all the netdata charts"
+CHARTS=$( netdata_download_charts "$MURL" "api/v1/charts" )
+WCHARTS=$( netdata_download_charts "$MURL" "api/v1/charts?this%20could%20not%20be%20here" )
+WCHARTS2=$( netdata_download_charts "$MURL" "api/v1/charts%3fthis%20could%20not%20be%20here" )
+
+if [ ${#CHARTS[@]} -ne ${#WCHARTS[@]} ]; then
+	echo "The number of charts does not match when the separator is not encoded.";
+ exit 2;
+elif [ ${#CHARTS[@]} -ne ${#WCHARTS2[@]} ]; then
+ echo "The number of charts does not match when everything is encoded";
+ exit 3;
+fi
+
+netdata_wrong_request_various "$MURL" "api/v1/chart" "err_chart_without_chart"
+netdata_wrong_request_various "$MURL" "api/v1/chart?_=234231424242" "err_chart_arg"
+
+netdata_download_various "$MURL" "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+netdata_download_various_with_options "$MURL" "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+
+netdata_download_various "$MURL" "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various_with_options "$MURL" "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various "$MURL" "api/v1/chart%3Fchart=cpu.cpu0_interrupts%26_=234231424242" "chart_cpu_with_more_args_encoded2"
+netdata_download_various "$MURL" "api/v1/chart%3Fchart%3Dcpu.cpu0_interrupts%26_%3D234231424242" "chart_cpu_with_more_args_encoded3"
+
+netdata_create_directory "$OUTDIR/chart"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various "$MURL" "api/v1/chart?chart=$NAME" "chart/$NAME"
+done
+
+netdata_wrong_request_various "$MURL" "api/v1/alarm_variables" "err_alarm_variables_without_chart"
+netdata_wrong_request_various "$MURL" "api/v1/alarm_variables?_=234231424242" "err_alarm_variables_arg"
+netdata_download_various "$MURL" "api/v1/alarm_variables?chart=cpu.cpu0_interrupts&_=234231424242" "alarm_cpu_with_more_args"
+
+netdata_create_directory "$OUTDIR/alarm_variables"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various "$MURL" "api/v1/alarm_variables?chart=$NAME" "alarm_variables/$NAME"
+done
+
+netdata_create_directory "$OUTDIR/badge"
+netdata_create_directory "$OUTEDIR/badge"
+for I in $CHARTS ; do
+ netdata_download_chart "$MURL" "api/v1/badge.svg?chart" "$I" "badge"
+done
+
+netdata_create_directory "$OUTDIR/allmetrics"
+netdata_download_allmetrics "$MURL" "allmetrics"
+
+netdata_download_various "$MURL" "api/v1/alarms?all" "alarms_all"
+netdata_download_various "$MURL" "api/v1/alarms?active" "alarms_active"
+netdata_download_various "$MURL" "api/v1/alarms" "alarms_nothing"
+
+netdata_download_various "$MURL" "api/v1/alarm_log?after" "alarm_without"
+netdata_download_various "$MURL" "api/v1/alarm_log" "alarm_nothing"
+netdata_download_various "$MURL" "api/v1/alarm_log?after&_=$PT" "alarm_log"
+
+netdata_create_directory "$OUTDIR/data"
+netdata_create_directory "$OUTEDIR/data"
+for I in $CHARTS ; do
+ netdata_download_chart "$MURL" "api/v1/data?chart" "$I" "data"
+ break;
+done
+
+echo -e "${GREEN}ALL the URLS got 200 as answer! ${NOCOLOR}"
+
+exit 0
diff --git a/tests/web/easypiechart.chart.spec.js b/tests/web/easypiechart.chart.spec.js
new file mode 100644
index 0000000..23bf33d
--- /dev/null
+++ b/tests/web/easypiechart.chart.spec.js
@@ -0,0 +1,39 @@
+"use strict";
+
+
+// with xdescribe, this is skipped.
+describe("creation of easy pie charts", function () {
+
+ beforeAll(function () {
+ // karma stores the loaded files relative to "base/".
+ // This command is needed to load HTML fixtures
+ jasmine.getFixtures().fixturesPath = "base/tests/web/fixtures";
+ });
+
+	it("should create a new chart, but its failure is expected for demonstration purposes", function () {
+ // arrange
+ // Theoretically we can load some html. What about jquery? could this work?
+ // https://stackoverflow.com/questions/5337481/spying-on-jquery-selectors-in-jasmine
+ loadFixtures("easypiechart.chart.fixture1.html");
+
+ // for easy pie chart, we can fake the data result:
+ var data = {
+ result: [5]
+ };
+ // act
+ var result = NETDATA.easypiechartChartCreate(createState(), data);
+ // assert
+ expect(result).toBe(true);
+ });
+
+ function createState(min, max) {
+ // create a fake state with only needed properties.
+ return {
+ tmp: {
+ easyPieChartMin: min,
+ easyPieChartMax: max
+ }
+ };
+ }
+
+});
diff --git a/tests/web/easypiechart.percentage.spec.js b/tests/web/easypiechart.percentage.spec.js
new file mode 100644
index 0000000..976b339
--- /dev/null
+++ b/tests/web/easypiechart.percentage.spec.js
@@ -0,0 +1,142 @@
+"use strict";
+
+
+describe("percentage calculations for easy pie charts with dynamic range", function () {
+
+ it("should return positive value, if value greater than dynamic max", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 6, 2, 10);
+
+ expect(result).toBe(60);
+ });
+
+ it("should return negative value, if value lesser than dynamic min", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -6, -10, 10);
+
+ expect(result).toBe(-60);
+ });
+
+ it("should return 0 if value is zero and min negative, max positive", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, -1, 2);
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0.1 if value and min are zero and max positive", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, 0, 2);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return -0.1 if value is zero, max and min negative", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, -2, -1);
+
+ expect(result).toBe(-0.1);
+ });
+
+ it("should return positive value, if max is user-defined", function () {
+ var state = createState(null, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 46, -40, 50);
+
+ expect(result).toBe(92);
+ });
+
+ it("should return negative value, if min is user-defined", function () {
+ var state = createState(-50, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -46, -50, 40);
+
+ expect(result).toBe(-92);
+ });
+
+});
+
+describe("percentage calculations for easy pie charts with fixed range", function () {
+
+ it("should return positive value, if min and max are user-defined", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 46, 40, 50);
+
+ expect(result).toBe(60);
+ });
+
+ it("should return 100 if positive min and max are user-defined, but value is greater than max", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 60, 40, 50);
+
+ expect(result).toBe(100);
+ });
+
+ it("should return 0.1 if positive min and max are user-defined, but value is smaller than min", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 39.9, 42, 48);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return -100 if negative min and max are user-defined, but value is smaller than min", function () {
+ var state = createState(-40, -50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -50.1, -40, -50);
+
+ expect(result).toBe(-100);
+ });
+
+	it("should return -100 if negative min and max are user-defined, regardless of the dynamic range passed in", function () {
+ var state = createState(-40, -50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -50.1, -20, -45);
+
+ expect(result).toBe(-100);
+ });
+});
+
+describe("percentage calculations for easy pie charts with invalid input", function () {
+
+ it("should return 0.1 if value undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, null, 40, 50);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return positive value if min is undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 1, null, 2);
+
+ expect(result).toBe(50);
+ });
+
+ it("should return positive if max is undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 21, 42, null);
+
+ expect(result).toBe(50);
+ });
+});
+
+function createState(min, max) {
+ // create a fake state with only the needed properties.
+ return {
+ tmp: {
+ easyPieChartMin: min,
+ easyPieChartMax: max
+ }
+ };
+}
diff --git a/tests/web/fixtures/easypiechart.chart.fixture1.html b/tests/web/fixtures/easypiechart.chart.fixture1.html
new file mode 100644
index 0000000..f0f4eb7
--- /dev/null
+++ b/tests/web/fixtures/easypiechart.chart.fixture1.html
@@ -0,0 +1,6 @@
+<div data-netdata="system.cpu"
+ data-chart-library="easypiechart"
+ data-width="5%"
+ data-height="20"
+ data-after="-30"
+></div> \ No newline at end of file
diff --git a/tests/web/karma.conf.js b/tests/web/karma.conf.js
new file mode 100644
index 0000000..b3ee094
--- /dev/null
+++ b/tests/web/karma.conf.js
@@ -0,0 +1,110 @@
+// Karma configuration
+// Generated on Sun Jul 16 2017 02:28:05 GMT+0200 (CEST)
+
+module.exports = function (config) {
+ config.set({
+
+ // base path that will be used to resolve all patterns (eg. files, exclude)
+ // this path should always resolve so that "." is the "netdata" root folder.
+ basePath: '../../',
+
+ // frameworks to use
+ // available frameworks: https://npmjs.org/browse/keyword/karma-adapter
+ frameworks: ['jasmine'],
+
+
+ // list of files / patterns to load in the browser
+ files: [
+ // order matters! load jquery libraries first
+ 'web/lib/jquery*.js',
+ // our jasmine libs and fixtures
+ 'tests/web/lib/*.js',
+ 'tests/web/fixtures/*.html',
+ // then bootstrap
+ 'web/lib/bootstrap*.js',
+ // then the rest
+ 'web/lib/perfect-scrollbar*.js',
+ 'web/lib/dygraph*.js',
+ 'web/lib/gauge*.js',
+ 'web/lib/morris*.js',
+ 'web/lib/raphael*.js',
+ 'web/lib/tableExport*.js',
+ 'web/lib/d3*.js',
+ 'web/lib/c3*.js',
+ // some CSS
+ 'web/css/*.css',
+ 'web/dashboard.css',
+ // our dashboard
+ 'web/dashboard.js',
+ // finally our test specs
+ 'tests/web/*.spec.js',
+ ],
+
+
+ // list of files to exclude
+ exclude: [],
+
+
+ // preprocess matching files before serving them to the browser
+ // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
+ preprocessors: {
+ 'web/dashboard.js': ['coverage']
+ },
+
+
+ // test results reporter to use
+ // possible values: 'dots', 'progress'
+ // available reporters: https://npmjs.org/browse/keyword/karma-reporter
+ reporters: ['progress', 'coverage'],
+
+ // optionally, configure the reporter
+ coverageReporter: {
+ type : 'html',
+ dir : 'coverage/'
+ },
+
+ // web server port
+ port: 9876,
+
+
+ // enable / disable colors in the output (reporters and logs)
+ colors: true,
+
+
+ // level of logging
+ // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
+ logLevel: config.LOG_INFO,
+
+
+ // enable / disable watching file and executing tests whenever any file changes
+ autoWatch: false,
+ // not needed with WebStorm. Just hit Alt+Shift+R to rerun.
+
+ // start these browsers
+ // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
+ browsers: ['Chromium', 'ChromiumHeadless'],
+
+ customLaunchers: {
+ // Headless browsers could be useful for CI integration, if installed.
+ ChromiumHeadless: {
+ // needs Chrome/Chromium version >= 59
+ // see https://chromium.googlesource.com/chromium/src/+/lkgr/headless/README.md
+ base: "Chromium",
+ flags: [
+ "--headless",
+ "--disable-gpu",
+ // Without a remote debugging port, Chromium exits immediately.
+ "--remote-debugging-port=9222"
+ ]
+ }
+ },
+
+ // Continuous Integration mode
+ // if true, Karma captures browsers, runs the tests and exits
+ singleRun: false,
+
+ // Concurrency level
+ // how many browser should be started simultaneous
+	// how many browsers should be started simultaneously
+ })
+};
diff --git a/tests/web/lib/jasmine-jquery.js b/tests/web/lib/jasmine-jquery.js
new file mode 100644
index 0000000..6e4611c
--- /dev/null
+++ b/tests/web/lib/jasmine-jquery.js
@@ -0,0 +1,841 @@
+/*!
+ Jasmine-jQuery: a set of jQuery helpers for Jasmine tests.
+
+ Version 2.1.1
+
+ https://github.com/velesin/jasmine-jquery
+
+ Copyright (c) 2010-2014 Wojciech Zawistowski, Travis Jeffery
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+(function (root, factory) {
+ if (typeof module !== 'undefined' && module.exports && typeof exports !== 'undefined') {
+ factory(root, root.jasmine, require('jquery'));
+ } else {
+ factory(root, root.jasmine, root.jQuery);
+ }
+}((function() {return this; })(), function (window, jasmine, $) { "use strict";
+
+ jasmine.spiedEventsKey = function (selector, eventName) {
+ return [$(selector).selector, eventName].toString()
+ }
+
+ jasmine.getFixtures = function () {
+ return jasmine.currentFixtures_ = jasmine.currentFixtures_ || new jasmine.Fixtures()
+ }
+
+ jasmine.getStyleFixtures = function () {
+ return jasmine.currentStyleFixtures_ = jasmine.currentStyleFixtures_ || new jasmine.StyleFixtures()
+ }
+
+ jasmine.Fixtures = function () {
+ this.containerId = 'jasmine-fixtures'
+ this.fixturesCache_ = {}
+ this.fixturesPath = 'spec/javascripts/fixtures'
+ }
+
+ jasmine.Fixtures.prototype.set = function (html) {
+ this.cleanUp()
+ return this.createContainer_(html)
+ }
+
+ jasmine.Fixtures.prototype.appendSet= function (html) {
+ this.addToContainer_(html)
+ }
+
+ jasmine.Fixtures.prototype.preload = function () {
+ this.read.apply(this, arguments)
+ }
+
+ jasmine.Fixtures.prototype.load = function () {
+ this.cleanUp()
+ this.createContainer_(this.read.apply(this, arguments))
+ }
+
+ jasmine.Fixtures.prototype.appendLoad = function () {
+ this.addToContainer_(this.read.apply(this, arguments))
+ }
+
+ jasmine.Fixtures.prototype.read = function () {
+ var htmlChunks = []
+ , fixtureUrls = arguments
+
+ for(var urlCount = fixtureUrls.length, urlIndex = 0; urlIndex < urlCount; urlIndex++) {
+ htmlChunks.push(this.getFixtureHtml_(fixtureUrls[urlIndex]))
+ }
+
+ return htmlChunks.join('')
+ }
+
+ jasmine.Fixtures.prototype.clearCache = function () {
+ this.fixturesCache_ = {}
+ }
+
+ jasmine.Fixtures.prototype.cleanUp = function () {
+ $('#' + this.containerId).remove()
+ }
+
+ jasmine.Fixtures.prototype.sandbox = function (attributes) {
+ var attributesToSet = attributes || {}
+ return $('<div id="sandbox" />').attr(attributesToSet)
+ }
+
+ jasmine.Fixtures.prototype.createContainer_ = function (html) {
+ var container = $('<div>')
+ .attr('id', this.containerId)
+ .html(html)
+
+ $(document.body).append(container)
+ return container
+ }
+
+ jasmine.Fixtures.prototype.addToContainer_ = function (html){
+ var container = $(document.body).find('#'+this.containerId).append(html)
+
+ if (!container.length) {
+ this.createContainer_(html)
+ }
+ }
+
+ jasmine.Fixtures.prototype.getFixtureHtml_ = function (url) {
+ if (typeof this.fixturesCache_[url] === 'undefined') {
+ this.loadFixtureIntoCache_(url)
+ }
+ return this.fixturesCache_[url]
+ }
+
+ jasmine.Fixtures.prototype.loadFixtureIntoCache_ = function (relativeUrl) {
+ var self = this
+ , url = this.makeFixtureUrl_(relativeUrl)
+ , htmlText = ''
+ , request = $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ url: url,
+ dataType: 'html',
+ success: function (data, status, $xhr) {
+ htmlText = $xhr.responseText
+ }
+ }).fail(function ($xhr, status, err) {
+ throw new Error('Fixture could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ })
+
+ var scripts = $($.parseHTML(htmlText, true)).find('script[src]') || [];
+
+ scripts.each(function(){
+ $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ dataType: 'script',
+ url: $(this).attr('src'),
+ success: function (data, status, $xhr) {
+ htmlText += '<script>' + $xhr.responseText + '</script>'
+ },
+ error: function ($xhr, status, err) {
+ throw new Error('Script could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ }
+ });
+ })
+
+ self.fixturesCache_[relativeUrl] = htmlText;
+ }
+
+ jasmine.Fixtures.prototype.makeFixtureUrl_ = function (relativeUrl){
+ return this.fixturesPath.match('/$') ? this.fixturesPath + relativeUrl : this.fixturesPath + '/' + relativeUrl
+ }
+
+ jasmine.Fixtures.prototype.proxyCallTo_ = function (methodName, passedArguments) {
+ return this[methodName].apply(this, passedArguments)
+ }
+
+
+ jasmine.StyleFixtures = function () {
+ this.fixturesCache_ = {}
+ this.fixturesNodes_ = []
+ this.fixturesPath = 'spec/javascripts/fixtures'
+ }
+
+ jasmine.StyleFixtures.prototype.set = function (css) {
+ this.cleanUp()
+ this.createStyle_(css)
+ }
+
+ jasmine.StyleFixtures.prototype.appendSet = function (css) {
+ this.createStyle_(css)
+ }
+
+ jasmine.StyleFixtures.prototype.preload = function () {
+ this.read_.apply(this, arguments)
+ }
+
+ jasmine.StyleFixtures.prototype.load = function () {
+ this.cleanUp()
+ this.createStyle_(this.read_.apply(this, arguments))
+ }
+
+ jasmine.StyleFixtures.prototype.appendLoad = function () {
+ this.createStyle_(this.read_.apply(this, arguments))
+ }
+
+ jasmine.StyleFixtures.prototype.cleanUp = function () {
+ while(this.fixturesNodes_.length) {
+ this.fixturesNodes_.pop().remove()
+ }
+ }
+
+ jasmine.StyleFixtures.prototype.createStyle_ = function (html) {
+ var styleText = $('<div></div>').html(html).text()
+ , style = $('<style>' + styleText + '</style>')
+
+ this.fixturesNodes_.push(style)
+ $('head').append(style)
+ }
+
+ jasmine.StyleFixtures.prototype.clearCache = jasmine.Fixtures.prototype.clearCache
+ jasmine.StyleFixtures.prototype.read_ = jasmine.Fixtures.prototype.read
+ jasmine.StyleFixtures.prototype.getFixtureHtml_ = jasmine.Fixtures.prototype.getFixtureHtml_
+ jasmine.StyleFixtures.prototype.loadFixtureIntoCache_ = jasmine.Fixtures.prototype.loadFixtureIntoCache_
+ jasmine.StyleFixtures.prototype.makeFixtureUrl_ = jasmine.Fixtures.prototype.makeFixtureUrl_
+ jasmine.StyleFixtures.prototype.proxyCallTo_ = jasmine.Fixtures.prototype.proxyCallTo_
+
+ jasmine.getJSONFixtures = function () {
+ return jasmine.currentJSONFixtures_ = jasmine.currentJSONFixtures_ || new jasmine.JSONFixtures()
+ }
+
+ jasmine.JSONFixtures = function () {
+ this.fixturesCache_ = {}
+ this.fixturesPath = 'spec/javascripts/fixtures/json'
+ }
+
+ jasmine.JSONFixtures.prototype.load = function () {
+ this.read.apply(this, arguments)
+ return this.fixturesCache_
+ }
+
+ jasmine.JSONFixtures.prototype.read = function () {
+ var fixtureUrls = arguments
+
+ for(var urlCount = fixtureUrls.length, urlIndex = 0; urlIndex < urlCount; urlIndex++) {
+ this.getFixtureData_(fixtureUrls[urlIndex])
+ }
+
+ return this.fixturesCache_
+ }
+
+ jasmine.JSONFixtures.prototype.clearCache = function () {
+ this.fixturesCache_ = {}
+ }
+
+ jasmine.JSONFixtures.prototype.getFixtureData_ = function (url) {
+ if (!this.fixturesCache_[url]) this.loadFixtureIntoCache_(url)
+ return this.fixturesCache_[url]
+ }
+
+ jasmine.JSONFixtures.prototype.loadFixtureIntoCache_ = function (relativeUrl) {
+ var self = this
+ , url = this.fixturesPath.match('/$') ? this.fixturesPath + relativeUrl : this.fixturesPath + '/' + relativeUrl
+
+ $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ dataType: 'json',
+ url: url,
+ success: function (data) {
+ self.fixturesCache_[relativeUrl] = data
+ },
+ error: function ($xhr, status, err) {
+ throw new Error('JSONFixture could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ }
+ })
+ }
+
+ jasmine.JSONFixtures.prototype.proxyCallTo_ = function (methodName, passedArguments) {
+ return this[methodName].apply(this, passedArguments)
+ }
+
+ jasmine.jQuery = function () {}
+
+ jasmine.jQuery.browserTagCaseIndependentHtml = function (html) {
+ return $('<div/>').append(html).html()
+ }
+
+ jasmine.jQuery.elementToString = function (element) {
+ return $(element).map(function () { return this.outerHTML; }).toArray().join(', ')
+ }
+
+ var data = {
+ spiedEvents: {}
+ , handlers: []
+ }
+
+ jasmine.jQuery.events = {
+ spyOn: function (selector, eventName) {
+ var handler = function (e) {
+ var calls = (typeof data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] !== 'undefined') ? data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : 0
+ data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] = {
+ args: jasmine.util.argsToArray(arguments),
+ calls: ++calls
+ }
+ }
+
+ $(selector).on(eventName, handler)
+ data.handlers.push(handler)
+
+ return {
+ selector: selector,
+ eventName: eventName,
+ handler: handler,
+ reset: function (){
+ delete data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ },
+ calls: {
+ count: function () {
+ return data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] ?
+ data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : 0;
+ },
+ any: function () {
+ return data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] ?
+ !!data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : false;
+ }
+ }
+ }
+ },
+
+ args: function (selector, eventName) {
+ var actualArgs = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].args
+
+ if (!actualArgs) {
+ throw "There is no spy for " + eventName + " on " + selector.toString() + ". Make sure to create a spy using spyOnEvent."
+ }
+
+ return actualArgs
+ },
+
+ wasTriggered: function (selector, eventName) {
+ return !!(data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)])
+ },
+
+ wasTriggeredWith: function (selector, eventName, expectedArgs, util, customEqualityTesters) {
+ var actualArgs = jasmine.jQuery.events.args(selector, eventName).slice(1)
+
+ if (Object.prototype.toString.call(expectedArgs) !== '[object Array]')
+ actualArgs = actualArgs[0]
+
+ return util.equals(actualArgs, expectedArgs, customEqualityTesters)
+ },
+
+ wasPrevented: function (selector, eventName) {
+ var spiedEvent = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ , args = (jasmine.util.isUndefined(spiedEvent)) ? {} : spiedEvent.args
+ , e = args ? args[0] : undefined
+
+ return e && e.isDefaultPrevented()
+ },
+
+ wasStopped: function (selector, eventName) {
+ var spiedEvent = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ , args = (jasmine.util.isUndefined(spiedEvent)) ? {} : spiedEvent.args
+ , e = args ? args[0] : undefined
+
+ return e && e.isPropagationStopped()
+ },
+
+ cleanUp: function () {
+ data.spiedEvents = {}
+ data.handlers = []
+ }
+ }
+
+ var hasProperty = function (actualValue, expectedValue) {
+ if (expectedValue === undefined)
+ return actualValue !== undefined
+
+ return actualValue === expectedValue
+ }
+
+ beforeEach(function () {
+ jasmine.addMatchers({
+ toHaveClass: function () {
+ return {
+ compare: function (actual, className) {
+ return { pass: $(actual).hasClass(className) }
+ }
+ }
+ },
+
+ toHaveCss: function () {
+ return {
+ compare: function (actual, css) {
+ var stripCharsRegex = /[\s;\"\']/g
+ for (var prop in css) {
+ var value = css[prop]
+ // see issue #147 on gh
+ ;if ((value === 'auto') && ($(actual).get(0).style[prop] === 'auto')) continue
+ var actualStripped = $(actual).css(prop).replace(stripCharsRegex, '')
+ var valueStripped = value.replace(stripCharsRegex, '')
+ if (actualStripped !== valueStripped) return { pass: false }
+ }
+ return { pass: true }
+ }
+ }
+ },
+
+ toBeVisible: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':visible') }
+ }
+ }
+ },
+
+ toBeHidden: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':hidden') }
+ }
+ }
+ },
+
+ toBeSelected: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':selected') }
+ }
+ }
+ },
+
+ toBeChecked: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':checked') }
+ }
+ }
+ },
+
+ toBeEmpty: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':empty') }
+ }
+ }
+ },
+
+ toBeInDOM: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $.contains(document.documentElement, $(actual)[0]) }
+ }
+ }
+ },
+
+ toExist: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).length }
+ }
+ }
+ },
+
+ toHaveLength: function () {
+ return {
+ compare: function (actual, length) {
+ return { pass: $(actual).length === length }
+ }
+ }
+ },
+
+ toHaveAttr: function () {
+ return {
+ compare: function (actual, attributeName, expectedAttributeValue) {
+ return { pass: hasProperty($(actual).attr(attributeName), expectedAttributeValue) }
+ }
+ }
+ },
+
+ toHaveProp: function () {
+ return {
+ compare: function (actual, propertyName, expectedPropertyValue) {
+ return { pass: hasProperty($(actual).prop(propertyName), expectedPropertyValue) }
+ }
+ }
+ },
+
+ toHaveId: function () {
+ return {
+ compare: function (actual, id) {
+ return { pass: $(actual).attr('id') == id }
+ }
+ }
+ },
+
+ toHaveHtml: function () {
+ return {
+ compare: function (actual, html) {
+ return { pass: $(actual).html() == jasmine.jQuery.browserTagCaseIndependentHtml(html) }
+ }
+ }
+ },
+
+ toContainHtml: function () {
+ return {
+ compare: function (actual, html) {
+ var actualHtml = $(actual).html()
+ , expectedHtml = jasmine.jQuery.browserTagCaseIndependentHtml(html)
+
+ return { pass: (actualHtml.indexOf(expectedHtml) >= 0) }
+ }
+ }
+ },
+
+ toHaveText: function () {
+ return {
+ compare: function (actual, text) {
+ var actualText = $(actual).text()
+ var trimmedText = $.trim(actualText)
+
+ if (text && $.isFunction(text.test)) {
+ return { pass: text.test(actualText) || text.test(trimmedText) }
+ } else {
+ return { pass: (actualText == text || trimmedText == text) }
+ }
+ }
+ }
+ },
+
+ toContainText: function () {
+ return {
+ compare: function (actual, text) {
+ var trimmedText = $.trim($(actual).text())
+
+ if (text && $.isFunction(text.test)) {
+ return { pass: text.test(trimmedText) }
+ } else {
+ return { pass: trimmedText.indexOf(text) != -1 }
+ }
+ }
+ }
+ },
+
+ toHaveValue: function () {
+ return {
+ compare: function (actual, value) {
+ return { pass: $(actual).val() === value }
+ }
+ }
+ },
+
+ toHaveData: function () {
+ return {
+ compare: function (actual, key, expectedValue) {
+ return { pass: hasProperty($(actual).data(key), expectedValue) }
+ }
+ }
+ },
+
+ toContainElement: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual).find(selector).length }
+ }
+ }
+ },
+
+ toBeMatchedBy: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual).filter(selector).length }
+ }
+ }
+ },
+
+ toBeDisabled: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':disabled') }
+ }
+ }
+ },
+
+ toBeFocused: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual)[0] === $(actual)[0].ownerDocument.activeElement }
+ }
+ }
+ },
+
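+ // Inspects jQuery's internal event registry ($._data) and understands
+ // namespaced events, e.g. expect($element).toHandle('click.myPlugin')
+ // ($element and the namespace here are illustrative).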
+ toHandle: function () {
+ return {
+ compare: function (actual, event) {
+ if ( !actual || actual.length === 0 ) return { pass: false };
+ var events = $._data($(actual).get(0), "events")
+
+ if (!events || !event || typeof event !== "string") {
+ return { pass: false }
+ }
+
+ var namespaces = event.split(".")
+ , eventType = namespaces.shift()
+ , sortedNamespaces = namespaces.slice(0).sort()
+ , namespaceRegExp = new RegExp("(^|\\.)" + sortedNamespaces.join("\\.(?:.*\\.)?") + "(\\.|$)")
+
+ if (events[eventType] && namespaces.length) {
+ for (var i = 0; i < events[eventType].length; i++) {
+ var namespace = events[eventType][i].namespace
+
+ if (namespaceRegExp.test(namespace))
+ return { pass: true }
+ }
+ } else {
+ return { pass: (events[eventType] && events[eventType].length > 0) }
+ }
+
+ return { pass: false }
+ }
+ }
+ },
+
+ toHandleWith: function () {
+ return {
+ compare: function (actual, eventName, eventHandler) {
+ if ( !actual || actual.length === 0 ) return { pass: false };
+ var normalizedEventName = eventName.split('.')[0]
+ , events = $._data($(actual).get(0), "events")
+ , stack = events && events[normalizedEventName]
+
+ // guard against elements with no bound handlers, where $._data() returns undefined
+ if (!stack) return { pass: false }
+
+ for (var i = 0; i < stack.length; i++) {
+ if (stack[i].handler == eventHandler) return { pass: true }
+ }
+
+ return { pass: false }
+ }
+ }
+ },
+
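+ // Event matchers: the *On variants take the event name as the actual value
+ // and a selector as the expected argument, while the plain variants take the
+ // spy object returned by spyOnEvent().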
+ toHaveBeenTriggeredOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasTriggered(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been triggered on " + selector :
+ "Expected event " + actual + " to have been triggered on " + selector
+
+ return result;
+ }
+ }
+ },
+
+ toHaveBeenTriggered: function (){
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasTriggered(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been triggered on " + selector :
+ "Expected event " + eventName + " to have been triggered on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenTriggeredOnAndWith: function (j$, customEqualityTesters) {
+ return {
+ compare: function (actual, selector, expectedArgs) {
+ var wasTriggered = jasmine.jQuery.events.wasTriggered(selector, actual)
+ , result = { pass: wasTriggered && jasmine.jQuery.events.wasTriggeredWith(selector, actual, expectedArgs, j$, customEqualityTesters) }
+
+ if (wasTriggered) {
+ var actualArgs = jasmine.jQuery.events.args(selector, actual)[1]
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been triggered with " + jasmine.pp(expectedArgs) + " but it was triggered with " + jasmine.pp(actualArgs) :
+ "Expected event " + actual + " to have been triggered with " + jasmine.pp(expectedArgs) + " but it was triggered with " + jasmine.pp(actualArgs)
+
+ } else {
+ // the event never fired here, so result.pass is false and the second message below is the one reported
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been triggered on " + selector :
+ "Expected event " + actual + " to have been triggered on " + selector
+ }
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenPreventedOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasPrevented(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been prevented on " + selector :
+ "Expected event " + actual + " to have been prevented on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenPrevented: function () {
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasPrevented(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been prevented on " + selector :
+ "Expected event " + eventName + " to have been prevented on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenStoppedOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasStopped(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been stopped on " + selector :
+ "Expected event " + actual + " to have been stopped on " + selector
+
+ return result;
+ }
+ }
+ },
+
+ toHaveBeenStopped: function () {
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasStopped(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been stopped on " + selector :
+ "Expected event " + eventName + " to have been stopped on " + selector
+
+ return result
+ }
+ }
+ }
+ })
+
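+ // Custom equality so comparisons between jQuery objects and/or DOM nodes are
+ // resolved with $.fn.is() instead of object identity.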
+ jasmine.getEnv().addCustomEqualityTester(function(a, b) {
+ if (a && b) {
+ if (a instanceof $ || jasmine.isDomNode(a)) {
+ var $a = $(a)
+
+ if (b instanceof $)
+ return $a.length == b.length && $a.is(b)
+
+ return $a.is(b);
+ }
+
+ if (b instanceof $ || jasmine.isDomNode(b)) {
+ var $b = $(b)
+
+ if (a instanceof $)
+ return a.length == $b.length && $b.is(a)
+
+ return $b.is(a);
+ }
+ }
+ })
+
+ jasmine.getEnv().addCustomEqualityTester(function (a, b) {
+ // compare with .length instead of .size(), which was deprecated and removed in jQuery 3
+ if (a instanceof $ && b instanceof $ && a.length == b.length)
+ return a.is(b)
+ })
+ })
+
+ afterEach(function () {
+ jasmine.getFixtures().cleanUp()
+ jasmine.getStyleFixtures().cleanUp()
+ jasmine.jQuery.events.cleanUp()
+ })
+
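+ // Global convenience wrappers, so specs can call loadFixtures(), setFixtures(),
+ // spyOnEvent() and friends without going through the jasmine.* accessors.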
+ window.readFixtures = function () {
+ return jasmine.getFixtures().proxyCallTo_('read', arguments)
+ }
+
+ window.preloadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('preload', arguments)
+ }
+
+ window.loadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.appendLoadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('appendLoad', arguments)
+ }
+
+ window.setFixtures = function (html) {
+ return jasmine.getFixtures().proxyCallTo_('set', arguments)
+ }
+
+ window.appendSetFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('appendSet', arguments)
+ }
+
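+ // sandbox() builds a <div id="sandbox"> carrying any extra attributes passed
+ // in; a typical (illustrative) use is setFixtures(sandbox({ class: 'box' })).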
+ window.sandbox = function (attributes) {
+ return jasmine.getFixtures().sandbox(attributes)
+ }
+
+ window.spyOnEvent = function (selector, eventName) {
+ return jasmine.jQuery.events.spyOn(selector, eventName)
+ }
+
+ window.preloadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('preload', arguments)
+ }
+
+ window.loadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.appendLoadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('appendLoad', arguments)
+ }
+
+ window.setStyleFixtures = function (html) {
+ jasmine.getStyleFixtures().proxyCallTo_('set', arguments)
+ }
+
+ window.appendSetStyleFixtures = function (html) {
+ jasmine.getStyleFixtures().proxyCallTo_('appendSet', arguments)
+ }
+
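+ // JSON fixture helpers: loadJSONFixtures() fetches and caches the named files
+ // under the configured JSON fixtures path, and getJSONFixture(url) returns the
+ // parsed object for one of them.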
+ window.loadJSONFixtures = function () {
+ return jasmine.getJSONFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.getJSONFixture = function (url) {
+ return jasmine.getJSONFixtures().proxyCallTo_('read', arguments)[url]
+ }
+}));