author      Daniel Baumann <daniel.baumann@progress-linux.org>  2019-09-03 10:23:38 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2019-09-03 10:23:38 +0000
commit      574098461cd45be12a497afbdac6f93c58978387 (patch)
tree        9eb60a5930b7c20d42f7fde1e234cae3968ed3d9 /tests
parent      Adding upstream version 1.16.1. (diff)
download    netdata-574098461cd45be12a497afbdac6f93c58978387.tar.xz
            netdata-574098461cd45be12a497afbdac6f93c58978387.zip
Adding upstream version 1.17.0. (upstream/1.17.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests')
-rw-r--r--  tests/Makefile.am | 3
-rw-r--r--  tests/Makefile.in | 631
-rw-r--r--  tests/README.md | 56
-rw-r--r--  tests/acls/acl.sh | 119
-rw-r--r--  tests/acls/netdata.cfg | 20
-rw-r--r--  tests/acls/netdata.ssl.cfg | 24
-rw-r--r--  tests/backends/prometheus-avg-oldunits.txt | 148
-rw-r--r--  tests/backends/prometheus-avg.txt | 148
-rw-r--r--  tests/backends/prometheus-raw.txt | 156
-rwxr-xr-x  tests/backends/prometheus.bats | 31
-rw-r--r--  tests/health_mgmtapi/README.md | 12
-rw-r--r--  tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/HOSTS-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/RESET-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_2-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_3-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json | 1
-rw-r--r--  tests/health_mgmtapi/health-cmdapi-test.sh | 226
-rwxr-xr-x  tests/installer/checksums.sh | 51
-rwxr-xr-x  tests/installer/slack.sh | 65
-rw-r--r--  tests/k6/data.js | 67
-rwxr-xr-x  tests/lifecycle.bats | 61
-rw-r--r--  tests/profile/Makefile | 53
-rw-r--r--  tests/profile/benchmark-dictionary.c | 130
-rw-r--r--  tests/profile/benchmark-line-parsing.c | 707
-rw-r--r--  tests/profile/benchmark-procfile-parser.c | 329
-rw-r--r--  tests/profile/benchmark-registry.c | 227
-rw-r--r--  tests/profile/benchmark-value-pairs.c | 623
-rw-r--r--  tests/profile/statsd-stress.c | 151
-rw-r--r--  tests/profile/test-eval.c | 299
-rwxr-xr-x  tests/updater_checks.bats | 70
-rwxr-xr-x  tests/updater_checks.sh | 78
-rw-r--r--  tests/urls/request.sh | 301
-rw-r--r--  tests/urls/request.sh.in | 10
44 files changed, 1311 insertions, 3501 deletions
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 92e6db0f3..0aa5af247 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -5,11 +5,8 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
CLEANFILES = \
health_mgmtapi/health-cmdapi-test.sh \
-<<<<<<< HEAD
acls/acl.sh \
-=======
urls/request.sh \
->>>>>>> 63a4cadd346df71255d2350128eebcf317e81d0f
$(NULL)
include $(top_srcdir)/build/subst.inc
diff --git a/tests/Makefile.in b/tests/Makefile.in
new file mode 100644
index 000000000..a539b06ca
--- /dev/null
+++ b/tests/Makefile.in
@@ -0,0 +1,631 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = tests
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_SCRIPTS) \
+ $(dist_plugins_SCRIPTS) $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)"
+SCRIPTS = $(dist_noinst_SCRIPTS) $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ health_mgmtapi/health-cmdapi-test.sh \
+ acls/acl.sh \
+ urls/request.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_noinst_DATA = \
+ README.md \
+ web/lib/jasmine-jquery.js \
+ web/easypiechart.chart.spec.js \
+ web/easypiechart.percentage.spec.js \
+ web/karma.conf.js \
+ web/fixtures/easypiechart.chart.fixture1.html \
+ node.d/fronius.chart.spec.js \
+ node.d/fronius.parse.spec.js \
+ node.d/fronius.process.spec.js \
+ node.d/fronius.validation.spec.js \
+ health_mgmtapi/health-cmdapi-test.sh.in \
+ acls/acl.sh.in \
+ urls/request.sh.in \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ health_mgmtapi/health-cmdapi-test.sh \
+ acls/acl.sh \
+ urls/request.sh \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ stress.sh \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tests/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu tests/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/tests/README.md b/tests/README.md
index 4ac3f2105..fe8b8b203 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -1,4 +1,5 @@
# Testing
+
This readme is a manual on how to get started with unit testing on javascript and nodejs
Original author: BrainDoctor (github), July 2017
@@ -24,10 +25,10 @@ Other browsers work too (Chrome, Firefox). However, only the Chromium Browser 59
The commands above leave me with the following versions (July 2017):
- - nodejs: v4.2.6
- - npm: 3.5.2
- - chromium-browser: 59.0.3071.109
- - WebStorm (optional): 2017.1.4
+- nodejs: v4.2.6
+- npm: 3.5.2
+- chromium-browser: 59.0.3071.109
+- WebStorm (optional): 2017.1.4
## Configuration
@@ -46,22 +47,24 @@ If you use the JetBrains WebStorm IDE, you can integrate the karma runtime.
#### for Karma (Client side testing)
Headless Chromium:
-1. Run > Edit Configurations
-2. "+" > Karma
-3. - Name: Karma Headless Chromium
- - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
- - Browsers to start: ChromiumHeadless
- - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
- - Karma package: /path/to/your/netdata/node_modules/karma
+
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Headless Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: ChromiumHeadless
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
GUI Chromium is similar:
-1. Run > Edit Configurations
-2. "+" > Karma
-3. - Name: Karma Chromium
- - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
- - Browsers to start: Chromium
- - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
- - Karma package: /path/to/your/netdata/node_modules/karma
+
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: Chromium
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
You may add other browsers too (comma separated). With the "Browsers to start" field you can override any settings in karma.conf.js.
@@ -69,18 +72,19 @@ Also it is recommended to install WebStorm IDE Extension/Addon to Chrome/Chromiu
#### for node.d plugins (nodejs)
-1. Run > Edit Configurations
-2. "+" > Node.js
-3. - Name: Node.d plugins
- - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
- - JavaScript file: node_modules/jasmine-node/bin/jasmine-node
- - Application parameters: --captureExceptions tests/node.d
+1. Run > Edit Configurations
+2. "+" > Node.js
+3. - Name: Node.d plugins
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - JavaScript file: node_modules/jasmine-node/bin/jasmine-node
+ - Application parameters: --captureExceptions tests/node.d
## Running
### In WebStorm
#### Karma
+
Just run the configured run configurations and they produce nice test trees:
![karma_run_2](https://user-images.githubusercontent.com/12159026/28277789-559149f6-6b1b-11e7-9cc7-a81d81d12c35.png)
@@ -99,6 +103,7 @@ cd /path/to/your/netdata
nodejs ./node_modules/karma/bin/karma start tests/web/karma.conf.js --single-run=true --browsers=ChromiumHeadless
```
+
will start the karma server, start chromium in headless mode and exit.
If a test fails, it produces even a stack trace:
@@ -135,5 +140,4 @@ The karma and node.d runners can be integrated in Travis (AFAIK), but that is ou
Note: Karma is for browser-testing. On a build server, no GUI or browser might by available, unless browsers support headless mode.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/tests/acls/acl.sh b/tests/acls/acl.sh
new file mode 100644
index 000000000..772d66408
--- /dev/null
+++ b/tests/acls/acl.sh
@@ -0,0 +1,119 @@
+#!/bin/bash -x
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+BASICURL="http://127.0.0.1"
+BASICURLS="https://127.0.0.1"
+
+NETDATA_VARLIB_DIR="/var/lib/netdata"
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;43m'
+
+#change the previous acl file and with a new
+#and store it on a new file
+change_file(){
+ sed "s/$1/$2/g" netdata.cfg > "$4"
+}
+
+change_ssl_file(){
+ KEYROW="ssl key = $3/key.pem"
+ CERTROW="ssl certificate = $3/cert.pem"
+ sed "s@ssl key =@$KEYROW@g" netdata.ssl.cfg > tmp
+ sed "s@ssl certificate =@$CERTROW@g" tmp > tmp2
+ sed "s/$1/$2/g" tmp2 > "$4"
+}
+
+run_acl_tests() {
+ #Give a time for netdata start properly
+ sleep 2
+
+ curl -v -k --tls-max 1.2 --create-dirs -o index.html "$2" 2> log_index.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o netdata.txt "$2/netdata.conf" 2> log_nc.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o badge.csv "$2/api/v1/badge.svg?chart=cpu.cpu0_interrupts" 2> log_badge.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o info.txt "$2/api/v1/info" 2> log_info.txt
+ curl -H "X-Auth-Token: $1" -v -k --tls-max 1.2 --create-dirs -o health.csv "$2/api/v1/manage/health?cmd=LIST" 2> log_health.txt
+
+ TOT=$(grep -c "HTTP/1.1 301" log_*.txt | cut -d: -f2| grep -c 1)
+ if [ "$TOT" -ne "$4" ]; then
+ echo -e "${RED}I got a wrong number of redirects($TOT) when SSL is activated, It was expected $4"
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ killall netdata
+ exit 1
+ elif [ "$TOT" -eq "$4" ] && [ "$4" -ne "0" ]; then
+ echo -e "${YELLOW}I got the correct number of redirects($4) when SSL is activated and I try to access with HTTP."
+ return
+ fi
+
+ TOT=$(grep -c "HTTP/1.1 200 OK" log_* | cut -d: -f2| grep -c 1)
+ if [ "$TOT" -ne "$3" ]; then
+ echo -e "${RED}I got a wrong number of \"200 OK\" from the queries, it was expected $3."
+ killall netdata
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ exit 1
+ fi
+
+ echo -e "${GREEN}ACLs were applied correctly"
+}
+
+CONF=$(grep "bind" netdata.cfg)
+MUSER=$(grep run netdata.cfg | cut -d= -f2|sed 's/^[ \t]*//')
+
+openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -sha512 -subj "/C=US/ST=Denied/L=Somewhere/O=Dis/CN=www.example.com" -keyout key.pem -out cert.pem
+chown "$MUSER" key.pem cert.pem
+CWD=$(pwd)
+
+if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+else
+ TOKEN="NULL"
+fi
+
+change_file "$CONF" " bind to = *" "$CWD" "netdata.conf.test0"
+netdata -c "netdata.conf.test0"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=optional *:20002=dashboard|registry" "$CWD" "netdata.conf.test1"
+netdata -c "netdata.conf.test1"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=force *:20002=dashboard|registry" "$CWD" "netdata.conf.test2"
+netdata -c "netdata.conf.test2"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management^SSL=optional *:20001=dashboard|registry|netdata.conf^SSL=force" "$CWD" "netdata.conf.test3"
+netdata -c "netdata.conf.test3"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+killall netdata
+
+rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+echo "All the tests were successful"
diff --git a/tests/acls/netdata.cfg b/tests/acls/netdata.cfg
deleted file mode 100644
index 1dcb4a5c6..000000000
--- a/tests/acls/netdata.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-# netdata configuration
-#
-# You can download the latest version of this file, using:
-#
-# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
-# or
-# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
-#
-# You can uncomment and change any of the options below.
-# The value shown in the commented settings, is the default value.
-#
-
-[global]
- run as user = netdata
-
- # the default database size - 1 hour
- history = 3600
-
- # by default do not expose the netdata port
- bind to = localhost
diff --git a/tests/acls/netdata.ssl.cfg b/tests/acls/netdata.ssl.cfg
deleted file mode 100644
index 28e0030d5..000000000
--- a/tests/acls/netdata.ssl.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-# netdata configuration
-#
-# You can download the latest version of this file, using:
-#
-# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
-# or
-# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
-#
-# You can uncomment and change any of the options below.
-# The value shown in the commented settings, is the default value.
-#
-
-[global]
- run as user = netdata
-
- # the default database size - 1 hour
- history = 3600
-
- # by default do not expose the netdata port
- bind to = localhost
-
-[web]
- ssl key =
- ssl certificate =
diff --git a/tests/backends/prometheus-avg-oldunits.txt b/tests/backends/prometheus-avg-oldunits.txt
deleted file mode 100644
index 53ee8ffa9..000000000
--- a/tests/backends/prometheus-avg-oldunits.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-nd_apps_cpu_percent_average
-nd_apps_cpu_system_percent_average
-nd_apps_cpu_user_percent_average
-nd_apps_files_open_files_average
-nd_apps_lreads_kilobytes_persec_average
-nd_apps_lwrites_kilobytes_persec_average
-nd_apps_major_faults_page_faults_persec_average
-nd_apps_mem_MB_average
-nd_apps_minor_faults_page_faults_persec_average
-nd_apps_pipes_open_pipes_average
-nd_apps_preads_kilobytes_persec_average
-nd_apps_processes_processes_average
-nd_apps_pwrites_kilobytes_persec_average
-nd_apps_sockets_open_sockets_average
-nd_apps_swap_MB_average
-nd_apps_threads_threads_average
-nd_apps_vmem_MB_average
-nd_cpu_core_throttling_events_persec_average
-nd_cpu_cpu_percent_average
-nd_cpu_interrupts_interrupts_persec_average
-nd_cpu_softirqs_softirqs_persec_average
-nd_cpu_softnet_stat_events_persec_average
-nd_disk_avgsz_kilobytes_per_operation_average
-nd_disk_await_ms_per_operation_average
-nd_disk_backlog_milliseconds_average
-nd_disk_inodes_Inodes_average
-nd_disk_io_kilobytes_persec_average
-nd_disk_iotime_milliseconds_persec_average
-nd_disk_mops_merged_operations_persec_average
-nd_disk_ops_operations_persec_average
-nd_disk_space_GB_average
-nd_disk_svctm_ms_per_operation_average
-nd_disk_util___of_time_working_average
-nd_ip_bcast_kilobits_persec_average
-nd_ip_bcastpkts_packets_persec_average
-nd_ip_ecnpkts_packets_persec_average
-nd_ip_inerrors_packets_persec_average
-nd_ip_mcast_kilobits_persec_average
-nd_ip_mcastpkts_packets_persec_average
-nd_ip_tcp_accept_queue_packets_persec_average
-nd_ip_tcpconnaborts_connections_persec_average
-nd_ip_tcpofo_packets_persec_average
-nd_ip_tcpreorders_packets_persec_average
-nd_ipv4_errors_packets_persec_average
-nd_ipv4_icmp_errors_packets_persec_average
-nd_ipv4_icmpmsg_packets_persec_average
-nd_ipv4_icmp_packets_persec_average
-nd_ipv4_packets_packets_persec_average
-nd_ipv4_sockstat_sockets_sockets_average
-nd_ipv4_sockstat_tcp_mem_KB_average
-nd_ipv4_sockstat_tcp_sockets_sockets_average
-nd_ipv4_sockstat_udp_mem_KB_average
-nd_ipv4_sockstat_udp_sockets_sockets_average
-nd_ipv4_tcperrors_packets_persec_average
-nd_ipv4_tcphandshake_events_persec_average
-nd_ipv4_tcpopens_connections_persec_average
-nd_ipv4_tcppackets_packets_persec_average
-nd_ipv4_tcpsock_active_connections_average
-nd_ipv4_udperrors_events_persec_average
-nd_ipv4_udppackets_packets_persec_average
-nd_ipv6_ect_packets_persec_average
-nd_ipv6_errors_packets_persec_average
-nd_ipv6_icmperrors_errors_persec_average
-nd_ipv6_icmp_messages_persec_average
-nd_ipv6_icmpmldv2_reports_persec_average
-nd_ipv6_icmpneighbor_messages_persec_average
-nd_ipv6_icmprouter_messages_persec_average
-nd_ipv6_icmptypes_messages_persec_average
-nd_ipv6_mcast_kilobits_persec_average
-nd_ipv6_mcastpkts_packets_persec_average
-nd_ipv6_packets_packets_persec_average
-nd_ipv6_sockstat6_raw_sockets_sockets_average
-nd_ipv6_sockstat6_tcp_sockets_sockets_average
-nd_ipv6_sockstat6_udp_sockets_sockets_average
-nd_ipv6_udperrors_events_persec_average
-nd_ipv6_udppackets_packets_persec_average
-nd_mem_available_MB_average
-nd_mem_committed_MB_average
-nd_mem_kernel_MB_average
-nd_mem_pgfaults_page_faults_persec_average
-nd_mem_slab_MB_average
-nd_mem_transparent_hugepages_MB_average
-nd_mem_writeback_MB_average
-nd_netdata_apps_children_fix_percent_average
-nd_netdata_apps_cpu_milliseconds_persec_average
-nd_netdata_apps_fix_percent_average
-nd_netdata_apps_sizes_files_persec_average
-nd_netdata_clients_connected_clients_average
-nd_netdata_compression_ratio_percent_average
-nd_netdata_go_plugin_execution_time_ms_average
-nd_netdata_net_kilobits_persec_average
-nd_netdata_plugin_cgroups_cpu_milliseconds_persec_average
-nd_netdata_plugin_diskspace_dt_milliseconds_run_average
-nd_netdata_plugin_diskspace_milliseconds_persec_average
-nd_netdata_plugin_proc_cpu_milliseconds_persec_average
-nd_netdata_plugin_proc_modules_milliseconds_run_average
-nd_netdata_plugin_tc_cpu_milliseconds_persec_average
-nd_netdata_plugin_tc_time_milliseconds_run_average
-nd_netdata_private_charts_charts_average
-nd_netdata_pythond_runtime_ms_average
-nd_netdata_requests_requests_persec_average
-nd_netdata_response_time_milliseconds_request_average
-nd_netdata_server_cpu_milliseconds_persec_average
-nd_netdata_statsd_bytes_kilobits_persec_average
-nd_netdata_statsd_cpu_milliseconds_persec_average
-nd_netdata_statsd_events_events_persec_average
-nd_netdata_statsd_metrics_metrics_average
-nd_netdata_statsd_packets_packets_persec_average
-nd_netdata_statsd_reads_reads_persec_average
-nd_netdata_statsd_useful_metrics_metrics_average
-nd_netdata_tcp_connected_sockets_average
-nd_netdata_tcp_connects_events_average
-nd_netdata_web_cpu_milliseconds_persec_average
-nd_net_drops_drops_persec_average
-nd_net_net_kilobits_persec_average
-nd_net_packets_packets_persec_average
-nd_services_cpu_percent_average
-nd_services_mem_usage_MB_average
-nd_services_swap_usage_MB_average
-nd_services_throttle_io_ops_read_operations_persec_average
-nd_services_throttle_io_ops_write_operations_persec_average
-nd_services_throttle_io_read_kilobytes_persec_average
-nd_services_throttle_io_write_kilobytes_persec_average
-nd_system_active_processes_processes_average
-nd_system_cpu_percent_average
-nd_system_ctxt_context_switches_persec_average
-nd_system_entropy_entropy_average
-nd_system_forks_processes_persec_average
-nd_system_idlejitter_microseconds_lost_persec_average
-nd_system_interrupts_interrupts_persec_average
-nd_system_intr_interrupts_persec_average
-nd_system_io_kilobytes_persec_average
-nd_system_ipc_semaphore_arrays_arrays_average
-nd_system_ipc_semaphores_semaphores_average
-nd_system_ip_kilobits_persec_average
-nd_system_ipv6_kilobits_persec_average
-nd_system_load_load_average
-nd_system_net_kilobits_persec_average
-nd_system_pgpgio_kilobytes_persec_average
-nd_system_processes_processes_average
-nd_system_ram_MB_average
-nd_system_shared_memory_bytes_bytes_average
-nd_system_shared_memory_segments_segments_average
-nd_system_softirqs_softirqs_persec_average
-nd_system_softnet_stat_events_persec_average
-nd_system_swapio_kilobytes_persec_average
-nd_system_swap_MB_average
-nd_system_uptime_seconds_average
diff --git a/tests/backends/prometheus-avg.txt b/tests/backends/prometheus-avg.txt
deleted file mode 100644
index 1aedff2b5..000000000
--- a/tests/backends/prometheus-avg.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-nd_apps_cpu_percentage_average
-nd_apps_cpu_system_percentage_average
-nd_apps_cpu_user_percentage_average
-nd_apps_files_open_files_average
-nd_apps_lreads_KiB_persec_average
-nd_apps_lwrites_KiB_persec_average
-nd_apps_major_faults_page_faults_persec_average
-nd_apps_mem_MiB_average
-nd_apps_minor_faults_page_faults_persec_average
-nd_apps_pipes_open_pipes_average
-nd_apps_preads_KiB_persec_average
-nd_apps_processes_processes_average
-nd_apps_pwrites_KiB_persec_average
-nd_apps_sockets_open_sockets_average
-nd_apps_swap_MiB_average
-nd_apps_threads_threads_average
-nd_apps_vmem_MiB_average
-nd_cpu_core_throttling_events_persec_average
-nd_cpu_cpu_percentage_average
-nd_cpu_interrupts_interrupts_persec_average
-nd_cpu_softirqs_softirqs_persec_average
-nd_cpu_softnet_stat_events_persec_average
-nd_disk_avgsz_KiB_operation_average
-nd_disk_await_milliseconds_operation_average
-nd_disk_backlog_milliseconds_average
-nd_disk_inodes_inodes_average
-nd_disk_io_KiB_persec_average
-nd_disk_iotime_milliseconds_persec_average
-nd_disk_mops_merged_operations_persec_average
-nd_disk_ops_operations_persec_average
-nd_disk_space_GiB_average
-nd_disk_svctm_milliseconds_operation_average
-nd_disk_util___of_time_working_average
-nd_ip_bcast_kilobits_persec_average
-nd_ip_bcastpkts_packets_persec_average
-nd_ip_ecnpkts_packets_persec_average
-nd_ip_inerrors_packets_persec_average
-nd_ip_mcast_kilobits_persec_average
-nd_ip_mcastpkts_packets_persec_average
-nd_ip_tcp_accept_queue_packets_persec_average
-nd_ip_tcpconnaborts_connections_persec_average
-nd_ip_tcpofo_packets_persec_average
-nd_ip_tcpreorders_packets_persec_average
-nd_ipv4_errors_packets_persec_average
-nd_ipv4_icmp_errors_packets_persec_average
-nd_ipv4_icmpmsg_packets_persec_average
-nd_ipv4_icmp_packets_persec_average
-nd_ipv4_packets_packets_persec_average
-nd_ipv4_sockstat_sockets_sockets_average
-nd_ipv4_sockstat_tcp_mem_KiB_average
-nd_ipv4_sockstat_tcp_sockets_sockets_average
-nd_ipv4_sockstat_udp_mem_KiB_average
-nd_ipv4_sockstat_udp_sockets_sockets_average
-nd_ipv4_tcperrors_packets_persec_average
-nd_ipv4_tcphandshake_events_persec_average
-nd_ipv4_tcpopens_connections_persec_average
-nd_ipv4_tcppackets_packets_persec_average
-nd_ipv4_tcpsock_active_connections_average
-nd_ipv4_udperrors_events_persec_average
-nd_ipv4_udppackets_packets_persec_average
-nd_ipv6_ect_packets_persec_average
-nd_ipv6_errors_packets_persec_average
-nd_ipv6_icmperrors_errors_persec_average
-nd_ipv6_icmp_messages_persec_average
-nd_ipv6_icmpmldv2_reports_persec_average
-nd_ipv6_icmpneighbor_messages_persec_average
-nd_ipv6_icmprouter_messages_persec_average
-nd_ipv6_icmptypes_messages_persec_average
-nd_ipv6_mcast_kilobits_persec_average
-nd_ipv6_mcastpkts_packets_persec_average
-nd_ipv6_packets_packets_persec_average
-nd_ipv6_sockstat6_raw_sockets_sockets_average
-nd_ipv6_sockstat6_tcp_sockets_sockets_average
-nd_ipv6_sockstat6_udp_sockets_sockets_average
-nd_ipv6_udperrors_events_persec_average
-nd_ipv6_udppackets_packets_persec_average
-nd_mem_available_MiB_average
-nd_mem_committed_MiB_average
-nd_mem_kernel_MiB_average
-nd_mem_pgfaults_faults_persec_average
-nd_mem_slab_MiB_average
-nd_mem_transparent_hugepages_MiB_average
-nd_mem_writeback_MiB_average
-nd_netdata_apps_children_fix_percentage_average
-nd_netdata_apps_cpu_milliseconds_persec_average
-nd_netdata_apps_fix_percentage_average
-nd_netdata_apps_sizes_files_persec_average
-nd_netdata_clients_connected_clients_average
-nd_netdata_compression_ratio_percentage_average
-nd_netdata_go_plugin_execution_time_ms_average
-nd_netdata_net_kilobits_persec_average
-nd_netdata_plugin_cgroups_cpu_milliseconds_persec_average
-nd_netdata_plugin_diskspace_dt_milliseconds_run_average
-nd_netdata_plugin_diskspace_milliseconds_persec_average
-nd_netdata_plugin_proc_cpu_milliseconds_persec_average
-nd_netdata_plugin_proc_modules_milliseconds_run_average
-nd_netdata_plugin_tc_cpu_milliseconds_persec_average
-nd_netdata_plugin_tc_time_milliseconds_run_average
-nd_netdata_private_charts_charts_average
-nd_netdata_pythond_runtime_ms_average
-nd_netdata_requests_requests_persec_average
-nd_netdata_response_time_milliseconds_request_average
-nd_netdata_server_cpu_milliseconds_persec_average
-nd_netdata_statsd_bytes_kilobits_persec_average
-nd_netdata_statsd_cpu_milliseconds_persec_average
-nd_netdata_statsd_events_events_persec_average
-nd_netdata_statsd_metrics_metrics_average
-nd_netdata_statsd_packets_packets_persec_average
-nd_netdata_statsd_reads_reads_persec_average
-nd_netdata_statsd_useful_metrics_metrics_average
-nd_netdata_tcp_connected_sockets_average
-nd_netdata_tcp_connects_events_average
-nd_netdata_web_cpu_milliseconds_persec_average
-nd_net_drops_drops_persec_average
-nd_net_net_kilobits_persec_average
-nd_net_packets_packets_persec_average
-nd_services_cpu_percentage_average
-nd_services_mem_usage_MiB_average
-nd_services_swap_usage_MiB_average
-nd_services_throttle_io_ops_read_operations_persec_average
-nd_services_throttle_io_ops_write_operations_persec_average
-nd_services_throttle_io_read_KiB_persec_average
-nd_services_throttle_io_write_KiB_persec_average
-nd_system_active_processes_processes_average
-nd_system_cpu_percentage_average
-nd_system_ctxt_context_switches_persec_average
-nd_system_entropy_entropy_average
-nd_system_forks_processes_persec_average
-nd_system_idlejitter_microseconds_lost_persec_average
-nd_system_interrupts_interrupts_persec_average
-nd_system_intr_interrupts_persec_average
-nd_system_io_KiB_persec_average
-nd_system_ipc_semaphore_arrays_arrays_average
-nd_system_ipc_semaphores_semaphores_average
-nd_system_ip_kilobits_persec_average
-nd_system_ipv6_kilobits_persec_average
-nd_system_load_load_average
-nd_system_net_kilobits_persec_average
-nd_system_pgpgio_KiB_persec_average
-nd_system_processes_processes_average
-nd_system_ram_MiB_average
-nd_system_shared_memory_bytes_bytes_average
-nd_system_shared_memory_segments_segments_average
-nd_system_softirqs_softirqs_persec_average
-nd_system_softnet_stat_events_persec_average
-nd_system_swapio_KiB_persec_average
-nd_system_swap_MiB_average
-nd_system_uptime_seconds_average
diff --git a/tests/backends/prometheus-raw.txt b/tests/backends/prometheus-raw.txt
deleted file mode 100644
index 2ac4c2c7a..000000000
--- a/tests/backends/prometheus-raw.txt
+++ /dev/null
@@ -1,156 +0,0 @@
-nd_apps_cpu
-nd_apps_cpu_system
-nd_apps_cpu_user
-nd_apps_files
-nd_apps_lreads
-nd_apps_lwrites
-nd_apps_major_faults
-nd_apps_mem
-nd_apps_minor_faults
-nd_apps_pipes
-nd_apps_preads
-nd_apps_processes
-nd_apps_pwrites
-nd_apps_sockets
-nd_apps_swap
-nd_apps_threads
-nd_apps_vmem
-nd_cpu_core_throttling_total
-nd_cpu_cpu_total
-nd_cpu_interrupts_total
-nd_cpu_softirqs_total
-nd_cpu_softnet_stat_total
-nd_disk_avgsz
-nd_disk_await
-nd_disk_backlog_total
-nd_disk_inodes
-nd_disk_iotime_total
-nd_disk_io_total
-nd_disk_mops_total
-nd_disk_ops_total
-nd_disk_space
-nd_disk_svctm
-nd_disk_util_total
-nd_ip_bcastpkts_total
-nd_ip_bcast_total
-nd_ip_ecnpkts_total
-nd_ip_inerrors_total
-nd_ip_mcastpkts_total
-nd_ip_mcast_total
-nd_ip_tcp_accept_queue_total
-nd_ip_tcpconnaborts_total
-nd_ip_tcpofo_total
-nd_ip_tcpreorders_total
-nd_ipv4_errors_total
-nd_ipv4_icmp_errors_total
-nd_ipv4_icmpmsg_total
-nd_ipv4_icmp_total
-nd_ipv4_packets_total
-nd_ipv4_sockstat_sockets
-nd_ipv4_sockstat_tcp_mem
-nd_ipv4_sockstat_tcp_sockets
-nd_ipv4_sockstat_udp_mem
-nd_ipv4_sockstat_udp_sockets
-nd_ipv4_tcperrors_total
-nd_ipv4_tcphandshake_total
-nd_ipv4_tcpopens_total
-nd_ipv4_tcppackets_total
-nd_ipv4_tcpsock
-nd_ipv4_udperrors_total
-nd_ipv4_udppackets_total
-nd_ipv6_ect_total
-nd_ipv6_errors_total
-nd_ipv6_icmperrors_total
-nd_ipv6_icmpmldv2_total
-nd_ipv6_icmpneighbor_total
-nd_ipv6_icmprouter_total
-nd_ipv6_icmp_total
-nd_ipv6_icmptypes_total
-nd_ipv6_mcastpkts_total
-nd_ipv6_mcast_total
-nd_ipv6_packets_total
-nd_ipv6_sockstat6_raw_sockets
-nd_ipv6_sockstat6_tcp_sockets
-nd_ipv6_sockstat6_udp_sockets
-nd_ipv6_udperrors_total
-nd_ipv6_udppackets_total
-nd_mem_available
-nd_mem_committed
-nd_mem_kernel
-nd_mem_pgfaults_total
-nd_mem_slab
-nd_mem_transparent_hugepages
-nd_mem_writeback
-nd_netdata_apps_children_fix
-nd_netdata_apps_cpu_total
-nd_netdata_apps_fix
-nd_netdata_apps_sizes_calls_total
-nd_netdata_apps_sizes_fds
-nd_netdata_apps_sizes_filenames_total
-nd_netdata_apps_sizes_files_total
-nd_netdata_apps_sizes_inode_changes_total
-nd_netdata_apps_sizes_link_changes_total
-nd_netdata_apps_sizes_new_pids_total
-nd_netdata_apps_sizes_pids
-nd_netdata_apps_sizes_targets
-nd_netdata_clients
-nd_netdata_compression_ratio
-nd_netdata_go_plugin_execution_time
-nd_netdata_net_total
-nd_netdata_plugin_cgroups_cpu_total
-nd_netdata_plugin_diskspace_dt
-nd_netdata_plugin_diskspace_total
-nd_netdata_plugin_proc_cpu_total
-nd_netdata_plugin_proc_modules
-nd_netdata_plugin_tc_cpu_total
-nd_netdata_plugin_tc_time
-nd_netdata_private_charts
-nd_netdata_pythond_runtime
-nd_netdata_requests_total
-nd_netdata_response_time
-nd_netdata_server_cpu_total
-nd_netdata_statsd_bytes_total
-nd_netdata_statsd_cpu_total
-nd_netdata_statsd_events_total
-nd_netdata_statsd_metrics
-nd_netdata_statsd_packets_total
-nd_netdata_statsd_reads_total
-nd_netdata_statsd_useful_metrics
-nd_netdata_tcp_connected
-nd_netdata_tcp_connects_total
-nd_netdata_web_cpu_total
-nd_net_drops_total
-nd_net_net_total
-nd_net_packets_total
-nd_services_cpu_total
-nd_services_mem_usage
-nd_services_swap_usage
-nd_services_throttle_io_ops_read_total
-nd_services_throttle_io_ops_write_total
-nd_services_throttle_io_read_total
-nd_services_throttle_io_write_total
-nd_system_active_processes
-nd_system_cpu_total
-nd_system_ctxt_total
-nd_system_entropy
-nd_system_forks_total
-nd_system_idlejitter
-nd_system_interrupts_total
-nd_system_intr_total
-nd_system_io_total
-nd_system_ipc_semaphore_arrays
-nd_system_ipc_semaphores
-nd_system_ip_total
-nd_system_ipv6_total
-nd_system_load
-nd_system_net_total
-nd_system_pgpgio_total
-nd_system_processes
-nd_system_ram
-nd_system_shared_memory_bytes
-nd_system_shared_memory_segments
-nd_system_softirqs_total
-nd_system_softnet_stat_total
-nd_system_swap
-nd_system_swapio_total
-nd_system_uptime
diff --git a/tests/backends/prometheus.bats b/tests/backends/prometheus.bats
deleted file mode 100755
index d52f39d54..000000000
--- a/tests/backends/prometheus.bats
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bats
-
-validate_metrics() {
- fname="${1}"
- params="${2}"
-
- curl -sS "http://localhost:19999/api/v1/allmetrics?format=prometheus&prefix=nd&timestamps=no${params}" |
- grep -E 'nd_system_|nd_cpu_|nd_system_|nd_net_|nd_disk_|nd_ip_|nd_ipv4_|nd_ipv6_|nd_mem_|nd_netdata_|nd_apps_|nd_services_' |
- sed -ne 's/{.*//p' | sort | uniq > tests/backends/new-${fname}
- diff tests/backends/${fname} tests/backends/new-${fname}
- rm tests/backends/new-${fname}
-}
-
-
-if [ ! -f .gitignore ]; then
- echo "Need to run as ./tests/backends/$(basename "$0") from top level directory of git repository" >&2
- exit 1
-fi
-
-
-@test "prometheus raw" {
- validate_metrics prometheus-raw.txt "&data=raw"
-}
-
-@test "prometheus avg" {
- validate_metrics prometheus-avg.txt ""
-}
-
-@test "prometheus avg oldunits" {
- validate_metrics prometheus-avg-oldunits.txt "&oldunits=yes"
-}
diff --git a/tests/health_mgmtapi/README.md b/tests/health_mgmtapi/README.md
deleted file mode 100644
index 8473b35ea..000000000
--- a/tests/health_mgmtapi/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Health command API tester
-
-The directory `tests/health_cmdapi` contains the test script `health-cmdapi-test.sh` for the [health command API](../../web/api/health).
-
-The script can be executed with options to prepare the system for the tests, run them and restore the system to its previous state.
-
-It depends on the management API being accessible on localhost:19999 and on the responses to the api/v1/alarms?all requests being functional.
-It also requires read access to the management API key that is usually under `/var/lib/netdata/netdata.api.key` (`@varlibdir_POST@/netdata.api.key`).
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2Fhealth_mgmtapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
-
-
diff --git a/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json b/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json
deleted file mode 100644
index 9f05efe70..000000000
--- a/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_iowait" }, { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json b/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json
deleted file mode 100644
index dbf879925..000000000
--- a/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger", "context": "system.cpu" }, { "alarm": "*10min_cpu_usage *load_trigger", "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json b/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json
deleted file mode 100644
index a267cfd6f..000000000
--- a/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "DISABLE", "silencers": [ { "context": "system.cpu" }, { "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE-list.json b/tests/health_mgmtapi/expected_list/DISABLE-list.json
deleted file mode 100644
index c2c778104..000000000
--- a/tests/health_mgmtapi/expected_list/DISABLE-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "DISABLE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json b/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json
deleted file mode 100644
index bbc3f4f0c..000000000
--- a/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": true, "type": "DISABLE", "silencers": [] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json b/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json
deleted file mode 100644
index e8aee1795..000000000
--- a/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json
+++ /dev/null
@@ -1 +0,0 @@
-Auth Error
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json b/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json
deleted file mode 100644
index a7fc1cb8a..000000000
--- a/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "DISABLE", "silencers": [ { "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json b/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json
deleted file mode 100644
index 50119f79c..000000000
--- a/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "None", "silencers": [ { "families": "load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/HOSTS-list.json b/tests/health_mgmtapi/expected_list/HOSTS-list.json
deleted file mode 100644
index 9db21b6c3..000000000
--- a/tests/health_mgmtapi/expected_list/HOSTS-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "hosts": "*" } ] }
diff --git a/tests/health_mgmtapi/expected_list/RESET-list.json b/tests/health_mgmtapi/expected_list/RESET-list.json
deleted file mode 100644
index 2d3f09d68..000000000
--- a/tests/health_mgmtapi/expected_list/RESET-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "None", "silencers": [] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE-list.json b/tests/health_mgmtapi/expected_list/SILENCE-list.json
deleted file mode 100644
index d157f2d3a..000000000
--- a/tests/health_mgmtapi/expected_list/SILENCE-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_2-list.json b/tests/health_mgmtapi/expected_list/SILENCE_2-list.json
deleted file mode 100644
index d5e6fa2d1..000000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_2-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "families": "load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_3-list.json b/tests/health_mgmtapi/expected_list/SILENCE_3-list.json
deleted file mode 100644
index 69e98cc19..000000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_3-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [] } WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json
deleted file mode 100644
index dd789cd33..000000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger", "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json
deleted file mode 100644
index d157f2d3a..000000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json
deleted file mode 100644
index c88ef9fde..000000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": true, "type": "SILENCE", "silencers": [] }
diff --git a/tests/health_mgmtapi/health-cmdapi-test.sh b/tests/health_mgmtapi/health-cmdapi-test.sh
new file mode 100644
index 000000000..da981b14f
--- /dev/null
+++ b/tests/health_mgmtapi/health-cmdapi-test.sh
@@ -0,0 +1,226 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1117,SC2034,SC2059,SC2086,SC2181
+
+NETDATA_VARLIB_DIR="/var/lib/netdata"
+
+check () {
+ sec=1
+ echo -e " ${GRAY}Check: '${1}' in $sec sec"
+ sleep $sec
+ number=$RANDOM
+ resp=$(curl -s "http://$URL/api/v1/alarms?all&$number")
+ r=$(echo "${resp}" | \
+ python3 -c "import sys, json; d=json.load(sys.stdin); \
+ print(\
+ d['alarms']['system.cpu.10min_cpu_usage']['disabled'], \
+ d['alarms']['system.cpu.10min_cpu_usage']['silenced'] , \
+ d['alarms']['system.cpu.10min_cpu_iowait']['disabled'], \
+ d['alarms']['system.cpu.10min_cpu_iowait']['silenced'], \
+ d['alarms']['system.load.load_trigger']['disabled'], \
+ d['alarms']['system.load.load_trigger']['silenced'], \
+ );" 2>&1)
+ if [ $? -ne 0 ] ; then
+ echo -e " ${RED}ERROR: Unexpected response stored in /tmp/resp-$number.json"
+ echo "$resp" > /tmp/resp-$number.json
+ err=$((err+1))
+ iter=0
+ elif [ "${r}" != "${2}" ] ; then
+ echo -e " ${GRAY}WARNING: 'Got ${r}'. Expected '${2}'"
+ iter=$((iter+1))
+ if [ $iter -lt 10 ] ; then
+ echo -e " ${GRAY}Repeating test "
+ check "$1" "$2"
+ else
+ echo -e " ${RED}ERROR: 'Got ${r}'. Expected '${2}'"
+ iter=0
+ err=$((err+1))
+ fi
+ else
+ echo -e " ${GREEN}Success"
+ iter=0
+ fi
+}
+
+cmd () {
+ echo -e "${WHITE}Cmd '${1}'"
+ echo -en " ${GRAY}Expecting '${2}' : "
+ RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?${1}" -H "X-Auth-Token: $TOKEN" 2>&1)
+ if [ "${RESPONSE}" != "${2}" ] ; then
+ echo -e "${RED}ERROR: Response '${RESPONSE}'"
+ err=$((err+1))
+ else
+ echo -e "${GREEN}Success"
+ fi
+}
+
+check_list() {
+ RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: $TOKEN" 2>&1)
+
+ NAME="$1-list.json"
+ echo $RESPONSE > $NAME
+ diff $NAME expected_list/$NAME 1>/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo -e "${GREEN}Success: The list command got the correct answer for $NAME!"
+ else
+ echo -e "${RED}ERROR: the files $NAME and expected_list/$NAME does not match."
+ exit 1
+ fi
+}
+
+WHITE='\033[0;37m'
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+GRAY='\033[0;37m'
+
+SETUP=0
+RESTART=0
+CLEANUP=0
+TEST=0
+URL="localhost:19999"
+
+err=0
+
+
+ HEALTH_CMDAPI_MSG_AUTHERROR="Auth Error"
+ HEALTH_CMDAPI_MSG_SILENCEALL="All alarm notifications are silenced"
+ HEALTH_CMDAPI_MSG_DISABLEALL="All health checks are disabled"
+ HEALTH_CMDAPI_MSG_RESET="All health checks and notifications are enabled"
+ HEALTH_CMDAPI_MSG_DISABLE="Health checks disabled for alarms matching the selectors"
+ HEALTH_CMDAPI_MSG_SILENCE="Alarm notifications silenced for alarms matching the selectors"
+ HEALTH_CMDAPI_MSG_ADDED="Alarm selector added"
+ HEALTH_CMDAPI_MSG_INVALID_KEY="Invalid key. Ignoring it."
+ HEALTH_CMDAPI_MSG_STYPEWARNING="WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command."
+ HEALTH_CMDAPI_MSG_NOSELECTORWARNING="WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors."
+
+ if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r CORRECT_TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+ else
+ echo "${NETDATA_VARLIB_DIR}/netdata.api.key not found"
+ exit 1
+ fi
+ # Set correct token
+ TOKEN="${CORRECT_TOKEN}"
+
+ # Test default state
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Test auth failure
+ TOKEN="Wrong token"
+ cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_AUTHERROR"
+ check "Default State" "False False False False False False"
+ check_list "DISABLE_ALL_ERROR"
+
+ # Set correct token
+ TOKEN="${CORRECT_TOKEN}"
+
+ # Test disable
+ cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_DISABLEALL"
+ check "All disabled" "True False True False True False"
+ check_list "DISABLE_ALL"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Test silence
+ cmd "cmd=SILENCE ALL" "$HEALTH_CMDAPI_MSG_SILENCEALL"
+ check "All silenced" "False True False True False True"
+ check_list "SILENCE_ALL"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Add silencer by name
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=SILENCE&alarm=*10min_cpu_usage *load_trigger" "${resp}"
+ check "Silence notifications for alarm1 and load_trigger" "False True False False False True"
+ check_list "SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER"
+
+ # Convert to disable health checks
+ cmd "cmd=DISABLE" "$HEALTH_CMDAPI_MSG_DISABLE"
+ check "Disable notifications for alarm1 and load_trigger" "True False False False True False"
+ check_list "DISABLE"
+
+ # Convert back to silence notifications
+ cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE"
+ check "Silence notifications for alarm1 and load_trigger" "False True False False False True"
+ check_list "SILENCE"
+
+ # Add second silencer by name
+ cmd "alarm=*10min_cpu_iowait" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence notifications for alarm1,alarm2 and load_trigger" "False True False True False True"
+ check_list "ALARM_CPU_IOWAIT"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add silencer by chart
+ printf -v resp "$HEALTH_CMDAPI_MSG_DISABLE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=DISABLE&chart=system.load" "${resp}"
+ check "Default State" "False False False False True False"
+ check_list "DISABLE_SYSTEM_LOAD"
+
+ # Add silencer by context
+ cmd "context=system.cpu" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Default State" "True False True False True False"
+ check_list "CONTEXT_SYSTEM_CPU"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add second condition to a selector (AND)
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=SILENCE&alarm=*10min_cpu_usage *load_trigger&chart=system.load" "${resp}"
+ check "Silence notifications load_trigger" "False False False False False True"
+ check_list "SILENCE_ALARM_CPU_USAGE"
+
+ # Add second selector with two conditions
+ cmd "alarm=*10min_cpu_usage *load_trigger&context=system.cpu" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence notifications load_trigger" "False True False False False True"
+ check_list "ALARM_CPU_USAGE"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add silencer without a command to disable or silence alarms
+ printf -v resp "$HEALTH_CMDAPI_MSG_ADDED\n$HEALTH_CMDAPI_MSG_STYPEWARNING"
+ cmd "families=load" "${resp}"
+ check "Family selector with no command" "False False False False False False"
+ check_list "FAMILIES_LOAD"
+
+ # Add silence command
+ cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE"
+ check "Silence family load" "False False False False False True"
+ check_list "SILENCE_2"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add command without silencers
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_NOSELECTORWARNING"
+ cmd "cmd=SILENCE" "${resp}"
+ check "Command with no selector" "False False False False False False"
+ check_list "SILENCE_3"
+
+ # Add hosts silencer
+ cmd "hosts=*" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence all hosts" "False True False True False True"
+ check_list "HOSTS"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+if [ $err -gt 0 ] ; then
+ echo "$err error(s) found"
+ exit 1
+fi
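For reference, the health management API exercised by this new script can also be driven by hand. A minimal sketch, assuming a local agent on the default port 19999 and the default API key location used above; the endpoints, header and commands are the same ones the script itself calls:

    TOKEN="$(cat /var/lib/netdata/netdata.api.key)"

    # Silence notifications for one alarm, then list the active silencers.
    curl -s "http://localhost:19999/api/v1/manage/health?cmd=SILENCE&alarm=load_trigger" -H "X-Auth-Token: ${TOKEN}"
    curl -s "http://localhost:19999/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: ${TOKEN}"

    # Re-enable all health checks and notifications when done.
    curl -s "http://localhost:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: ${TOKEN}"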
diff --git a/tests/installer/checksums.sh b/tests/installer/checksums.sh
deleted file mode 100755
index b2b0b2a22..000000000
--- a/tests/installer/checksums.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#
-# Mechanism to validate kickstart files integrity status
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pawel Krupa (pawel@netdata.cloud)
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel 2> /dev/null || echo "")")
-CWD="$(git rev-parse --show-cdup 2> /dev/null || echo "")"
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as ./tests/installer/$(basename "$0") from top level directory of netdata git repository"
- echo "Kickstart validation process aborted"
- exit 1
-fi
-
-README_DOC="packaging/installer/README.md"
-source ./tests/installer/slack.sh
-
-for file in kickstart.sh kickstart-static64.sh; do
- README_MD5=$(grep "$file" $README_DOC | grep md5sum | cut -d '"' -f2)
- KICKSTART_URL="https://my-netdata.io/$file"
- KICKSTART="packaging/installer/$file"
- KICKSTART_MD5="$(md5sum "${KICKSTART}" | cut -d' ' -f1)"
- CALCULATED_MD5="$(curl -Ss ${KICKSTART_URL} | md5sum | cut -d ' ' -f 1)"
-
- # Conditionally run the website validation
- if [ -z "${LOCAL_ONLY}" ]; then
- echo "Validating ${KICKSTART_URL} against local file ${KICKSTART} with MD5 ${KICKSTART_MD5}.."
- if [ "$KICKSTART_MD5" == "$CALCULATED_MD5" ]; then
- echo "${KICKSTART_URL} looks fine"
- else
- post_message "TRAVIS_MESSAGE" "Attention <!here> , ${KICKSTART_URL} md5sum does not match local file, it needs to be updated"
- fi
- fi
-
- echo "Validating documentation for $file"
- if [ "$KICKSTART_MD5" != "$README_MD5" ]; then
- echo "Invalid checksum for $file in $README_DOC."
- echo "checksum in docs: $README_MD5"
- echo "current checksum: $KICKSTART_MD5"
- exit 2
- else
- echo "$file MD5Sum is well documented"
- fi
-
-done
-echo "No problems found, exiting succesfully!"
diff --git a/tests/installer/slack.sh b/tests/installer/slack.sh
deleted file mode 100755
index 83cb5fa7c..000000000
--- a/tests/installer/slack.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-# #No shebang necessary
-# BASH Lib: Simple incoming webhook for slack integration.
-#
-# The script expects the following parameters to be defined by the upper layer:
-# SLACK_NOTIFY_WEBHOOK_URL
-# SLACK_BOT_NAME
-# SLACK_CHANNEL
-#
-# Copyright:
-#
-# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-
-post_message() {
- TYPE="$1"
- MESSAGE="$2"
- CUSTOM_CHANNEL="$3"
-
- case "$TYPE" in
- "PLAIN_MESSAGE")
- curl -X POST --data-urlencode "payload={\"channel\": \"${SLACK_CHANNEL}\", \"username\": \"${SLACK_BOT_NAME}\", \"text\": \"${MESSAGE}\", \"icon_emoji\": \":space_invader:\"}" ${SLACK_NOTIFY_WEBHOOK_URL}
- ;;
- "TRAVIS_MESSAGE")
- EVENT_LINE="${TRAVIS_JOB_NUMBER}: Event type '${TRAVIS_EVENT_TYPE}', on '${TRAVIS_OS_NAME}'"
- if [ "$TRAVIS_EVENT_TYPE}" == "pull_request" ]; then
- EVENT_LINE="${TRAVIS_JOB_NUMBER}: Event type '${TRAVIS_EVENT_TYPE}' #${TRAVIS_PULL_REQUEST}, on '${TRAVIS_OS_NAME}' "
- fi
-
- if [ -n "${CUSTOM_CHANNEL}" ]; then
- echo "Sending travis message to custom channel ${CUSTOM_CHANNEL}"
- OPTIONAL_CHANNEL_INFO="\"channel\": \"${CUSTOM_CHANNEL}\","
- fi
-
- POST_MESSAGE="{
- ${OPTIONAL_CHANNEL_INFO}
- \"text\": \"${TRAVIS_REPO_SLUG}, ${MESSAGE}\",
- \"attachments\": [{
- \"text\": \"${TRAVIS_JOB_NUMBER}: Event type '${TRAVIS_EVENT_TYPE}', on '${TRAVIS_OS_NAME}' \",
- \"fallback\": \"I could not determine the build\",
- \"callback_id\": \"\",
- \"color\": \"#3AA3E3\",
- \"attachment_type\": \"default\",
- \"actions\": [
- {
- \"name\": \"${TRAVIS_BUILD_NUMBER}\",
- \"text\": \"Build #${TRAVIS_BUILD_NUMBER}\",
- \"type\": \"button\",
- \"url\": \"${TRAVIS_BUILD_WEB_URL}\"
- },
- {
- \"name\": \"${TRAVIS_JOB_NUMBER}\",
- \"text\": \"Job #${TRAVIS_JOB_NUMBER}\",
- \"type\": \"button\",
- \"url\": \"${TRAVIS_JOB_WEB_URL}\"
- }]
- }]
- }"
- echo "Sending ${POST_MESSAGE}"
- curl -X POST --data-urlencode "payload=${POST_MESSAGE}" "${SLACK_NOTIFY_WEBHOOK_URL}"
- ;;
- *)
- echo "Unrecognized message type \"$TYPE\" was given"
- return 1
- ;;
- esac
-}
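The deleted helper was meant to be sourced, not executed. A sketch of how a caller would use it, with the variables documented at the top of the file; the webhook URL, bot name and channel below are placeholders, not real values:

    export SLACK_NOTIFY_WEBHOOK_URL="https://hooks.slack.com/services/PLACEHOLDER"
    export SLACK_BOT_NAME="netdata-ci-bot"
    export SLACK_CHANNEL="#placeholder-channel"
    source ./tests/installer/slack.sh
    post_message "PLAIN_MESSAGE" "hello from the test harness"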
diff --git a/tests/k6/data.js b/tests/k6/data.js
deleted file mode 100644
index fb4e087ee..000000000
--- a/tests/k6/data.js
+++ /dev/null
@@ -1,67 +0,0 @@
-import http from "k6/http";
-import { log, check, group, sleep } from "k6";
-import { Rate } from "k6/metrics";
-
-// A custom metric to track failure rates
-var failureRate = new Rate("check_failure_rate");
-
-// Options
-export let options = {
- stages: [
- // Linearly ramp up from 1 to 20 VUs during first 30s
- { target: 20, duration: "30s" },
-        // Hold at 20 VUs for the next 1 minute
- { target: 20, duration: "1m" },
-        // Linearly ramp down from 20 to 0 VUs over the last 10 seconds
- { target: 0, duration: "10s" }
- ],
- thresholds: {
- // We want the 95th percentile of all HTTP request durations to be less than 500ms
- "http_req_duration": ["p(95)<500"],
- // Requests with the fast tag should finish even faster
- "http_req_duration{fast:yes}": ["p(99)<250"],
- // Thresholds based on the custom metric we defined and use to track application failures
- "check_failure_rate": [
- // Global failure rate should be less than 1%
- "rate<0.01",
- // Abort the test early if it climbs over 5%
- { threshold: "rate<=0.05", abortOnFail: true },
- ],
- },
-};
-
-function rnd(min, max) {
- min = Math.ceil(min);
- max = Math.floor(max);
- return Math.floor(Math.random() * (max - min)) + min; //The maximum is exclusive and the minimum is inclusive
-}
-
-// Main function
-export default function () {
- // Control what the data request asks for
- let charts = [ "example.random" ]
- let chartmin = 0;
- let chartmax = charts.length - 1;
- let aftermin = 60;
- let aftermax = 3600;
- let beforemin = 3503600;
- let beforemax = 3590000;
- let pointsmin = 300;
- let pointsmax = 3600;
-
- group("Requests", function () {
-    // Execute multiple requests in parallel like a browser, to fetch data for the charts. The one taking the longest is the data request.
- let resps = http.batch([
- ["GET", "http://localhost:19999/api/v1/info", { tags: { fast: "yes" } }],
- ["GET", "http://localhost:19999/api/v1/charts", { tags: { fast: "yes" } }],
- ["GET", "http://localhost:19999/api/v1/data?chart="+charts[rnd(chartmin,chartmax)]+"&before=-"+rnd(beforemin,beforemax)+"&after=-"+rnd(aftermin,aftermax)+"&points="+rnd(pointsmin,pointsmax)+"&format=json&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&_="+rnd(1,1000000000000), { }],
- ["GET", "http://localhost:19999/api/v1/alarms", { tags: { fast: "yes" } }]
- ]);
- // Combine check() call with failure tracking
- failureRate.add(!check(resps, {
- "status is 200": (r) => r[0].status === 200 && r[1].status === 200
- }));
- });
-
- sleep(Math.random() * 2 + 1); // Random sleep between 1s and 3s
-}
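The deleted scenario was plain k6 input; a minimal sketch of how it would have been run, assuming the k6 CLI is installed and an agent is listening on localhost:19999 as the URLs in the script expect:

    k6 run tests/k6/data.js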
diff --git a/tests/lifecycle.bats b/tests/lifecycle.bats
deleted file mode 100755
index 4d18115be..000000000
--- a/tests/lifecycle.bats
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env bats
-#
-# Netdata installation lifecycle testing script.
-# This is to validate the install, update and uninstall of netdata
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author  : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-#
-
-INSTALLATION="$BATS_TMPDIR/installation"
-ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
-# list of files which need to be checked. Path cannot start from '/'
-FILES="usr/libexec/netdata/plugins.d/go.d.plugin
- usr/libexec/netdata/plugins.d/charts.d.plugin
- usr/libexec/netdata/plugins.d/python.d.plugin
- usr/libexec/netdata/plugins.d/node.d.plugin"
-
-DIRS="usr/sbin/netdata
- etc/netdata
- usr/share/netdata
- usr/libexec/netdata
- var/cache/netdata
- var/lib/netdata
- var/log/netdata"
-
-setup() {
- # If we are not in netdata git repo, at the top level directory, fail
- TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
- CWD=$(git rev-parse --show-cdup || echo "")
- if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as ./tests/lifecycle/$(basename "$0") from top level directory of git repository"
- exit 1
- fi
-}
-
-@test "install netdata" {
- ./netdata-installer.sh --dont-wait --dont-start-it --auto-update --install "${INSTALLATION}"
-
- # Validate particular files
- for file in $FILES; do
- [ ! -f "$BATS_TMPDIR/$file" ]
- done
-
- # Validate particular directories
- for a_dir in $DIRS; do
- [ ! -d "$BATS_TMPDIR/$a_dir" ]
- done
-}
-
-@test "update netdata" {
- export ENVIRONMENT_FILE="${ENV}"
- /etc/cron.daily/netdata-updater
- ! grep "new_installation" "${ENV}"
-}
-
-@test "uninstall netdata" {
- ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}"
- [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ]
- [ ! -f "/etc/cron.daily/netdata-updater" ]
-}
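The removed lifecycle suite was an ordinary bats file, so running it only required the bats runner, invoked from the top of the repository as its setup() insists:

    bats tests/lifecycle.bats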
diff --git a/tests/profile/Makefile b/tests/profile/Makefile
deleted file mode 100644
index 5f4e8b521..000000000
--- a/tests/profile/Makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-COMMON_CFLAGS = -I ../../ -DTARGET_OS=1 -Wall -Wextra
-PROFILE_CFLAGS = -O1 -ggdb $(COMMON_CFLAGS)
-PERFORMANCE_CFLAGS = -O2 $(COMMON_CFLAGS)
-
-CFLAGS = $(PERFORMANCE_CFLAGS)
-
-LIBNETDATA_FILES = \
- ../../libnetdata/popen/popen.o \
- ../../libnetdata/storage_number/storage_number.o \
- ../../libnetdata/avl/avl.o \
- ../../libnetdata/socket/socket.o \
- ../../libnetdata/os.o \
- ../../libnetdata/clocks/clocks.o \
- ../../libnetdata/procfile/procfile.o \
- ../../libnetdata/statistical/statistical.o \
- ../../libnetdata/eval/eval.o \
- ../../libnetdata/threads/threads.o \
- ../../libnetdata/dictionary/dictionary.o \
- ../../libnetdata/simple_pattern/simple_pattern.o \
- ../../libnetdata/url/url.o \
- ../../libnetdata/config/appconfig.o \
- ../../libnetdata/libnetdata.o \
- ../../libnetdata/buffer/buffer.o \
- ../../libnetdata/adaptive_resortable_list/adaptive_resortable_list.o \
- ../../libnetdata/locks/locks.o \
- ../../libnetdata/log/log.o \
- $(NULL)
-
-COMMON_LDFLAGS = $(LIBNETDATA_FILES) -pthread -lm
-
-all: statsd-stress benchmark-procfile-parser test-eval benchmark-dictionary benchmark-value-pairs
-
-benchmark-procfile-parser: benchmark-procfile-parser.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-benchmark-dictionary: benchmark-dictionary.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-benchmark-value-pairs: benchmark-value-pairs.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-statsd-stress: statsd-stress.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-test-eval: test-eval.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-
-clean:
- rm -f benchmark-procfile-parser statsd-stress test-eval benchmark-dictionary benchmark-value-pairs
-
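A sketch of how these benchmarks were meant to be built and run with the Makefile above; it links against object files from a regular netdata build, so the tree must have been compiled first:

    cd tests/profile
    make                      # builds statsd-stress, test-eval and the benchmark-* binaries
    ./benchmark-dictionary
    make clean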
diff --git a/tests/profile/benchmark-dictionary.c b/tests/profile/benchmark-dictionary.c
deleted file mode 100644
index 30c098d5d..000000000
--- a/tests/profile/benchmark-dictionary.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-/*
- * 1. build netdata (as normally)
- * 2. cd tests/profile/
- * 3. compile with:
- * gcc -O3 -Wall -Wextra -I ../../src/ -I ../../ -o benchmark-dictionary benchmark-dictionary.c ../../src/dictionary.o ../../src/log.o ../../src/avl.o ../../src/common.o -pthread
- *
- */
-
-#include "config.h"
-#include "libnetdata/libnetdata.h"
-
-struct myvalue {
- int i;
-};
-
-void netdata_cleanup_and_exit(int ret) { exit(ret); }
-
-int main(int argc, char **argv) {
- if(argc || argv) {;}
-
-// DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED|DICTIONARY_FLAG_WITH_STATISTICS);
- DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_WITH_STATISTICS);
- if(!dict) fatal("Cannot create dictionary.");
-
- struct rusage start, end;
- unsigned long long dt;
- char buf[100 + 1];
- struct myvalue value, *v;
- int i, max = 30000000, max2;
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Inserting %d entries in the dictionary\n", max);
- for(i = 0; i < max; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- dictionary_set(dict, buf, &value, sizeof(struct myvalue));
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
- fprintf(stderr, "Added %d entries in %llu nanoseconds: %llu inserts per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Retrieving %d entries from the dictionary\n", max);
- for(i = 0; i < max; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- v = dictionary_get(dict, buf);
- if(!v)
- fprintf(stderr, "ERROR: cannot get value %d from the dictionary\n", i);
- else if(v->i != i)
- fprintf(stderr, "ERROR: expected %d but got %d\n", i, v->i);
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
- fprintf(stderr, "Read %d entries in %llu nanoseconds: %llu searches per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Resetting %d entries in the dictionary\n", max);
- for(i = 0; i < max; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- dictionary_set(dict, buf, &value, sizeof(struct myvalue));
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
- fprintf(stderr, "Reset %d entries in %llu nanoseconds: %llu resets per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Searching %d non-existing entries in the dictionary\n", max);
- max2 = max * 2;
- for(i = max; i < max2; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- v = dictionary_get(dict, buf);
- if(v)
- fprintf(stderr, "ERROR: cannot got non-existing value %d from the dictionary\n", i);
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
- fprintf(stderr, "Searched %d non-existing entries in %llu nanoseconds: %llu not found searches per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Deleting %d entries from the dictionary\n", max);
- for(i = 0; i < max; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- dictionary_del(dict, buf);
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
- fprintf(stderr, "Deleted %d entries in %llu nanoseconds: %llu deletes per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Destroying dictionary\n");
- dictionary_destroy(dict);
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
- fprintf(stderr, "Destroyed in %llu nanoseconds\n", dt);
-
- return 0;
-}
diff --git a/tests/profile/benchmark-line-parsing.c b/tests/profile/benchmark-line-parsing.c
deleted file mode 100644
index c07d1d857..000000000
--- a/tests/profile/benchmark-line-parsing.c
+++ /dev/null
@@ -1,707 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-#include <stdio.h>
-#include <inttypes.h>
-#include <string.h>
-#include <stdlib.h>
-#include <ctype.h>
-#include <sys/time.h>
-
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-
-#define simple_hash(name) ({ \
- register unsigned char *__hash_source = (unsigned char *)(name); \
- register uint32_t __hash_value = 0x811c9dc5; \
- while (*__hash_source) { \
- __hash_value *= 16777619; \
- __hash_value ^= (uint32_t) *__hash_source++; \
- } \
- __hash_value; \
-})
-
-static inline uint32_t simple_hash2(const char *name) {
- register unsigned char *s = (unsigned char *)name;
- register uint32_t hval = 0x811c9dc5;
- while (*s) {
- hval *= 16777619;
- hval ^= (uint32_t) *s++;
- }
- return hval;
-}
-
-static inline unsigned long long fast_strtoull(const char *s) {
- register unsigned long long n = 0;
- register char c;
- for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
- n *= 10;
- n += c - '0';
- // n = (n << 1) + (n << 3) + (c - '0');
- }
- return n;
-}
-
-static uint32_t cache_hash = 0;
-static uint32_t rss_hash = 0;
-static uint32_t rss_huge_hash = 0;
-static uint32_t mapped_file_hash = 0;
-static uint32_t writeback_hash = 0;
-static uint32_t dirty_hash = 0;
-static uint32_t swap_hash = 0;
-static uint32_t pgpgin_hash = 0;
-static uint32_t pgpgout_hash = 0;
-static uint32_t pgfault_hash = 0;
-static uint32_t pgmajfault_hash = 0;
-static uint32_t inactive_anon_hash = 0;
-static uint32_t active_anon_hash = 0;
-static uint32_t inactive_file_hash = 0;
-static uint32_t active_file_hash = 0;
-static uint32_t unevictable_hash = 0;
-static uint32_t hierarchical_memory_limit_hash = 0;
-static uint32_t total_cache_hash = 0;
-static uint32_t total_rss_hash = 0;
-static uint32_t total_rss_huge_hash = 0;
-static uint32_t total_mapped_file_hash = 0;
-static uint32_t total_writeback_hash = 0;
-static uint32_t total_dirty_hash = 0;
-static uint32_t total_swap_hash = 0;
-static uint32_t total_pgpgin_hash = 0;
-static uint32_t total_pgpgout_hash = 0;
-static uint32_t total_pgfault_hash = 0;
-static uint32_t total_pgmajfault_hash = 0;
-static uint32_t total_inactive_anon_hash = 0;
-static uint32_t total_active_anon_hash = 0;
-static uint32_t total_inactive_file_hash = 0;
-static uint32_t total_active_file_hash = 0;
-static uint32_t total_unevictable_hash = 0;
-
-char *strings[] = {
- "cache",
- "rss",
- "rss_huge",
- "mapped_file",
- "writeback",
- "dirty",
- "swap",
- "pgpgin",
- "pgpgout",
- "pgfault",
- "pgmajfault",
- "inactive_anon",
- "active_anon",
- "inactive_file",
- "active_file",
- "unevictable",
- "hierarchical_memory_limit",
- "total_cache",
- "total_rss",
- "total_rss_huge",
- "total_mapped_file",
- "total_writeback",
- "total_dirty",
- "total_swap",
- "total_pgpgin",
- "total_pgpgout",
- "total_pgfault",
- "total_pgmajfault",
- "total_inactive_anon",
- "total_active_anon",
- "total_inactive_file",
- "total_active_file",
- "total_unevictable",
- NULL
-};
-
-unsigned long long values1[12] = { 0 };
-unsigned long long values2[12] = { 0 };
-unsigned long long values3[12] = { 0 };
-unsigned long long values4[12] = { 0 };
-unsigned long long values5[12] = { 0 };
-unsigned long long values6[12] = { 0 };
-
-#define NUMBER1 "12345678901234"
-#define NUMBER2 "23456789012345"
-#define NUMBER3 "34567890123456"
-#define NUMBER4 "45678901234567"
-#define NUMBER5 "56789012345678"
-#define NUMBER6 "67890123456789"
-#define NUMBER7 "78901234567890"
-#define NUMBER8 "89012345678901"
-#define NUMBER9 "90123456789012"
-#define NUMBER10 "12345678901234"
-#define NUMBER11 "23456789012345"
-
-// simple system strcmp()
-void test1() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
-
- if(unlikely(!strcmp(s, "cache")))
- values1[i] = strtoull(NUMBER1, NULL, 10);
-
- else if(unlikely(!strcmp(s, "rss")))
- values1[i] = strtoull(NUMBER2, NULL, 10);
-
- else if(unlikely(!strcmp(s, "rss_huge")))
- values1[i] = strtoull(NUMBER3, NULL, 10);
-
- else if(unlikely(!strcmp(s, "mapped_file")))
- values1[i] = strtoull(NUMBER4, NULL, 10);
-
- else if(unlikely(!strcmp(s, "writeback")))
- values1[i] = strtoull(NUMBER5, NULL, 10);
-
- else if(unlikely(!strcmp(s, "dirty")))
- values1[i] = strtoull(NUMBER6, NULL, 10);
-
- else if(unlikely(!strcmp(s, "swap")))
- values1[i] = strtoull(NUMBER7, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgpgin")))
- values1[i] = strtoull(NUMBER8, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgpgout")))
- values1[i] = strtoull(NUMBER9, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgfault")))
- values1[i] = strtoull(NUMBER10, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgmajfault")))
- values1[i] = strtoull(NUMBER11, NULL, 10);
- }
-}
-
-// inline simple_hash() with system strtoull()
-void test2() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values2[i] = strtoull(NUMBER1, NULL, 10);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values2[i] = strtoull(NUMBER2, NULL, 10);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values2[i] = strtoull(NUMBER3, NULL, 10);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values2[i] = strtoull(NUMBER4, NULL, 10);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values2[i] = strtoull(NUMBER5, NULL, 10);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values2[i] = strtoull(NUMBER6, NULL, 10);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values2[i] = strtoull(NUMBER7, NULL, 10);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values2[i] = strtoull(NUMBER8, NULL, 10);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values2[i] = strtoull(NUMBER9, NULL, 10);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values2[i] = strtoull(NUMBER10, NULL, 10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values2[i] = strtoull(NUMBER11, NULL, 10);
- }
-}
-
-// statement expression simple_hash(), system strtoull()
-void test3() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
- uint32_t hash = simple_hash(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values3[i] = strtoull(NUMBER1, NULL, 10);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values3[i] = strtoull(NUMBER2, NULL, 10);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values3[i] = strtoull(NUMBER3, NULL, 10);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values3[i] = strtoull(NUMBER4, NULL, 10);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values3[i] = strtoull(NUMBER5, NULL, 10);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values3[i] = strtoull(NUMBER6, NULL, 10);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values3[i] = strtoull(NUMBER7, NULL, 10);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values3[i] = strtoull(NUMBER8, NULL, 10);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values3[i] = strtoull(NUMBER9, NULL, 10);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values3[i] = strtoull(NUMBER10, NULL, 10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values3[i] = strtoull(NUMBER11, NULL, 10);
- }
-}
-
-
-// inline simple_hash(), if-continue checks
-void test4() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) {
- values4[i] = strtoull(NUMBER1, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) {
- values4[i] = strtoull(NUMBER2, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) {
- values4[i] = strtoull(NUMBER3, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) {
- values4[i] = strtoull(NUMBER4, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) {
- values4[i] = strtoull(NUMBER5, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) {
- values4[i] = strtoull(NUMBER6, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) {
- values4[i] = strtoull(NUMBER7, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) {
- values4[i] = strtoull(NUMBER8, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) {
- values4[i] = strtoull(NUMBER9, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) {
- values4[i] = strtoull(NUMBER10, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) {
- values4[i] = strtoull(NUMBER11, NULL, 0);
- continue;
- }
- }
-}
-
-// inline simple_hash(), if-else-if-else-if (netdata default)
-void test5() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values5[i] = fast_strtoull(NUMBER1);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values5[i] = fast_strtoull(NUMBER2);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values5[i] = fast_strtoull(NUMBER3);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values5[i] = fast_strtoull(NUMBER4);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values5[i] = fast_strtoull(NUMBER5);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values5[i] = fast_strtoull(NUMBER6);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values5[i] = fast_strtoull(NUMBER7);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values5[i] = fast_strtoull(NUMBER8);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values5[i] = fast_strtoull(NUMBER9);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values5[i] = fast_strtoull(NUMBER10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values5[i] = fast_strtoull(NUMBER11);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-struct entry {
- char *name;
- uint32_t hash;
- int found;
- void (*func)(void *data1, void *data2);
- void *data1;
- void *data2;
- struct entry *prev, *next;
-};
-
-struct base {
- int iteration;
- int registered;
- int wanted;
- int found;
- struct entry *entries, *last;
-};
-
-static inline void callback(void *data1, void *data2) {
- char *string = data1;
- unsigned long long *value = data2;
- *value = fast_strtoull(string);
-}
-
-static inline void callback_system_strtoull(void *data1, void *data2) {
- char *string = data1;
- unsigned long long *value = data2;
- *value = strtoull(string, NULL, 10);
-}
-
-
-static inline struct base *entry(struct base *base, const char *name, void *data1, void *data2, void (*func)(void *, void *)) {
- if(!base)
- base = calloc(1, sizeof(struct base));
-
- struct entry *e = malloc(sizeof(struct entry));
- e->name = strdup(name);
- e->hash = simple_hash2(e->name);
- e->data1 = data1;
- e->data2 = data2;
- e->func = func;
- e->prev = NULL;
- e->next = base->entries;
-
- if(base->entries) base->entries->prev = e;
- else base->last = e;
-
- base->entries = e;
- base->registered++;
- base->wanted = base->registered;
-
- return base;
-}
-
-static inline int check(struct base *base, const char *s) {
- uint32_t hash = simple_hash2(s);
-
- if(likely(!strcmp(s, base->last->name))) {
- base->last->found = 1;
- base->found++;
- if(base->last->func) base->last->func(base->last->data1, base->last->data2);
- base->last = base->last->next;
-
- if(!base->last)
- base->last = base->entries;
-
- if(base->found == base->registered)
- return 1;
-
- return 0;
- }
-
- // find it
- struct entry *e;
- for(e = base->entries; e ; e = e->next)
- if(e->hash == hash && !strcmp(e->name, s))
- break;
-
- if(e == base->last) {
- printf("ERROR\n");
- exit(1);
- }
-
- if(e) {
- // found
-
- // run it
- if(e->func) e->func(e->data1, e->data2);
-
- // unlink it
- if(e->next) e->next->prev = e->prev;
- if(e->prev) e->prev->next = e->next;
-
- if(base->entries == e)
- base->entries = e->next;
- }
- else {
- // not found
-
- // create it
- e = calloc(1, sizeof(struct entry));
- e->name = strdup(s);
- e->hash = hash;
- }
-
- // link it here
- e->next = base->last;
- if(base->last) {
- e->prev = base->last->prev;
- base->last->prev = e;
-
- if(base->entries == base->last)
- base->entries = e;
- }
- else
- e->prev = NULL;
-
- if(e->prev)
- e->prev->next = e;
-
- base->last = e->next;
- if(!base->last)
- base->last = base->entries;
-
- e->found = 1;
- base->found++;
-
- if(base->found == base->registered)
- return 1;
-
- printf("relinked '%s' after '%s' and before '%s': ", e->name, e->prev?e->prev->name:"NONE", e->next?e->next->name:"NONE");
- for(e = base->entries; e ; e = e->next) printf("%s ", e->name);
- printf("\n");
-
- return 0;
-}
-
-static inline void begin(struct base *base) {
-
- if(unlikely(base->iteration % 60) == 1) {
- base->wanted = 0;
- struct entry *e;
- for(e = base->entries; e ; e = e->next)
- if(e->found) base->wanted++;
- }
-
- base->iteration++;
- base->last = base->entries;
- base->found = 0;
-}
-
-void test6() {
-
- static struct base *base = NULL;
-
- if(unlikely(!base)) {
- base = entry(base, "cache", NUMBER1, &values6[0], callback_system_strtoull);
- base = entry(base, "rss", NUMBER2, &values6[1], callback_system_strtoull);
- base = entry(base, "rss_huge", NUMBER3, &values6[2], callback_system_strtoull);
- base = entry(base, "mapped_file", NUMBER4, &values6[3], callback_system_strtoull);
- base = entry(base, "writeback", NUMBER5, &values6[4], callback_system_strtoull);
- base = entry(base, "dirty", NUMBER6, &values6[5], callback_system_strtoull);
- base = entry(base, "swap", NUMBER7, &values6[6], callback_system_strtoull);
- base = entry(base, "pgpgin", NUMBER8, &values6[7], callback_system_strtoull);
- base = entry(base, "pgpgout", NUMBER9, &values6[8], callback_system_strtoull);
- base = entry(base, "pgfault", NUMBER10, &values6[9], callback_system_strtoull);
- base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback_system_strtoull);
- }
-
- begin(base);
-
- int i;
- for(i = 0; strings[i] ; i++) {
- if(check(base, strings[i]))
- break;
- }
-}
-
-void test7() {
-
- static struct base *base = NULL;
-
- if(unlikely(!base)) {
- base = entry(base, "cache", NUMBER1, &values6[0], callback);
- base = entry(base, "rss", NUMBER2, &values6[1], callback);
- base = entry(base, "rss_huge", NUMBER3, &values6[2], callback);
- base = entry(base, "mapped_file", NUMBER4, &values6[3], callback);
- base = entry(base, "writeback", NUMBER5, &values6[4], callback);
- base = entry(base, "dirty", NUMBER6, &values6[5], callback);
- base = entry(base, "swap", NUMBER7, &values6[6], callback);
- base = entry(base, "pgpgin", NUMBER8, &values6[7], callback);
- base = entry(base, "pgpgout", NUMBER9, &values6[8], callback);
- base = entry(base, "pgfault", NUMBER10, &values6[9], callback);
- base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback);
- }
-
- begin(base);
-
- int i;
- for(i = 0; strings[i] ; i++) {
- if(check(base, strings[i]))
- break;
- }
-}
-
-// ----------------------------------------------------------------------------
-
-
-// ==============
-// --- Poor man cycle counting.
-static unsigned long tsc;
-
-static void begin_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
- tsc = ((unsigned long)d << 32) | (unsigned long)a;
-}
-
-static unsigned long end_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
- return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
-}
-// ===============
-
-static unsigned long long clk;
-
-static void begin_clock() {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1))
- return;
- clk = tv.tv_sec * 1000000 + tv.tv_usec;
-}
-
-static unsigned long long end_clock() {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1))
- return -1;
- return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk;
-}
-
-void main(void)
-{
- cache_hash = simple_hash("cache");
- rss_hash = simple_hash("rss");
- rss_huge_hash = simple_hash("rss_huge");
- mapped_file_hash = simple_hash("mapped_file");
- writeback_hash = simple_hash("writeback");
- dirty_hash = simple_hash("dirty");
- swap_hash = simple_hash("swap");
- pgpgin_hash = simple_hash("pgpgin");
- pgpgout_hash = simple_hash("pgpgout");
- pgfault_hash = simple_hash("pgfault");
- pgmajfault_hash = simple_hash("pgmajfault");
- inactive_anon_hash = simple_hash("inactive_anon");
- active_anon_hash = simple_hash("active_anon");
- inactive_file_hash = simple_hash("inactive_file");
- active_file_hash = simple_hash("active_file");
- unevictable_hash = simple_hash("unevictable");
- hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit");
- total_cache_hash = simple_hash("total_cache");
- total_rss_hash = simple_hash("total_rss");
- total_rss_huge_hash = simple_hash("total_rss_huge");
- total_mapped_file_hash = simple_hash("total_mapped_file");
- total_writeback_hash = simple_hash("total_writeback");
- total_dirty_hash = simple_hash("total_dirty");
- total_swap_hash = simple_hash("total_swap");
- total_pgpgin_hash = simple_hash("total_pgpgin");
- total_pgpgout_hash = simple_hash("total_pgpgout");
- total_pgfault_hash = simple_hash("total_pgfault");
- total_pgmajfault_hash = simple_hash("total_pgmajfault");
- total_inactive_anon_hash = simple_hash("total_inactive_anon");
- total_active_anon_hash = simple_hash("total_active_anon");
- total_inactive_file_hash = simple_hash("total_inactive_file");
- total_active_file_hash = simple_hash("total_active_file");
- total_unevictable_hash = simple_hash("total_unevictable");
-
- // cache functions
- (void)simple_hash2("hello world");
- (void)strcmp("1", "2");
- (void)strtoull("123", NULL, 0);
-
- unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7;
- unsigned long max = 1000000;
-
- // let the processor get up to speed
- begin_clock();
- for(i = 0; i <= max ;i++) test1();
- c1 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test1();
- c1 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test2();
- c2 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test3();
- c3 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test4();
- c4 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test5();
- c5 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test6();
- c6 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test7();
- c7 = end_clock();
-
- for(i = 0; i < 11 ; i++)
- printf("value %lu: %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i]);
-
- printf("\n\nRESULTS\n");
- printf("test1() in %lu usecs: if-else-if-else-if, simple strcmp() with system strtoull().\n"
- "test2() in %lu usecs: inline simple_hash() if-else-if-else-if, with system strtoull().\n"
- "test3() in %lu usecs: statement expression simple_hash(), system strtoull().\n"
- "test4() in %lu usecs: inline simple_hash(), if-continue checks, system strtoull().\n"
- "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if, custom strtoull() (netdata default prior to ARL).\n"
- "test6() in %lu usecs: adaptive re-sortable list, system strtoull() (wow!)\n"
- "test7() in %lu usecs: adaptive re-sortable list, custom strtoull() (wow!)\n"
- , c1
- , c2
- , c3
- , c4
- , c5
- , c6
- , c7
- );
-
-}
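Unlike the other benchmarks, this one pulls in nothing beyond libc, so it could be built standalone. A sketch for an x86_64 host (the rdtsc/rdtscp timing helpers are x86-specific), with flags mirroring the PERFORMANCE_CFLAGS above:

    gcc -O2 -Wall -Wextra -o benchmark-line-parsing tests/profile/benchmark-line-parsing.c
    ./benchmark-line-parsing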
diff --git a/tests/profile/benchmark-procfile-parser.c b/tests/profile/benchmark-procfile-parser.c
deleted file mode 100644
index 991e2dfc8..000000000
--- a/tests/profile/benchmark-procfile-parser.c
+++ /dev/null
@@ -1,329 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-
-#include "config.h"
-#include "libnetdata/libnetdata.h"
-
-void netdata_cleanup_and_exit(int ret) {
- exit(ret);
-}
-
-#define PF_PREFIX "PROCFILE"
-#define PFWORDS_INCREASE_STEP 200
-#define PFLINES_INCREASE_STEP 10
-#define PROCFILE_INCREMENT_BUFFER 512
-extern size_t procfile_max_lines;
-extern size_t procfile_max_words;
-extern size_t procfile_max_allocation;
-
-
-static inline void pflines_reset(pflines *fl) {
- // debug(D_PROCFILE, PF_PREFIX ": reseting lines");
-
- fl->len = 0;
-}
-
-static inline void pflines_free(pflines *fl) {
- // debug(D_PROCFILE, PF_PREFIX ": freeing lines");
-
- freez(fl);
-}
-
-static inline void pfwords_reset(pfwords *fw) {
- // debug(D_PROCFILE, PF_PREFIX ": reseting words");
- fw->len = 0;
-}
-
-
-static inline void pfwords_add(procfile *ff, char *str) {
- // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str);
-
- pfwords *fw = ff->words;
- if(unlikely(fw->len == fw->size)) {
- // debug(D_PROCFILE, PF_PREFIX ": expanding words");
-
- ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *));
- fw->size += PFWORDS_INCREASE_STEP;
- }
-
- fw->words[fw->len++] = str;
-}
-
-NEVERNULL
-static inline size_t *pflines_add(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word);
-
- pflines *fl = ff->lines;
- if(unlikely(fl->len == fl->size)) {
- // debug(D_PROCFILE, PF_PREFIX ": expanding lines");
-
- ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline));
- fl->size += PFLINES_INCREASE_STEP;
- }
-
- ffline *ffl = &fl->lines[fl->len++];
- ffl->words = 0;
- ffl->first = ff->words->len;
-
- return &ffl->words;
-}
-
-
-NOINLINE
-static void procfile_parser(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename);
-
- char *s = ff->data // our current position
- , *e = &ff->data[ff->len] // the terminating null
- , *t = ff->data; // the first character of a word (or quoted / parenthesized string)
-
- // the look up array to find our type of character
- PF_CHAR_TYPE *separators = ff->separators;
-
- char quote = 0; // the quote character - only when in quoted string
- size_t opened = 0; // counts the number of open parenthesis
-
- size_t *line_words = pflines_add(ff);
-
- while(s < e) {
- PF_CHAR_TYPE ct = separators[(unsigned char)(*s)];
-
- // this is faster than a switch()
- // read more here: http://lazarenko.me/switch/
- switch(ct) {
- case PF_CHAR_IS_SEPARATOR:
- if(!quote && !opened) {
- if (s != t) {
- // separator, but we have word before it
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- }
- t = s + 1;
- }
- // fallthrough
-
- case PF_CHAR_IS_WORD:
- s++;
- break;
-
-
- case PF_CHAR_IS_NEWLINE:
- // end of line
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
-
- // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words);
-
- line_words = pflines_add(ff);
- break;
-
- case PF_CHAR_IS_QUOTE:
- if(unlikely(!quote && s == t)) {
- // quote opened at the beginning
- quote = *s;
- t = ++s;
- }
- else if(unlikely(quote && quote == *s)) {
- // quote closed
- quote = 0;
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
- }
- else
- s++;
- break;
-
- case PF_CHAR_IS_OPEN:
- if(s == t) {
- opened++;
- t = ++s;
- }
- else if(opened) {
- opened++;
- s++;
- }
- else
- s++;
- break;
-
- case PF_CHAR_IS_CLOSE:
- if(opened) {
- opened--;
-
- if(!opened) {
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
- }
- else
- s++;
- }
- else
- s++;
- break;
-
- default:
- fatal("Internal Error: procfile_readall() does not handle all the cases.");
- }
- }
-
- if(likely(s > t && t < e)) {
- // the last word
- if(unlikely(ff->len >= ff->size)) {
-            // we are going to lose the last byte
- s = &ff->data[ff->size - 1];
- }
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- // t = ++s;
- }
-}
-
-
-procfile *procfile_readall1(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename);
-
- ff->len = 0; // zero the used size
- ssize_t r = 1; // read at least once
- while(r > 0) {
- ssize_t s = ff->len;
- ssize_t x = ff->size - s;
-
- if(unlikely(!x)) {
- debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff));
- ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER);
- ff->size += PROCFILE_INCREMENT_BUFFER;
- }
-
- debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
- r = read(ff->fd, &ff->data[s], ff->size - s);
- if(unlikely(r == -1)) {
- if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
- procfile_close(ff);
- return NULL;
- }
-
- ff->len += r;
- }
-
- // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
- if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
- if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
- procfile_close(ff);
- return NULL;
- }
-
- pflines_reset(ff->lines);
- pfwords_reset(ff->words);
- procfile_parser(ff);
-
- if(unlikely(procfile_adaptive_initial_allocation)) {
- if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len;
- if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len;
- if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len;
- }
-
- // debug(D_PROCFILE, "File '%s' updated.", ff->filename);
- return ff;
-}
-
-
-
-
-
-
-
-
-// ==============
-// --- Poor man cycle counting.
-static unsigned long tsc;
-
-void begin_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
- tsc = ((unsigned long)d << 32) | (unsigned long)a;
-}
-
-unsigned long end_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
- return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
-}
-// ==============
-
-
-unsigned long test_netdata_internal(void) {
- static procfile *ff = NULL;
-
- ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(!ff) {
- fprintf(stderr, "Failed to open filename\n");
- exit(1);
- }
-
- begin_tsc();
- ff = procfile_readall(ff);
- unsigned long c = end_tsc();
-
- if(!ff) {
- fprintf(stderr, "Failed to read filename\n");
- exit(1);
- }
-
- return c;
-}
-
-unsigned long test_method1(void) {
- static procfile *ff = NULL;
-
- ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(!ff) {
- fprintf(stderr, "Failed to open filename\n");
- exit(1);
- }
-
- begin_tsc();
- ff = procfile_readall1(ff);
- unsigned long c = end_tsc();
-
- if(!ff) {
- fprintf(stderr, "Failed to read filename\n");
- exit(1);
- }
-
- return c;
-}
-
-//--- Test
-int main(int argc, char **argv)
-{
- (void)argc; (void)argv;
-
- int i, max = 1000000;
-
- unsigned long c1 = 0;
- test_netdata_internal();
- for(i = 0; i < max ; i++)
- c1 += test_netdata_internal();
-
- unsigned long c2 = 0;
- test_method1();
- for(i = 0; i < max ; i++)
- c2 += test_method1();
-
- printf("netdata internal: completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c1, c1 / max, (float)c1 * 100.0 / (float)c1);
- printf("method1 : completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c2, c2 / max, (float)c2 * 100.0 / (float)c1);
-
- return 0;
-}
diff --git a/tests/profile/benchmark-registry.c b/tests/profile/benchmark-registry.c
deleted file mode 100644
index cfed6d7c8..000000000
--- a/tests/profile/benchmark-registry.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-
-/*
- * compile with
- * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o benchmark-registry benchmark-registry.c ../src/dictionary.o ../src/log.o ../src/avl.o ../src/common.o ../src/appconfig.o ../src/web_buffer.o ../src/storage_number.o ../src/rrd.o ../src/health.o -pthread -luuid -lm -DHAVE_CONFIG_H -DVARLIB_DIR="\"/tmp\""
- */
-
-char *hostname = "me";
-
-#include "../src/registry.c"
-
-void netdata_cleanup_and_exit(int ret) { exit(ret); }
-
-// ----------------------------------------------------------------------------
-// TESTS
-
-int test1(int argc, char **argv) {
-
- void print_stats(uint32_t requests, unsigned long long start, unsigned long long end) {
- fprintf(stderr, " > SPEED: %u requests served in %0.2f seconds ( >>> %llu per second <<< )\n",
- requests, (end-start) / 1000000.0, (unsigned long long)requests * 1000000ULL / (end-start));
-
- fprintf(stderr, " > DB : persons %llu, machines %llu, unique URLs %llu, accesses %llu, URLs: for persons %llu, for machines %llu\n",
- registry.persons_count, registry.machines_count, registry.urls_count, registry.usages_count,
- registry.persons_urls_count, registry.machines_urls_count);
- }
-
- (void) argc;
- (void) argv;
-
- uint32_t u, users = 1000000;
- uint32_t m, machines = 200000;
- uint32_t machines2 = machines * 2;
-
- char **users_guids = malloc(users * sizeof(char *));
- char **machines_guids = malloc(machines2 * sizeof(char *));
- char **machines_urls = malloc(machines2 * sizeof(char *));
- unsigned long long start;
-
- registry_init();
-
- fprintf(stderr, "Generating %u machine guids\n", machines2);
- for(m = 0; m < machines2 ;m++) {
- uuid_t uuid;
- machines_guids[m] = malloc(36+1);
- uuid_generate(uuid);
- uuid_unparse(uuid, machines_guids[m]);
-
- char buf[FILENAME_MAX + 1];
- snprintfz(buf, FILENAME_MAX, "http://%u.netdata.rocks/", m+1);
- machines_urls[m] = strdup(buf);
-
- // fprintf(stderr, "\tmachine %u: '%s', url: '%s'\n", m + 1, machines_guids[m], machines_urls[m]);
- }
-
- start = timems();
- fprintf(stderr, "\nGenerating %u users accessing %u machines\n", users, machines);
- m = 0;
- time_t now = time(NULL);
- for(u = 0; u < users ; u++) {
- if(++m == machines) m = 0;
-
- PERSON *p = registry_request_access(NULL, machines_guids[m], machines_urls[m], "test", now);
- users_guids[u] = p->guid;
- }
- print_stats(u, start, timems());
-
- start = timems();
- fprintf(stderr, "\nAll %u users accessing again the same %u servers\n", users, machines);
- m = 0;
- now = time(NULL);
- for(u = 0; u < users ; u++) {
- if(++m == machines) m = 0;
-
- PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now);
-
- if(p->guid != users_guids[u])
-            fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'", users_guids[u], p->guid);
- }
- print_stats(u, start, timems());
-
- start = timems();
- fprintf(stderr, "\nAll %u users accessing a new server, out of the %u servers\n", users, machines);
- m = 1;
- now = time(NULL);
- for(u = 0; u < users ; u++) {
- if(++m == machines) m = 0;
-
- PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now);
-
- if(p->guid != users_guids[u])
-            fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'", users_guids[u], p->guid);
- }
- print_stats(u, start, timems());
-
- start = timems();
- fprintf(stderr, "\n%u random users accessing a random server, out of the %u servers\n", users, machines);
- now = time(NULL);
- for(u = 0; u < users ; u++) {
- uint32_t tu = random() * users / RAND_MAX;
- uint32_t tm = random() * machines / RAND_MAX;
-
- PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now);
-
- if(p->guid != users_guids[tu])
-            fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'", users_guids[tu], p->guid);
- }
- print_stats(u, start, timems());
-
- start = timems();
- fprintf(stderr, "\n%u random users accessing a random server, out of %u servers\n", users, machines2);
- now = time(NULL);
- for(u = 0; u < users ; u++) {
- uint32_t tu = random() * users / RAND_MAX;
- uint32_t tm = random() * machines2 / RAND_MAX;
-
- PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now);
-
- if(p->guid != users_guids[tu])
-            fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'", users_guids[tu], p->guid);
- }
- print_stats(u, start, timems());
-
- for(m = 0; m < 10; m++) {
- start = timems();
- fprintf(stderr,
- "\n%u random user accesses to a random server, out of %u servers,\n > using 1/10000 with a random url, 1/1000 with a mismatched url\n",
- users * 2, machines2);
- now = time(NULL);
- for (u = 0; u < users * 2; u++) {
- uint32_t tu = random() * users / RAND_MAX;
- uint32_t tm = random() * machines2 / RAND_MAX;
-
- char *url = machines_urls[tm];
- char buf[FILENAME_MAX + 1];
- if (random() % 10000 == 1234) {
- snprintfz(buf, FILENAME_MAX, "http://random.%ld.netdata.rocks/", random());
- url = buf;
- }
- else if (random() % 1000 == 123)
- url = machines_urls[random() * machines2 / RAND_MAX];
-
- PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], url, "test", now);
-
- if (p->guid != users_guids[tu])
-                fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'", users_guids[tu], p->guid);
- }
- print_stats(u, start, timems());
- }
-
- fprintf(stderr, "\n\nSAVE\n");
- start = timems();
- registry_save();
- print_stats(registry.persons_count, start, timems());
-
- fprintf(stderr, "\n\nCLEANUP\n");
- start = timems();
- registry_free();
- print_stats(registry.persons_count, start, timems());
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// TESTING
-
-int main(int argc, char **argv) {
- config_set_boolean("registry", "enabled", 1);
-
- //debug_flags = 0xFFFFFFFF;
- test1(argc, argv);
- exit(0);
-
- (void)argc;
- (void)argv;
-
-
- PERSON *p1, *p2;
-
- fprintf(stderr, "\n\nINITIALIZATION\n");
-
- registry_init();
-
- int i = 2;
-
- fprintf(stderr, "\n\nADDING ENTRY\n");
- p1 = registry_request_access("2c95abd0-1542-11e6-8c66-00508db7e9c9", "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL));
-
- if(0)
- while(i--) {
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ENTRY\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p1 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER URL\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://127.0.0.1:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER URL\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER PERSON\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p2 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p2 = registry_request_access(p2->guid, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL));
- }
-
- fprintf(stderr, "\n\nSAVE\n");
- registry_save();
-
- fprintf(stderr, "\n\nCLEANUP\n");
- registry_free();
- return 0;
-}
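
The machine GUIDs fed to registry_request_access() in the deleted benchmark come straight from libuuid (hence the -luuid in the compile line at the top of the file). A minimal sketch of just that part, assuming the libuuid development headers are installed:

    #include <stdio.h>
    #include <uuid/uuid.h>      /* link with -luuid */

    int main(void) {
        uuid_t uuid;
        char guid[36 + 1];      /* 36 characters plus the terminating NUL, as above */

        uuid_generate(uuid);
        uuid_unparse(uuid, guid);
        printf("machine guid: %s\n", guid);
        return 0;
    }
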
diff --git a/tests/profile/benchmark-value-pairs.c b/tests/profile/benchmark-value-pairs.c
deleted file mode 100644
index ae4f53c3a..000000000
--- a/tests/profile/benchmark-value-pairs.c
+++ /dev/null
@@ -1,623 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-
-#include "config.h"
-#include "libnetdata/libnetdata.h"
-
-#ifdef simple_hash
-#undef simple_hash
-#endif
-
-void netdata_cleanup_and_exit(int ret) {
- exit(ret);
-}
-
-#define simple_hash(name) ({ \
- register unsigned char *__hash_source = (unsigned char *)(name); \
- register uint32_t __hash_value = 0x811c9dc5; \
- while (*__hash_source) { \
- __hash_value *= 16777619; \
- __hash_value ^= (uint32_t) *__hash_source++; \
- } \
- __hash_value; \
-})
-
-static inline uint32_t simple_hash2(const char *name) {
- register unsigned char *s = (unsigned char *)name;
- register uint32_t hval = 0x811c9dc5;
- while (*s) {
- hval *= 16777619;
- hval ^= (uint32_t) *s++;
- }
- return hval;
-}
-
-static inline unsigned long long fast_strtoull(const char *s) {
- register unsigned long long n = 0;
- register char c;
- for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
- n *= 10;
- n += c - '0';
- // n = (n << 1) + (n << 3) + (c - '0');
- }
- return n;
-}
-
-static uint32_t cache_hash = 0;
-static uint32_t rss_hash = 0;
-static uint32_t rss_huge_hash = 0;
-static uint32_t mapped_file_hash = 0;
-static uint32_t writeback_hash = 0;
-static uint32_t dirty_hash = 0;
-static uint32_t swap_hash = 0;
-static uint32_t pgpgin_hash = 0;
-static uint32_t pgpgout_hash = 0;
-static uint32_t pgfault_hash = 0;
-static uint32_t pgmajfault_hash = 0;
-static uint32_t inactive_anon_hash = 0;
-static uint32_t active_anon_hash = 0;
-static uint32_t inactive_file_hash = 0;
-static uint32_t active_file_hash = 0;
-static uint32_t unevictable_hash = 0;
-static uint32_t hierarchical_memory_limit_hash = 0;
-static uint32_t total_cache_hash = 0;
-static uint32_t total_rss_hash = 0;
-static uint32_t total_rss_huge_hash = 0;
-static uint32_t total_mapped_file_hash = 0;
-static uint32_t total_writeback_hash = 0;
-static uint32_t total_dirty_hash = 0;
-static uint32_t total_swap_hash = 0;
-static uint32_t total_pgpgin_hash = 0;
-static uint32_t total_pgpgout_hash = 0;
-static uint32_t total_pgfault_hash = 0;
-static uint32_t total_pgmajfault_hash = 0;
-static uint32_t total_inactive_anon_hash = 0;
-static uint32_t total_active_anon_hash = 0;
-static uint32_t total_inactive_file_hash = 0;
-static uint32_t total_active_file_hash = 0;
-static uint32_t total_unevictable_hash = 0;
-
-unsigned long long values1[50] = { 0 };
-unsigned long long values2[50] = { 0 };
-unsigned long long values3[50] = { 0 };
-unsigned long long values4[50] = { 0 };
-unsigned long long values5[50] = { 0 };
-unsigned long long values6[50] = { 0 };
-unsigned long long values7[50] = { 0 };
-unsigned long long values8[50] = { 0 };
-unsigned long long values9[50] = { 0 };
-
-struct pair {
- const char *name;
- const char *value;
- uint32_t hash;
- unsigned long long *collected8;
- unsigned long long *collected9;
-} pairs[] = {
- { "cache", "12345678901234", 0, &values8[0] ,&values9[0] },
- { "rss", "23456789012345", 0, &values8[1] ,&values9[1] },
- { "rss_huge", "34567890123456", 0, &values8[2] ,&values9[2] },
- { "mapped_file", "45678901234567", 0, &values8[3] ,&values9[3] },
- { "writeback", "56789012345678", 0, &values8[4] ,&values9[4] },
- { "dirty", "67890123456789", 0, &values8[5] ,&values9[5] },
- { "swap", "78901234567890", 0, &values8[6] ,&values9[6] },
- { "pgpgin", "89012345678901", 0, &values8[7] ,&values9[7] },
- { "pgpgout", "90123456789012", 0, &values8[8] ,&values9[8] },
- { "pgfault", "10345678901234", 0, &values8[9] ,&values9[9] },
- { "pgmajfault", "11456789012345", 0, &values8[10] ,&values9[10] },
- { "inactive_anon", "12000000000000", 0, &values8[11] ,&values9[11] },
- { "active_anon", "13345678901234", 0, &values8[12] ,&values9[12] },
- { "inactive_file", "14345678901234", 0, &values8[13] ,&values9[13] },
- { "active_file", "15345678901234", 0, &values8[14] ,&values9[14] },
- { "unevictable", "16345678901234", 0, &values8[15] ,&values9[15] },
- { "hierarchical_memory_limit", "17345678901234", 0, &values8[16] ,&values9[16] },
- { "total_cache", "18345678901234", 0, &values8[17] ,&values9[17] },
- { "total_rss", "19345678901234", 0, &values8[18] ,&values9[18] },
- { "total_rss_huge", "20345678901234", 0, &values8[19] ,&values9[19] },
- { "total_mapped_file", "21345678901234", 0, &values8[20] ,&values9[20] },
- { "total_writeback", "22345678901234", 0, &values8[21] ,&values9[21] },
- { "total_dirty", "23000000000000", 0, &values8[22] ,&values9[22] },
- { "total_swap", "24345678901234", 0, &values8[23] ,&values9[23] },
- { "total_pgpgin", "25345678901234", 0, &values8[24] ,&values9[24] },
- { "total_pgpgout", "26345678901234", 0, &values8[25] ,&values9[25] },
- { "total_pgfault", "27345678901234", 0, &values8[26] ,&values9[26] },
- { "total_pgmajfault", "28345678901234", 0, &values8[27] ,&values9[27] },
- { "total_inactive_anon", "29345678901234", 0, &values8[28] ,&values9[28] },
- { "total_active_anon", "30345678901234", 0, &values8[29] ,&values9[29] },
- { "total_inactive_file", "31345678901234", 0, &values8[30] ,&values9[30] },
- { "total_active_file", "32345678901234", 0, &values8[31] ,&values9[31] },
- { "total_unevictable", "33345678901234", 0, &values8[32] ,&values9[32] },
- { NULL, NULL , 0, NULL ,NULL }
-};
-
-// simple system strcmp()
-void test1() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- if(unlikely(!strcmp(s, "cache")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "rss")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "rss_huge")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "mapped_file")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "writeback")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "dirty")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "swap")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgpgin")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgpgout")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgfault")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgmajfault")))
- values1[i] = strtoull(v, NULL, 10);
- }
-}
-
-// inline simple_hash() with system strtoull()
-void test2() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values2[i] = strtoull(v, NULL, 10);
- }
-}
-
-// statement expression simple_hash(), system strtoull()
-void test3() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- uint32_t hash = simple_hash(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values3[i] = strtoull(v, NULL, 10);
- }
-}
-
-
-// inline simple_hash(), if-continue checks
-void test4() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
- }
-}
-
-// inline simple_hash(), if-else-if-else-if (netdata default)
-void test5() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values5[i] = fast_strtoull(v);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-void arl_strtoull(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name;
- (void)hash;
-
- register unsigned long long *d = dst;
- *d = strtoull(value, NULL, 10);
- // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
-}
-
-void test6() {
- static ARL_BASE *base = NULL;
-
- if(unlikely(!base)) {
- base = arl_create("test6", arl_strtoull, 60);
- arl_expect_custom(base, "cache", NULL, &values6[0]);
- arl_expect_custom(base, "rss", NULL, &values6[1]);
- arl_expect_custom(base, "rss_huge", NULL, &values6[2]);
- arl_expect_custom(base, "mapped_file", NULL, &values6[3]);
- arl_expect_custom(base, "writeback", NULL, &values6[4]);
- arl_expect_custom(base, "dirty", NULL, &values6[5]);
- arl_expect_custom(base, "swap", NULL, &values6[6]);
- arl_expect_custom(base, "pgpgin", NULL, &values6[7]);
- arl_expect_custom(base, "pgpgout", NULL, &values6[8]);
- arl_expect_custom(base, "pgfault", NULL, &values6[9]);
- arl_expect_custom(base, "pgmajfault", NULL, &values6[10]);
- }
-
- arl_begin(base);
-
- int i;
- for(i = 0; pairs[i].name ; i++)
- if(arl_check(base, pairs[i].name, pairs[i].value)) break;
-}
-
-void arl_str2ull(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name;
- (void)hash;
-
- register unsigned long long *d = dst;
- *d = str2ull(value);
- // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
-}
-
-void test7() {
- static ARL_BASE *base = NULL;
-
- if(unlikely(!base)) {
- base = arl_create("test7", arl_str2ull, 60);
- arl_expect_custom(base, "cache", NULL, &values7[0]);
- arl_expect_custom(base, "rss", NULL, &values7[1]);
- arl_expect_custom(base, "rss_huge", NULL, &values7[2]);
- arl_expect_custom(base, "mapped_file", NULL, &values7[3]);
- arl_expect_custom(base, "writeback", NULL, &values7[4]);
- arl_expect_custom(base, "dirty", NULL, &values7[5]);
- arl_expect_custom(base, "swap", NULL, &values7[6]);
- arl_expect_custom(base, "pgpgin", NULL, &values7[7]);
- arl_expect_custom(base, "pgpgout", NULL, &values7[8]);
- arl_expect_custom(base, "pgfault", NULL, &values7[9]);
- arl_expect_custom(base, "pgmajfault", NULL, &values7[10]);
- }
-
- arl_begin(base);
-
- int i;
- for(i = 0; pairs[i].name ; i++)
- if(arl_check(base, pairs[i].name, pairs[i].value)) break;
-}
-
-void test8() {
- int i;
- for(i = 0; pairs[i].name; i++) {
- uint32_t hash = simple_hash(pairs[i].name);
-
- int j;
- for(j = 0; pairs[j].name; j++) {
- if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) {
- *pairs[j].collected8 = strtoull(pairs[i].value, NULL, 10);
- break;
- }
- }
- }
-}
-
-void test9() {
- int i;
- for(i = 0; pairs[i].name; i++) {
- uint32_t hash = simple_hash(pairs[i].name);
-
- int j;
- for(j = 0; pairs[j].name; j++) {
- if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) {
- *pairs[j].collected9 = str2ull(pairs[i].value);
- break;
- }
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-
-/*
-// ==============
-// --- Poor man cycle counting.
-static unsigned long tsc;
-
-static void begin_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
- tsc = ((unsigned long)d << 32) | (unsigned long)a;
-}
-
-static unsigned long end_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
- return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
-}
-// ===============
-*/
-
-static unsigned long long clk;
-
-static void begin_clock() {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1))
- return;
- clk = tv.tv_sec * 1000000 + tv.tv_usec;
-}
-
-static unsigned long long end_clock() {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1))
- return -1;
- return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk;
-}
-
-int main(void)
-{
- {
- int i;
- for(i = 0; pairs[i].name; i++)
- pairs[i].hash = simple_hash(pairs[i].name);
- }
-
- cache_hash = simple_hash("cache");
- rss_hash = simple_hash("rss");
- rss_huge_hash = simple_hash("rss_huge");
- mapped_file_hash = simple_hash("mapped_file");
- writeback_hash = simple_hash("writeback");
- dirty_hash = simple_hash("dirty");
- swap_hash = simple_hash("swap");
- pgpgin_hash = simple_hash("pgpgin");
- pgpgout_hash = simple_hash("pgpgout");
- pgfault_hash = simple_hash("pgfault");
- pgmajfault_hash = simple_hash("pgmajfault");
- inactive_anon_hash = simple_hash("inactive_anon");
- active_anon_hash = simple_hash("active_anon");
- inactive_file_hash = simple_hash("inactive_file");
- active_file_hash = simple_hash("active_file");
- unevictable_hash = simple_hash("unevictable");
- hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit");
- total_cache_hash = simple_hash("total_cache");
- total_rss_hash = simple_hash("total_rss");
- total_rss_huge_hash = simple_hash("total_rss_huge");
- total_mapped_file_hash = simple_hash("total_mapped_file");
- total_writeback_hash = simple_hash("total_writeback");
- total_dirty_hash = simple_hash("total_dirty");
- total_swap_hash = simple_hash("total_swap");
- total_pgpgin_hash = simple_hash("total_pgpgin");
- total_pgpgout_hash = simple_hash("total_pgpgout");
- total_pgfault_hash = simple_hash("total_pgfault");
- total_pgmajfault_hash = simple_hash("total_pgmajfault");
- total_inactive_anon_hash = simple_hash("total_inactive_anon");
- total_active_anon_hash = simple_hash("total_active_anon");
- total_inactive_file_hash = simple_hash("total_inactive_file");
- total_active_file_hash = simple_hash("total_active_file");
- total_unevictable_hash = simple_hash("total_unevictable");
-
- // cache functions
- (void)simple_hash2("hello world");
- (void)strcmp("1", "2");
- (void)strtoull("123", NULL, 0);
-
- unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7 = 0, c8 = 0, c9 = 0;
- unsigned long max = 1000000;
-
- begin_clock();
- for(i = 0; i <= max ;i++) test1();
- c1 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test2();
- c2 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test3();
- c3 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test4();
- c4 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test5();
- c5 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test6();
- c6 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test7();
- c7 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test8();
- c8 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test9();
- c9 = end_clock();
-
- for(i = 0; i < 11 ; i++)
- printf("value %lu: %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i], values7[i], values8[i], values9[i]);
-
- printf("\n\nRESULTS\n");
- printf("test1() [1] in %lu usecs: simple system strcmp().\n"
- "test2() [4] in %lu usecs: inline simple_hash() with system strtoull().\n"
- "test3() [5] in %lu usecs: statement expression simple_hash(), system strtoull().\n"
- "test4() [6] in %lu usecs: inline simple_hash(), if-continue checks.\n"
- "test5() [7] in %lu usecs: inline simple_hash(), if-else-if-else-if (netdata default prior to ARL).\n"
- "test6() [8] in %lu usecs: adaptive re-sortable array with strtoull() (wow!)\n"
- "test7() [9] in %lu usecs: adaptive re-sortable array with str2ull() (wow!)\n"
- "test8() [2] in %lu usecs: nested loop with strtoull()\n"
- "test9() [3] in %lu usecs: nested loop with str2ull()\n"
- , c1
- , c2
- , c3
- , c4
- , c5
- , c6
- , c7
- , c8
- , c9
- );
-
- return 0;
-}
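
The deleted file compares several ways of matching name/value pairs from /proc-style files: plain strcmp() chains (test1), a multiply/xor hash used as a cheap filter in front of strcmp() (test2 to test5), nested loops over pre-hashed pairs (test8, test9), and netdata's adaptive re-sortable list (test6, test7). A standalone sketch of the hash-filter idea, using the same routine as simple_hash2() above:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* same multiply/xor loop and constants as simple_hash2() above; the hash only
     * filters candidates, strcmp() still confirms the match before parsing */
    static uint32_t simple_hash2(const char *name) {
        const unsigned char *s = (const unsigned char *)name;
        uint32_t hval = 0x811c9dc5;
        while(*s) {
            hval *= 16777619;
            hval ^= (uint32_t)*s++;
        }
        return hval;
    }

    int main(void) {
        uint32_t cache_hash = simple_hash2("cache");   /* computed once at startup, as in main() above */

        const char *name = "cache", *value = "12345678901234";
        unsigned long long parsed = 0;

        if(simple_hash2(name) == cache_hash && !strcmp(name, "cache"))
            parsed = strtoull(value, NULL, 10);

        printf("%s = %llu\n", name, parsed);
        return 0;
    }
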
diff --git a/tests/profile/statsd-stress.c b/tests/profile/statsd-stress.c
deleted file mode 100644
index 435d58d5c..000000000
--- a/tests/profile/statsd-stress.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-#include <stdlib.h>
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <unistd.h>
-#include <string.h>
-#include <time.h>
-#include <pthread.h>
-
-void diep(char *s)
-{
- perror(s);
- exit(1);
-}
-
-size_t run_threads = 1;
-size_t metrics = 1024;
-
-#define SERVER_IP "127.0.0.1"
-#define PORT 8125
-
-size_t myrand(size_t max) {
- size_t loops = max / RAND_MAX;
- size_t i;
-
- size_t ret = rand();
- for(i = 0; i < loops ;i++)
- ret += rand();
-
- return ret % max;
-}
-
-struct thread_data {
- size_t id;
- struct sockaddr_in *si_other;
- int slen;
- size_t counter;
-};
-
-static void *report_thread(void *__data) {
- struct thread_data *data = (struct thread_data *)__data;
-
- size_t last = 0;
- for (;;) {
- size_t i;
- size_t total = 0;
- for(i = 0; i < run_threads ;i++)
- total += data[i].counter;
-
- printf("%zu metrics/s\n", total-last);
- last = total;
-
- sleep(1);
- printf("\033[F\033[J");
- }
-
- return NULL;
-}
-
-char *types[] = {"g", "c", "m", "ms", "h", "s", NULL};
-// char *types[] = {"g", "c", "C", "h", "ms", NULL}; // brubeck compatible
-
-static void *spam_thread(void *__data) {
- struct thread_data *data = (struct thread_data *)__data;
-
- int s;
- char packet[1024];
-
- if ((s = socket(AF_INET, SOCK_DGRAM, 0))==-1)
- diep("socket");
-
- char **packets = malloc(sizeof(char *) * metrics);
- size_t i, *lengths = malloc(sizeof(size_t) * metrics);
- size_t t;
-
- for(i = 0, t = 0; i < metrics ;i++, t++) {
- if(!types[t]) t = 0;
- char *type = types[t];
-
- lengths[i] = sprintf(packet, "stress.%s.t%zu.m%zu:%zu|%s", type, data->id, i, myrand(metrics), type);
- packets[i] = strdup(packet);
- // printf("packet %zu, of length %zu: '%s'\n", i, lengths[i], packets[i]);
- }
- //printf("\n");
-
- for (;;) {
- for(i = 0; i < metrics ;i++) {
- if (sendto(s, packets[i], lengths[i], 0, (void *)data->si_other, data->slen) < 0) {
- printf("C ==> DROPPED\n");
- return NULL;
- }
- data->counter++;
- }
- }
-
- free(packets);
- free(lengths);
- close(s);
- return NULL;
-}
-
-int main(int argc, char *argv[])
-{
- if (argc != 5) {
- fprintf(stderr, "Usage: '%s THREADS METRICS IP PORT'\n", argv[0]);
- exit(-1);
- }
-
- run_threads = atoi(argv[1]);
- metrics = atoi(argv[2]);
- char *ip = argv[3];
- int port = atoi(argv[4]);
-
- struct thread_data data[run_threads];
- struct sockaddr_in si_other;
- pthread_t threads[run_threads], report;
- size_t i;
-
- srand(time(NULL));
-
- memset(&si_other, 0, sizeof(si_other));
- si_other.sin_family = AF_INET;
- si_other.sin_port = htons(port);
- if (inet_aton(ip, &si_other.sin_addr)==0) {
- fprintf(stderr, "inet_aton() of ip '%s' failed\n", ip);
- exit(1);
- }
-
- for (i = 0; i < run_threads; ++i) {
- data[i].id = i;
- data[i].si_other = &si_other;
- data[i].slen = sizeof(si_other);
- data[i].counter = 0;
- pthread_create(&threads[i], NULL, spam_thread, &data[i]);
- }
-
- printf("\n");
- printf("THREADS : %zu\n", run_threads);
- printf("METRICS : %zu\n", metrics);
- printf("DESTINATION : %s:%d\n", ip, port);
- printf("\n");
- pthread_create(&report, NULL, report_thread, &data);
-
- for (i =0; i < run_threads; ++i)
- pthread_join(threads[i], NULL);
-
- return 0;
-}
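
The stress tool above floods a statsd listener with UDP packets of the form "name:value|type". A minimal sketch that sends a single such metric, using the 127.0.0.1:8125 defaults defined at the top of the file:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void) {
        int s = socket(AF_INET, SOCK_DGRAM, 0);
        if(s == -1) { perror("socket"); return 1; }

        struct sockaddr_in dst;
        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;
        dst.sin_port = htons(8125);                   /* PORT above */
        inet_aton("127.0.0.1", &dst.sin_addr);        /* SERVER_IP above */

        const char *packet = "stress.g.t0.m0:42|g";   /* one gauge, in the format built by spam_thread() */
        if(sendto(s, packet, strlen(packet), 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
            perror("sendto");

        close(s);
        return 0;
    }
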
diff --git a/tests/profile/test-eval.c b/tests/profile/test-eval.c
deleted file mode 100644
index 144381cf0..000000000
--- a/tests/profile/test-eval.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-
-/*
- * 1. build netdata (as normally)
- * 2. cd profile/
- * 3. compile with:
- * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o test-eval test-eval.c ../src/log.o ../src/eval.o ../src/common.o ../src/clocks.o ../src/web_buffer.o ../src/storage_number.o -pthread -lm
- */
-
-#include "config.h"
-#include "libnetdata/libnetdata.h"
-#include "database/rrdcalc.h"
-
-void netdata_cleanup_and_exit(int ret) { exit(ret); }
-
-/*
-void indent(int level, int show) {
- int i = level;
- while(i--) printf(" | ");
- if(show) printf(" \\_ ");
- else printf(" \\_ ");
-}
-
-void print_node(EVAL_NODE *op, int level);
-
-void print_value(EVAL_VALUE *v, int level) {
- indent(level, 0);
-
- switch(v->type) {
- case EVAL_VALUE_INVALID:
- printf("value (NOP)\n");
- break;
-
- case EVAL_VALUE_NUMBER:
- printf("value %Lf (NUMBER)\n", v->number);
- break;
-
- case EVAL_VALUE_EXPRESSION:
- printf("value (SUB-EXPRESSION)\n");
- print_node(v->expression, level+1);
- break;
-
- default:
- printf("value (INVALID type %d)\n", v->type);
- break;
-
- }
-}
-
-void print_node(EVAL_NODE *op, int level) {
-
-// if(op->operator != EVAL_OPERATOR_NOP) {
- indent(level, 1);
- if(op->operator) printf("%c (node %d, precedence: %d)\n", op->operator, op->id, op->precedence);
- else printf("NOP (node %d, precedence: %d)\n", op->id, op->precedence);
-// }
-
- int i = op->count;
- while(i--) print_value(&op->ops[i], level + 1);
-}
-
-calculated_number evaluate(EVAL_NODE *op, int depth);
-
-calculated_number evaluate_value(EVAL_VALUE *v, int depth) {
- switch(v->type) {
- case EVAL_VALUE_NUMBER:
- return v->number;
-
- case EVAL_VALUE_EXPRESSION:
- return evaluate(v->expression, depth);
-
- default:
- fatal("I don't know how to handle EVAL_VALUE type %d", v->type);
- }
-}
-
-void print_depth(int depth) {
- static int count = 0;
-
- printf("%d. ", ++count);
- while(depth--) printf(" ");
-}
-
-calculated_number evaluate(EVAL_NODE *op, int depth) {
- calculated_number n1, n2, r;
-
- switch(op->operator) {
- case EVAL_OPERATOR_SIGN_PLUS:
- r = evaluate_value(&op->ops[0], depth);
- break;
-
- case EVAL_OPERATOR_SIGN_MINUS:
- r = -evaluate_value(&op->ops[0], depth);
- break;
-
- case EVAL_OPERATOR_PLUS:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 + n2;
- print_depth(depth);
- printf("%Lf = %Lf + %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_MINUS:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 - n2;
- print_depth(depth);
- printf("%Lf = %Lf - %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_MULTIPLY:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 * n2;
- print_depth(depth);
- printf("%Lf = %Lf * %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_DIVIDE:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 / n2;
- print_depth(depth);
- printf("%Lf = %Lf / %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_NOT:
- n1 = evaluate_value(&op->ops[0], depth);
- r = !n1;
- print_depth(depth);
- printf("%Lf = NOT %Lf\n", r, n1);
- break;
-
- case EVAL_OPERATOR_AND:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 && n2;
- print_depth(depth);
- printf("%Lf = %Lf AND %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_OR:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 || n2;
- print_depth(depth);
- printf("%Lf = %Lf OR %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_GREATER_THAN_OR_EQUAL:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 >= n2;
- print_depth(depth);
- printf("%Lf = %Lf >= %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_LESS_THAN_OR_EQUAL:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 <= n2;
- print_depth(depth);
- printf("%Lf = %Lf <= %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_GREATER:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 > n2;
- print_depth(depth);
- printf("%Lf = %Lf > %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_LESS:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 < n2;
- print_depth(depth);
- printf("%Lf = %Lf < %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_NOT_EQUAL:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 != n2;
- print_depth(depth);
- printf("%Lf = %Lf <> %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_EQUAL:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 == n2;
- print_depth(depth);
- printf("%Lf = %Lf == %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_EXPRESSION_OPEN:
- printf("BEGIN SUB-EXPRESSION\n");
- r = evaluate_value(&op->ops[0], depth + 1);
- printf("END SUB-EXPRESSION\n");
- break;
-
- case EVAL_OPERATOR_NOP:
- case EVAL_OPERATOR_VALUE:
- r = evaluate_value(&op->ops[0], depth);
- break;
-
- default:
- error("I don't know how to handle operator '%c'", op->operator);
- r = 0;
- break;
- }
-
- return r;
-}
-
-
-void print_expression(EVAL_NODE *op, const char *failed_at, int error) {
- if(op) {
- printf("expression tree:\n");
- print_node(op, 0);
-
- printf("\nevaluation steps:\n");
- evaluate(op, 0);
-
- int error;
- calculated_number ret = expression_evaluate(op, &error);
- printf("\ninternal evaluator:\nSTATUS: %d, RESULT = %Lf\n", error, ret);
-
- expression_free(op);
- }
- else {
- printf("error: %d, failed_at: '%s'\n", error, (failed_at)?failed_at:"<NONE>");
- }
-}
-*/
-
-int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) {
- (void)variable;
- (void)hash;
- (void)rc;
- (void)result;
-
- return 0;
-}
-
-int main(int argc, char **argv) {
- if(argc != 2) {
-        fprintf(stderr, "I need an expression (enclose it in single-quotes (') as a single parameter)\n");
- exit(1);
- }
-
- const char *failed_at = NULL;
- int error;
-
- EVAL_EXPRESSION *exp = expression_parse(argv[1], &failed_at, &error);
- if(!exp)
- printf("\nPARSING FAILED\nExpression: '%s'\nParsing stopped at: '%s'\nParsing error code: %d (%s)\n", argv[1], (failed_at)?((*failed_at)?failed_at:"<END OF EXPRESSION>"):"<NONE>", error, expression_strerror(error));
-
- else {
- printf("\nPARSING OK\nExpression: '%s'\nParsed as : '%s'\nParsing error code: %d (%s)\n", argv[1], exp->parsed_as, error, expression_strerror(error));
-
- if(expression_evaluate(exp)) {
- printf("\nEvaluates to: %Lf\n\n", exp->result);
- }
- else {
- printf("\nEvaluation failed with code %d and message: %s\n\n", exp->error, buffer_tostring(exp->error_msg));
- }
- expression_free(exp);
- }
-
- return 0;
-}
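
The tester above exercises netdata's health expression evaluator end to end; the essential flow is parse, evaluate, free, with health_variable_lookup() stubbed out because no chart variables exist outside the daemon. A condensed sketch of that flow (netdata-internal API, built against the objects listed in the compile comment at the top of the file):

    #include "config.h"
    #include "libnetdata/libnetdata.h"
    #include "database/rrdcalc.h"

    void netdata_cleanup_and_exit(int ret) { exit(ret); }

    /* the evaluator resolves $variables through this hook; returning 0 means "not found" */
    int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) {
        (void)variable; (void)hash; (void)rc; (void)result;
        return 0;
    }

    int main(void) {
        const char *failed_at = NULL;
        int error = 0;

        EVAL_EXPRESSION *exp = expression_parse("(1 + 2) * 3 > 8", &failed_at, &error);
        if(!exp) {
            fprintf(stderr, "parse error %d (%s) at '%s'\n",
                    error, expression_strerror(error), failed_at ? failed_at : "<NONE>");
            return 1;
        }

        if(expression_evaluate(exp))
            printf("'%s' evaluates to %Lf\n", exp->parsed_as, exp->result);
        else
            fprintf(stderr, "evaluation failed with code %d\n", exp->error);

        expression_free(exp);
        return 0;
    }
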
diff --git a/tests/updater_checks.bats b/tests/updater_checks.bats
deleted file mode 100755
index 1a7eeb704..000000000
--- a/tests/updater_checks.bats
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env bats
-#
-# This script is responsible for validating
-# updater capabilities after a change
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author  : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-#
-
-INSTALLATION="$BATS_TMPDIR/installation"
-ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
-# list of files which need to be checked. Path cannot start from '/'
-FILES="usr/libexec/netdata/plugins.d/go.d.plugin
- usr/libexec/netdata/plugins.d/charts.d.plugin
- usr/libexec/netdata/plugins.d/python.d.plugin
- usr/libexec/netdata/plugins.d/node.d.plugin"
-
-DIRS="usr/sbin/netdata
- etc/netdata
- usr/share/netdata
- usr/libexec/netdata
- var/cache/netdata
- var/lib/netdata
- var/log/netdata"
-
-setup() {
- # If we are not in netdata git repo, at the top level directory, fail
- TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
- CWD=$(git rev-parse --show-cdup || echo "")
- if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as ./tests/$(basename "$0") from top level directory of git repository"
- exit 1
- fi
-}
-
-@test "install stable netdata using kickstart" {
- kickstart_file="/tmp/kickstart.$$"
- curl -Ss -o ${kickstart_file} https://my-netdata.io/kickstart.sh
- chmod +x ${kickstart_file}
- ${kickstart_file} --dont-wait --dont-start-it --auto-update --install ${INSTALLATION}
-
- # Validate particular files
- for file in $FILES; do
- [ ! -f "$BATS_TMPDIR/$file" ]
- done
-
- # Validate particular directories
- for a_dir in $DIRS; do
- [ ! -d "$BATS_TMPDIR/$a_dir" ]
- done
-
- # Cleanup
- rm -rf ${kickstart_file}
-}
-
-@test "update netdata using the new updater" {
- export ENVIRONMENT_FILE="${ENV}"
- # Run the updater, with the override so that it uses the local repo we have at hand
-	# Try to run the installed updater, if any; otherwise just run the one from the repo
- export NETDATA_LOCAL_TARBAL_OVERRIDE="${PWD}"
- /etc/cron.daily/netdata-updater || ./packaging/installer/netdata-updater.sh
- ! grep "new_installation" "${ENV}"
-}
-
-@test "uninstall netdata using latest uninstaller" {
- ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}"
- [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ]
- [ ! -f "/etc/cron.daily/netdata-updater" ]
-}
diff --git a/tests/updater_checks.sh b/tests/updater_checks.sh
deleted file mode 100755
index 9c8b6fa48..000000000
--- a/tests/updater_checks.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env sh
-#
-# Wrapper script that installs the required dependencies
-# for the BATS script to run successfully
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author  : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-#
-
-echo "Syncing/updating repository.."
-
-blind_arch_grep_install() {
- # There is a peculiar docker case with arch, where grep is not available
- # This method will have to be triggered blindly, to inject grep so that we can process
- # It starts to become a chicken-egg situation with all the distros..
- echo "* * Workaround hack * *"
- echo "Attempting blind install for archlinux case"
-
- if command -v pacman > /dev/null 2>&1; then
- echo "Executing grep installation"
- pacman -Sy
- pacman --noconfirm --needed -S grep
- fi
-}
-blind_arch_grep_install || echo "Workaround failed, proceed as usual"
-
-running_os="$(cat /etc/os-release |grep '^ID=' | cut -d'=' -f2 | sed -e 's/"//g')"
-
-case "${running_os}" in
-"centos"|"fedora")
- echo "Running on CentOS, updating YUM repository.."
- yum clean all
- yum update -y
-
- echo "Installing extra dependencies.."
- yum install -y epel-release
- yum install -y bats curl
- ;;
-"debian"|"ubuntu")
- echo "Running ${running_os}, updating APT repository"
- apt-get update -y
- apt-get install -y bats curl
- ;;
-"opensuse-leap"|"opensuse-tumbleweed")
- zypper update -y
- zypper install -y bats curl
- ;;
-"arch")
- pacman -Sy
- pacman --noconfirm --needed -S bash-bats curl
- ;;
-"alpine")
- apk update
- apk add bash curl bats
- ;;
-*)
- echo "Running on ${running_os}, no repository preparation done"
- ;;
-esac
-
-# Download and run dependency scriptlet, before anything else
-#
-deps_tool="/tmp/deps_tool.$$.sh"
-curl -Ss -o ${deps_tool} https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh
-if [ -f "${deps_tool}" ]; then
- echo "Running dependency handling script.."
- chmod +x "${deps_tool}"
- ${deps_tool} --non-interactive netdata
- rm -f "${deps_tool}"
- echo "Done!"
-else
- echo "Failed to fetch dependency script, aborting the test"
- exit 1
-fi
-
-echo "Running BATS file.."
-bats --tap tests/updater_checks.bats
diff --git a/tests/urls/request.sh b/tests/urls/request.sh
new file mode 100644
index 000000000..6cbe77384
--- /dev/null
+++ b/tests/urls/request.sh
@@ -0,0 +1,301 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+################################################################################################
+#### ####
+#### GLOBAL VARIABLES ####
+#### ####
+################################################################################################
+
+# The current time
+CT=$(date +'%s')
+
+# The previous time
+PT=$((CT - 30))
+
+# The output directories where we will store the results and errors
+OUTDIR="tests"
+OUTEDIR="encoded_tests"
+OUTOPTDIR="options"
+ERRDIR="etests"
+
+################################################################################################
+#### ####
+#### FUNCTIONS ####
+#### ####
+################################################################################################
+
+# Print error message and close script
+netdata_print_error(){
+	echo "Closing due to error \"$1\" code \"$2\""
+ exit 1
+}
+
+# Print the header message of the function
+netdata_print_header() {
+ echo "$1"
+}
+
+# Create the main directory where the results will be stored
+netdata_create_directory() {
+ netdata_print_header "Creating directory $1"
+ if [ ! -d "$1" ]; then
+ mkdir "$1"
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+			netdata_print_error "Cannot create directory $1" $TEST
+ fi
+ else
+		echo "Working with directory $1"
+ fi
+}
+
+# Check that the download succeeded
+netdata_test_download(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+		netdata_print_error "Cannot download the page $2" $TEST
+ exit 1
+ fi
+}
+
+# Check that the request fails as expected
+netdata_error_test(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -eq 0 ]; then
+		netdata_print_error "The page $2 did not return an error as expected" $TEST
+ exit 1
+ fi
+}
+
+
+# Download information from Netdata
+netdata_download_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$OUTDIR/$3.out" "$1/$2" 2> "$OUTDIR/$3.err"
+ netdata_test_download "$OUTDIR/$3.err" "$1/$2"
+}
+
+netdata_download_various_with_options() {
+ netdata_print_header "Getting options for $2"
+ curl -X OPTIONS -v -k --create-dirs -o "$OUTOPTDIR/$3.out" "$1/$2" 2> "$OUTOPTDIR/$3.err"
+ netdata_test_download "$OUTOPTDIR/$3.err" "$1/$2"
+}
+
+# Issue a request to Netdata that is expected to fail
+netdata_wrong_request_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$ERRDIR/$3.out" "$1/$2" 2> "$ERRDIR/$3.err"
+ netdata_error_test "$ERRDIR/$3.err" "$1/$2"
+}
+
+# Download charts from Netdata
+netdata_download_charts() {
+ curl -v -k --create-dirs -o "$OUTDIR/charts.out" "$1/$2" 2> "$OUTDIR/charts.err"
+ netdata_test_download "$OUTDIR/charts.err" "$1/$2"
+
+	# TODO: rewrite the following extraction of chart ids
+ grep -w "id" tests/charts.out| cut -d: -f2 | grep "\"," | sed s/,//g | sort
+}
+
+#Test options for a specific chart
+netdata_download_chart() {
+ SEPARATOR="&"
+ EQUAL="="
+ OUTD=$OUTDIR
+ ENCODED=" "
+ for I in $(seq 0 1); do
+ if [ "$I" -eq "1" ] ; then
+ SEPARATOR="%26"
+ EQUAL="%3D"
+ OUTD=$OUTEDIR
+ ENCODED="encoded"
+ fi
+
+ NAME=${3//\"/}
+ netdata_print_header "Getting data for $NAME using $4 $ENCODED"
+
+ LDIR=$OUTD"/"$4
+
+ LURL="$1/$2$EQUAL$NAME"
+
+ NAME=$NAME"_$4"
+
+ curl -v -k --create-dirs -o "$LDIR/$NAME.out" "$LURL" 2> "$LDIR/$NAME.err"
+ netdata_test_download "$LDIR/$NAME.err" "$LURL"
+
+ UFILES=( "points" "before" "after" )
+ COUNTER=0
+ for OPT in "points=100" "before=$PT" "after=$CT" ;
+ do
+ LURL="$LURL$SEPARATOR$OPT"
+ LFILE=$NAME"_${UFILES[$COUNTER]}";
+
+ curl -v -k --create-dirs -o "$LDIR/$LFILE.out" "$LURL" 2> "$LDIR/$LFILE.err"
+ netdata_test_download "$LDIR/$LFILE.err" "$LURL"
+
+ COUNTER=$((COUNTER + 1))
+ done
+
+ LURL="$LURL&group$EQUAL"
+ for OPT in "min" "max" "sum" "median" "stddev" "cv" "ses" "des" "incremental_sum" "average";
+ do
+ TURL=$LURL$OPT
+ TFILE=$NAME"_$OPT";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ for MORE in "jsonp" "json" "ssv" "csv" "datatable" "datasource" "tsv" "ssvcomma" "html" "array";
+ do
+ TURL=$TURL"&format="$MORE
+ TFILE=$NAME"_$OPT""_$MORE";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ done
+ done
+
+ LURL="$LURL$OPT&gtime=60"
+ NFILE=$NAME"_gtime"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&options=percentage"
+ NFILE=$NAME"_percentage"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&dimensions=system%7Cnice"
+ NFILE=$NAME"_dimension"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&label=testing"
+ NFILE=$NAME"_label"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+ done
+}
+
+# Download all metrics from Netdata
+netdata_download_allmetrics() {
+ netdata_print_header "Getting All metrics"
+ LURL="$1/api/v1/allmetrics?format="
+ for FMT in "shell" "prometheus" "prometheus_all_hosts" "json" ;
+ do
+ TURL=$LURL$FMT
+ for OPT in "yes" "no";
+ do
+ if [ "$FMT" == "prometheus" ]; then
+ TURL="$TURL&help=$OPT&types=$OPT&timestamps=$OPT"
+ fi
+ TURL="$TURL&names=$OPT&oldunits=$OPT&hideunits=$OPT&prefix=ND"
+
+ NAME="allmetrics_$FMT"
+ echo "$OUTDIR/$2/$NAME.out"
+ curl -v -k --create-dirs -o "$OUTDIR/$2/$NAME.out" "$TURL" 2> "$OUTDIR/$2/$NAME.err"
+ netdata_test_download "$OUTDIR/$2/$NAME.err" "$TURL"
+ done
+ done
+}
+
+
+################################################################################################
+#### ####
+#### MAIN ROUTINE ####
+#### ####
+################################################################################################
+MURL="http://127.0.0.1:19999"
+
+netdata_create_directory $OUTDIR
+netdata_create_directory $OUTEDIR
+netdata_create_directory $OUTOPTDIR
+netdata_create_directory $ERRDIR
+
+wget --no-check-certificate --execute="robots = off" --mirror --convert-links --no-parent $MURL
+TEST=$?
+if [ $TEST -ne "0" ] ; then
+ echo "Cannot connect to Netdata"
+ exit 1
+fi
+
+netdata_download_various $MURL "netdata.conf" "netdata.conf"
+
+netdata_download_various_with_options $MURL "netdata.conf" "netdata.conf"
+
+netdata_wrong_request_various $MURL "api/v15/info?this%20could%20not%20be%20here" "err_version"
+
+netdata_wrong_request_various $MURL "api/v1/\(*@&$\!$%%5E\)\!$*%&\)\!$*%%5E*\!%5E%\!%5E$%\!%5E%\(\!*%5E*%5E%\(*@&$%5E%\(\!%5E#*&\!^#$*&\!^%\)@\($%^\)\!*&^\(\!*&^#$&#$\)\!$%^\)\!$*%&\)#$\!^#*$^\!\(*#^#\)\!%^\!\)$*%&\!\(*&$\!^#$*&^\!*#^$\!*^\)%\(\!*&$%\)\(\!&#$\!^*#&$^\!*^%\)\!$%\)\!\(&#$\!^#*&^$" "err_version2"
+
+netdata_download_various $MURL "api/v1/info" "info"
+netdata_download_various_with_options $MURL "api/v1/info" "info"
+netdata_download_various $MURL "api/v1/info?this%20could%20not%20be%20here" "err_info"
+
+netdata_print_header "Getting all the netdata charts"
+CHARTS=$( netdata_download_charts "$MURL" "api/v1/charts" )
+WCHARTS=$( netdata_download_charts "$MURL" "api/v1/charts?this%20could%20not%20be%20here" )
+WCHARTS2=$( netdata_download_charts "$MURL" "api/v1/charts%3fthis%20could%20not%20be%20here" )
+
+if [ ${#CHARTS[@]} -ne ${#WCHARTS[@]} ]; then
+ echo "The number of charts does not match with division not encoded.";
+ exit 2;
+elif [ ${#CHARTS[@]} -ne ${#WCHARTS2[@]} ]; then
+ echo "The number of charts does not match when everything is encoded";
+ exit 3;
+fi
+
+netdata_wrong_request_various $MURL "api/v1/chart" "err_chart_without_chart"
+netdata_wrong_request_various $MURL "api/v1/chart?_=234231424242" "err_chart_arg"
+
+netdata_download_various $MURL "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+netdata_download_various_with_options $MURL "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+
+netdata_download_various $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various_with_options $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts%26_=234231424242" "chart_cpu_with_more_args_encoded2"
+netdata_download_various $MURL "api/v1/chart%3Fchart%3Dcpu.cpu0_interrupts%26_%3D234231424242" "chart_cpu_with_more_args_encoded3"
+
+netdata_create_directory "$OUTDIR/chart"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various $MURL "api/v1/chart?chart=$NAME" "chart/$NAME"
+done
+
+netdata_wrong_request_various $MURL "api/v1/alarm_variables" "err_alarm_variables_without_chart"
+netdata_wrong_request_various $MURL "api/v1/alarm_variables?_=234231424242" "err_alarm_variables_arg"
+netdata_download_various $MURL "api/v1/alarm_variables?chart=cpu.cpu0_interrupts&_=234231424242" "alarm_cpu_with_more_args"
+
+netdata_create_directory "$OUTDIR/alarm_variables"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various $MURL "api/v1/alarm_variables?chart=$NAME" "alarm_variables/$NAME"
+done
+
+netdata_create_directory "$OUTDIR/badge"
+netdata_create_directory "$OUTEDIR/badge"
+for I in $CHARTS ; do
+ netdata_download_chart $MURL "api/v1/badge.svg?chart" "$I" "badge"
+done
+
+netdata_create_directory "$OUTDIR/allmetrics"
+netdata_download_allmetrics $MURL "allmetrics"
+
+netdata_download_various $MURL "api/v1/alarms?all" "alarms_all"
+netdata_download_various $MURL "api/v1/alarms?active" "alarms_active"
+netdata_download_various $MURL "api/v1/alarms" "alarms_nothing"
+
+netdata_download_various $MURL "api/v1/alarm_log?after" "alarm_without"
+netdata_download_various $MURL "api/v1/alarm_log" "alarm_nothing"
+netdata_download_various $MURL "api/v1/alarm_log?after&_=$PT" "alarm_log"
+
+netdata_create_directory "$OUTDIR/data"
+netdata_create_directory "$OUTEDIR/data"
+for I in $CHARTS ; do
+ netdata_download_chart $MURL "api/v1/data?chart" "$I" "data"
+ break;
+done
+
+WHITE='\033[0;37m'
+echo -e "${WHITE}ALL the URLS got 200 as answer!"
+
+exit 0
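
request.sh decides success by grepping curl's verbose stderr for "HTTP/1.1 200 OK". For reference, the same check done programmatically with libcurl (an illustration only; the script itself shells out to the curl binary, and libcurl is not used anywhere in these tests):

    #include <stdio.h>
    #include <curl/curl.h>       /* link with -lcurl */

    /* swallow the response body; only the status code matters here */
    static size_t discard(void *ptr, size_t size, size_t nmemb, void *userdata) {
        (void)ptr; (void)userdata;
        return size * nmemb;
    }

    int main(void) {
        curl_global_init(CURL_GLOBAL_DEFAULT);
        CURL *curl = curl_easy_init();
        if(!curl) return 1;

        curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1:19999/api/v1/info");
        curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);     /* the -k used by the script */
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, discard);

        long code = 0;
        if(curl_easy_perform(curl) == CURLE_OK)
            curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);

        if(code == 200) printf("OK\n");
        else            printf("Cannot download the page (HTTP %ld)\n", code);

        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return code == 200 ? 0 : 1;
    }
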
diff --git a/tests/urls/request.sh.in b/tests/urls/request.sh.in
index fac00bc4e..6cbe77384 100644
--- a/tests/urls/request.sh.in
+++ b/tests/urls/request.sh.in
@@ -212,7 +212,7 @@ netdata_create_directory $OUTEDIR
netdata_create_directory $OUTOPTDIR
netdata_create_directory $ERRDIR
-wget --execute="robots = off" --mirror --convert-links --no-parent http://127.0.0.1:19999
+wget --no-check-certificate --execute="robots = off" --mirror --convert-links --no-parent $MURL
TEST=$?
if [ $TEST -ne "0" ] ; then
echo "Cannot connect to Netdata"
@@ -232,9 +232,9 @@ netdata_download_various_with_options $MURL "api/v1/info" "info"
netdata_download_various $MURL "api/v1/info?this%20could%20not%20be%20here" "err_info"
netdata_print_header "Getting all the netdata charts"
-CHARTS=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts" )
-WCHARTS=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts?this%20could%20not%20be%20here" )
-WCHARTS2=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts%3fthis%20could%20not%20be%20here" )
+CHARTS=$( netdata_download_charts "$MURL" "api/v1/charts" )
+WCHARTS=$( netdata_download_charts "$MURL" "api/v1/charts?this%20could%20not%20be%20here" )
+WCHARTS2=$( netdata_download_charts "$MURL" "api/v1/charts%3fthis%20could%20not%20be%20here" )
if [ ${#CHARTS[@]} -ne ${#WCHARTS[@]} ]; then
echo "The number of charts does not match with division not encoded.";
@@ -295,8 +295,6 @@ for I in $CHARTS ; do
break;
done
-#http://arch-esxi:19999/api/v1/(*@&$!$%%5E)!$*%&)!$*%%5E*!%5E%!%5E$%!%5E%(!*%5E*%5E%(*@&$%5E%(!%5E#*&!^#$*&!^%)@($%^)!*&^(!*&^#$&#$)!$%^)!$*%&)#$!^#*$^!(*#^#)!%^!)$*%&!(*&$!^#$*&^!*#^$!*^)%(!*&$%)(!&#$!^*#&$^!*^%)!$%)!(&#$!^#*&^$
-
WHITE='\033[0;37m'
echo -e "${WHITE}ALL the URLS got 200 as answer!"