author     Daniel Baumann <daniel.baumann@progress-linux.org>  2019-09-03 10:23:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2019-09-03 10:23:48 +0000
commit     cd7ed12292aef11d9062b64f61215174e8cc1860 (patch)
tree       9998ab03d153956743d9319cf3a0279b9593ce36 /daemon
parent     Releasing debian version 1.16.1-6. (diff)
Merging upstream version 1.17.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'daemon')
-rw-r--r--  daemon/Makefile.in              614
-rw-r--r--  daemon/README.md                190
-rw-r--r--  daemon/anonymous-statistics.sh   89
-rw-r--r--  daemon/config/README.md         139
-rwxr-xr-x  daemon/system-info.sh             7
-rw-r--r--  daemon/unit_test.c              354
6 files changed, 1148 insertions, 245 deletions
diff --git a/daemon/Makefile.in b/daemon/Makefile.in
new file mode 100644
index 000000000..e53dbbca8
--- /dev/null
+++ b/daemon/Makefile.in
@@ -0,0 +1,614 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = daemon
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ anonymous-statistics.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_noinst_DATA = \
+ README.md \
+ config/README.md \
+ anonymous-statistics.sh.in \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ anonymous-statistics.sh \
+ system-info.sh \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu daemon/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu daemon/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
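
The `.in` suffix rule above is what generates `anonymous-statistics.sh` from `anonymous-statistics.sh.in` at build time, replacing the `@..._POST@` placeholders with the configured directories. A minimal sketch of the same substitution, with an illustrative value in place of the configure-time path:

```sh
# substitute one placeholder the way the .in rule does;
# /usr/libexec/netdata/plugins.d is only an example value
sed -e 's#[@]pluginsdir_POST@#/usr/libexec/netdata/plugins.d#g' \
  anonymous-statistics.sh.in > anonymous-statistics.sh.tmp &&
  mv anonymous-statistics.sh.tmp anonymous-statistics.sh
```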
diff --git a/daemon/README.md b/daemon/README.md
index 62cc8c3b2..2facbdda6 100644
--- a/daemon/README.md
+++ b/daemon/README.md
@@ -2,21 +2,21 @@
## Starting netdata
-- You can start netdata by executing it with `/usr/sbin/netdata` (the installer will also start it).
+- You can start Netdata by executing it with `/usr/sbin/netdata` (the installer will also start it).
-- You can stop netdata by killing it with `killall netdata`.
- You can stop and start netdata at any point. Netdata saves on exit its round robbin
- database to `/var/cache/netdata` so that it will continue from where it stopped the last time.
+- You can stop Netdata by killing it with `killall netdata`.
+ You can stop and start Netdata at any point. On exit, Netdata saves its round robin
+ database to `/var/cache/netdata` so that it will continue from where it stopped the last time.
Access to the web site, for all graphs, is by default on port `19999`, so go to:
- ```
- http://127.0.0.1:19999/
- ```
+```
+http://127.0.0.1:19999/
+```
You can get the running config file at any time, by accessing `http://127.0.0.1:19999/netdata.conf`.
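
For example, assuming the default port, you can save the live configuration with `curl`:

```sh
# dump the running configuration of a local Netdata
curl -o /tmp/netdata.conf http://127.0.0.1:19999/netdata.conf
```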
-### Starting netdata at boot
+### Starting Netdata at boot
In the `system` directory you can find scripts and configurations for the various distros.
@@ -27,7 +27,7 @@ The installer already installs `netdata.service` if it detects a systemd system.
To install `netdata.service` by hand, run:
```sh
-# stop netdata
+# stop Netdata
killall netdata
# copy netdata.service to systemd
@@ -36,10 +36,10 @@ cp system/netdata.service /etc/systemd/system/
# let systemd know there is a new service
systemctl daemon-reload
-# enable netdata at boot
+# enable Netdata at boot
systemctl enable netdata
-# start netdata
+# start Netdata
systemctl start netdata
```
@@ -48,7 +48,7 @@ systemctl start netdata
In the system directory you can find `netdata-lsb`. Copy it to the proper place according to your distribution documentation. For Ubuntu, this can be done via running the following commands as root.
```sh
-# copy the netdata startup file to /etc/init.d
+# copy the Netdata startup file to /etc/init.d
cp system/netdata-lsb /etc/init.d/netdata
# make sure it is executable
@@ -67,7 +67,7 @@ In the `system` directory you can find `netdata-openrc`. Copy it to the proper p
For older versions of RHEL/CentOS that don't have systemd, an init script is included in the system directory. This can be installed by running the following commands as root.
```sh
-# copy the netdata startup file to /etc/init.d
+# copy the Netdata startup file to /etc/init.d
cp system/netdata-init-d /etc/init.d/netdata
# make sure it is executable
@@ -77,11 +77,11 @@ chmod +x /etc/init.d/netdata
chkconfig --add netdata
```
-_There have been some recent work on the init script, see PR https://github.com/netdata/netdata/pull/403_
+_There has been some recent work on the init script, see PR <https://github.com/netdata/netdata/pull/403>_
#### other systems
-You can start netdata by running it from `/etc/rc.local` or equivalent.
+You can start Netdata by running it from `/etc/rc.local` or equivalent.
## Command line options
@@ -97,9 +97,9 @@ netdata -h
The program will print the supported command line parameters.
-The command line options of the netdata 1.10.0 version are the following:
-```
+The command line options of the Netdata 1.10.0 version are the following:
+```
^
|.-. .-. .-. .-. . netdata
| '-' '-' '-' '-' real-time performance monitoring, done right!
@@ -182,38 +182,38 @@ The command line options of the netdata 1.10.0 version are the following:
## Log files
-netdata uses 3 log files:
+Netdata uses 3 log files:
-1. `error.log`
-2. `access.log`
-3. `debug.log`
+1. `error.log`
+2. `access.log`
+3. `debug.log`
Any of them can be disabled by setting it to `/dev/null` or `none` in `netdata.conf`.
By default `error.log` and `access.log` are enabled. `debug.log` is only enabled if
-debugging/tracing is also enabled (netdata needs to be compiled with debugging enabled).
+debugging/tracing is also enabled (Netdata needs to be compiled with debugging enabled).
Log files are stored in `/var/log/netdata/` by default.
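
Each log can be redirected individually. As an illustration, using the `[global]` options documented in the daemon configuration, the access log could be disabled with:

```
[global]
    access log = none
```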
#### error.log
-The `error.log` is the `stderr` of the netdata daemon and all external plugins run by netdata.
+The `error.log` is the `stderr` of the `netdata` daemon and all external plugins run by netdata.
-So if any process, in the netdata process tree, writes anything to its standard error,
+So if any process, in the Netdata process tree, writes anything to its standard error,
it will appear in `error.log`.
-For most netdata programs (including standard external plugins shipped by netdata), the
+For most Netdata programs (including standard external plugins shipped by netdata), the
following lines may appear:
-tag|description
-:---:|:----
-`INFO`|Something important the user should know.
-`ERROR`|Something that might disable a part of netdata.<br/>The log line includes `errno` (if it is not zero).
-`FATAL`|Something prevented a program from running.<br/>The log line includes `errno` (if it is not zero) and the program exited.
+| tag|description|
+|:-:|:----------|
+| `INFO`|Something important the user should know.|
+| `ERROR`|Something that might disable a part of netdata.<br/>The log line includes `errno` (if it is not zero).|
+| `FATAL`|Something prevented a program from running.<br/>The log line includes `errno` (if it is not zero) and the program exited.|
 So, when auto-detection of data collection fails, `ERROR` lines are logged and the relevant modules
are disabled, but the program continues to run.
-When a netdata program cannot run at all, a `FATAL` line is logged.
+When a Netdata program cannot run at all, a `FATAL` line is logged.
#### access.log
@@ -225,34 +225,32 @@ DATE: ID: (sent/all = SENT_BYTES/ALL_BYTES bytes PERCENT_COMPRESSION%, prep/sent
where:
- - `ID` is the client ID. Client IDs are auto-incremented every time a client connects to netdata.
- - `SENT_BYTES` is the number of bytes sent to the client, without the HTTP response header.
- - `ALL_BYTES` is the number of bytes of the response, before compression.
- - `PERCENT_COMPRESSION` is the percentage of traffic saved due to compression.
- - `PREP_TIME` is the time in milliseconds needed to prepared the response.
- - `SENT_TIME` is the time in milliseconds needed to sent the response to the client.
- - `TOTAL_TIME` is the total time the request was inside netdata (from the first byte of the request to the last byte of the response).
- - `ACTION` can be `filecopy`, `options` (used in CORS), `data` (API call).
-
+- `ID` is the client ID. Client IDs are auto-incremented every time a client connects to netdata.
+- `SENT_BYTES` is the number of bytes sent to the client, without the HTTP response header.
+- `ALL_BYTES` is the number of bytes of the response, before compression.
+- `PERCENT_COMPRESSION` is the percentage of traffic saved due to compression.
+- `PREP_TIME` is the time in milliseconds needed to prepare the response.
+- `SENT_TIME` is the time in milliseconds needed to send the response to the client.
+- `TOTAL_TIME` is the total time the request was inside Netdata (from the first byte of the request to the last byte of the response).
+- `ACTION` can be `filecopy`, `options` (used in CORS), `data` (API call).
#### debug.log
See [debugging](#debugging).
-
## OOM Score
-netdata runs with `OOMScore = 1000`. This means netdata will be the first to be killed when your
+Netdata runs with `OOMScore = 1000`. This means Netdata will be the first to be killed when your
server runs out of memory.
-You can set netdata OOMScore in `netdata.conf`, like this:
+You can set Netdata OOMScore in `netdata.conf`, like this:
```
[global]
OOM score = 1000
```
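
To confirm the score the kernel actually applied, you can read it back from `/proc` (a quick check, assuming a running `netdata` process):

```sh
# show the OOM score adjustment of the running daemon
cat /proc/$(pidof -s netdata)/oom_score_adj
```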
-netdata logs its OOM score when it starts:
+Netdata logs its OOM score when it starts:
```sh
# grep OOM /var/log/netdata/error.log
@@ -261,16 +259,16 @@ netdata logs its OOM score when it starts:
#### OOM score and systemd
-netdata will not be able to lower its OOM Score below zero, when it is started as the `netdata`
+Netdata will not be able to lower its OOM Score below zero, when it is started as the `netdata`
user (systemd case).
-To allow netdata control its OOM Score in such cases, you will need to edit
+To allow Netdata to control its OOM Score in such cases, you will need to edit
`netdata.service` and set:
```
[Service]
-# The minimum netdata Out-Of-Memory (OOM) score.
-# netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
+# The minimum Netdata Out-Of-Memory (OOM) score.
+# Netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
# To decrease it, set the minimum here and set the same or a higher value in netdata.conf.
# Valid values: -1000 (never kill netdata) to 1000 (always kill netdata).
OOMScoreAdjust=-1000
@@ -278,7 +276,7 @@ OOMScoreAdjust=-1000
Run `systemctl daemon-reload` to reload these changes.
-The above, sets and OOMScore for netdata to `-1000`, so that netdata can increase it via
+The above sets an OOM score of `-1000` for Netdata, so that Netdata can increase it via
`netdata.conf`.
If you want to control it entirely via systemd, you can set in `netdata.conf`:
@@ -290,12 +288,11 @@ If you want to control it entirely via systemd, you can set in `netdata.conf`:
Using the above, whatever OOM Score you have set at `netdata.service` will be maintained by netdata.
-
## Netdata process scheduling policy
-By default netdata runs with the `idle` process scheduling policy, so that it uses CPU resources, only when there is idle CPU to spare. On very busy servers (or weak servers), this can lead to gaps on the charts.
+By default Netdata runs with the `idle` process scheduling policy, so that it uses CPU resources only when there is idle CPU to spare. On very busy servers (or weak servers), this can lead to gaps on the charts.
-You can set netdata scheduling policy in `netdata.conf`, like this:
+You can set Netdata scheduling policy in `netdata.conf`, like this:
```
[global]
@@ -304,14 +301,14 @@ You can set netdata scheduling policy in `netdata.conf`, like this:
You can use the following:
-policy|description
-:-----:|:--------
-`idle`|use CPU only when there is spare - this is lower than nice 19 - it is the default for netdata and it is so low that netdata will run in "slow motion" under extreme system load, resulting in short (1-2 seconds) gaps at the charts.
-`other`<br/>or<br/>`nice`|this is the default policy for all processes under Linux. It provides dynamic priorities based on the `nice` level of each process. Check below for setting this `nice` level for netdata.
-`batch`|This policy is similar to `other` in that it schedules the thread according to its dynamic priority (based on the `nice` value). The difference is that this policy will cause the scheduler to always assume that the thread is CPU-intensive. Consequently, the scheduler will apply a small scheduling penalty with respect to wake-up behavior, so that this thread is mildly disfavored in scheduling decisions.
-`fifo`|`fifo` can be used only with static priorities higher than 0, which means that when a `fifo` threads becomes runnable, it will always immediately preempt any currently running `other`, `batch`, or `idle` thread. `fifo` is a simple scheduling algorithm without time slicing.
-`rr`|a simple enhancement of `fifo`. Everything described above for `fifo` also applies to `rr`, except that each thread is allowed to run only for a maximum time quantum.
-`keep`<br/>or<br/>`none`|do not set scheduling policy, priority or nice level - i.e. keep running with whatever it is set already (e.g. by systemd).
+| policy|description|
+|:----:|:----------|
+| `idle`|use CPU only when there is spare - this is lower than nice 19 - it is the default for Netdata and it is so low that Netdata will run in "slow motion" under extreme system load, resulting in short (1-2 seconds) gaps at the charts.|
+| `other`<br/>or<br/>`nice`|this is the default policy for all processes under Linux. It provides dynamic priorities based on the `nice` level of each process. Check below for setting this `nice` level for netdata.|
+| `batch`|This policy is similar to `other` in that it schedules the thread according to its dynamic priority (based on the `nice` value). The difference is that this policy will cause the scheduler to always assume that the thread is CPU-intensive. Consequently, the scheduler will apply a small scheduling penalty with respect to wake-up behavior, so that this thread is mildly disfavored in scheduling decisions.|
+| `fifo`|`fifo` can be used only with static priorities higher than 0, which means that when a `fifo` threads becomes runnable, it will always immediately preempt any currently running `other`, `batch`, or `idle` thread. `fifo` is a simple scheduling algorithm without time slicing.|
+| `rr`|a simple enhancement of `fifo`. Everything described above for `fifo` also applies to `rr`, except that each thread is allowed to run only for a maximum time quantum.|
+| `keep`<br/>or<br/>`none`|do not set scheduling policy, priority or nice level - i.e. keep running with whatever it is set already (e.g. by systemd).|
For more information see `man sched`.
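
To verify which policy the running daemon ended up with, `chrt` from util-linux can be used (a sketch, assuming a running `netdata` process):

```sh
# print the current scheduling policy and priority of netdata
chrt -p $(pidof -s netdata)
```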
@@ -337,30 +334,30 @@ When the policy is set to `other`, `nice`, or `batch`, the following will appear
## scheduling settings and systemd
-netdata will not be able to set its scheduling policy and priority to more important values when it is started as the `netdata` user (systemd case).
+Netdata will not be able to set its scheduling policy and priority to more important values when it is started as the `netdata` user (systemd case).
You can set these settings at `/etc/systemd/system/netdata.service`:
```
[Service]
-# By default netdata switches to scheduling policy idle, which makes it use CPU, only
+# By default Netdata switches to scheduling policy idle, which makes it use CPU, only
# when there is spare available.
# Valid policies: other (the system default) | batch | idle | fifo | rr
#CPUSchedulingPolicy=other
-# This sets the maximum scheduling priority netdata can set (for policies: rr and fifo).
-# netdata (via [global].process scheduling priority in netdata.conf) can only lower this value.
+# This sets the maximum scheduling priority Netdata can set (for policies: rr and fifo).
+# Netdata (via [global].process scheduling priority in netdata.conf) can only lower this value.
# Priority gets values 1 (lowest) to 99 (highest).
#CPUSchedulingPriority=1
# For scheduling policy 'other' and 'batch', this sets the lowest niceness of netdata.
-# netdata (via [global].process nice level in netdata.conf) can only increase the value set here.
+# Netdata (via [global].process nice level in netdata.conf) can only increase the value set here.
#Nice=0
```
Run `systemctl daemon-reload` to reload these changes.
-Now, tell netdata to keep these settings, as set by systemd, by editing `netdata.conf` and setting:
+Now, tell Netdata to keep these settings, as set by systemd, by editing `netdata.conf` and setting:
```
[global]
@@ -369,10 +366,9 @@ Now, tell netdata to keep these settings, as set by systemd, by editing `netdata
Using the above, whatever scheduling settings you have set at `netdata.service` will be maintained by netdata.
+#### Example 1: Netdata with nice -1 on non-systemd systems
-#### Example 1: netdata with nice -1 on non-systemd systems
-
-On a system that is not based on systemd, to make netdata run with nice level -1 (a little bit higher to the default for all programs), edit netdata.conf and set:
+On a system that is not based on systemd, to make Netdata run with nice level -1 (a little bit higher than the default for all programs), edit `netdata.conf` and set:
```
[global]
@@ -386,10 +382,9 @@ then execute this to restart netdata:
sudo service netdata restart
```
+#### Example 2: Netdata with nice -1 on systemd systems
-#### Example 2: netdata with nice -1 on systemd systems
-
-On a system that is based on systemd, to make netdata run with nice level -1 (a little bit higher to the default for all programs), edit netdata.conf and set:
+On a system that is based on systemd, to make Netdata run with nice level -1 (a little bit higher than the default for all programs), edit `netdata.conf` and set:
```
[global]
@@ -415,9 +410,9 @@ sudo systemctl restart netdata
You may notice that netdata's virtual memory size, as reported by `ps` or `/proc/pid/status` (or even netdata's applications virtual memory chart) is unrealistically high.
-For example, it may be reported to be 150+MB, even if the resident memory size is just 25MB. Similar values may be reported for netdata plugins too.
+For example, it may be reported to be 150+MB, even if the resident memory size is just 25MB. Similar values may be reported for Netdata plugins too.
-Check this for example: A netdata installation with default settings on Ubuntu 16.04LTS. The top chart is **real memory used**, while the bottom one is **virtual memory**:
+Check this for example: A Netdata installation with default settings on Ubuntu 16.04LTS. The top chart is **real memory used**, while the bottom one is **virtual memory**:
![image](https://cloud.githubusercontent.com/assets/2662304/19013772/5eb7173e-87e3-11e6-8f2b-a2ccfeb06faf.png)
@@ -431,19 +426,18 @@ number of threads running.
The system does this for speed. Having a separate memory arena for each thread, allows the
threads to run in parallel in multi-core systems, without any locks between them.
-This behaviour is system specific. For example, the chart above when running netdata on alpine
-linux (that uses **musl** instead of **glibc**) is this:
+This behaviour is system specific. For example, the chart above when running Netdata on Alpine Linux (that uses **musl** instead of **glibc**) is this:
![image](https://cloud.githubusercontent.com/assets/2662304/19013807/7cf5878e-87e4-11e6-9651-082e68701eab.png)
**Can we do anything to lower it?**
-Since netdata already uses minimal memory allocations while it runs (i.e. it adapts its memory on start, so that while repeatedly collects data it does not do memory allocations), it already instructs the system memory allocator to minimize the memory arenas for each thread. We have also added [2 configuration options](https://github.com/netdata/netdata/blob/5645b1ee35248d94e6931b64a8688f7f0d865ec6/src/main.c#L410-L418)
+Since Netdata already uses minimal memory allocations while it runs (i.e. it adapts its memory on start, so that while it repeatedly collects data it does not do memory allocations), it already instructs the system memory allocator to minimize the memory arenas for each thread. We have also added [2 configuration options](https://github.com/netdata/netdata/blob/5645b1ee35248d94e6931b64a8688f7f0d865ec6/src/main.c#L410-L418)
to allow you tweak these settings: `glibc malloc arena max for plugins` and `glibc malloc arena max for netdata`.
However, even if we instructed the memory allocator to use just one arena, it seems it allocates an arena per thread.
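
As an illustration, both options live in the `[global]` section of `netdata.conf`; `1` is their documented default:

```
[global]
    glibc malloc arena max for plugins = 1
    glibc malloc arena max for netdata = 1
```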
-netdata also supports `jemalloc` and `tcmalloc`, however both behave exactly the same to the glibc memory allocator in this aspect.
+Netdata also supports `jemalloc` and `tcmalloc`, however both behave exactly the same as the glibc memory allocator in this aspect.
**Is this a problem?**
@@ -452,53 +446,52 @@ No, it is not.
Linux reserves real memory (physical RAM) in pages (on x86 machines pages are 4KB each).
So even if the system memory allocator is allocating huge amounts of virtual memory,
only the 4KB pages that are actually used are reserving physical RAM. The **real memory** chart
-on netdata application section, shows the amount of physical memory these pages occupy(it
+on the Netdata applications section shows the amount of physical memory these pages occupy (it
accounts the whole pages, even if parts of them are actually used).
-
## Debugging
-When you compile netdata with debugging:
+When you compile Netdata with debugging:
-1. compiler optimizations for your CPU are disabled (netdata will run somewhat slower)
+1. compiler optimizations for your CPU are disabled (Netdata will run somewhat slower)
-2. a lot of code is added all over netdata, to log debug messages to `/var/log/netdata/debug.log`. However, nothing is printed by default. netdata allows you to select which sections of netdata you want to trace. Tracing is activated via the config option `debug flags`. It accepts a hex number, to enable or disable specific sections. You can find the options supported at [log.h](../libnetdata/log/log.h). They are the `D_*` defines. The value `0xffffffffffffffff` will enable all possible debug flags.
+2. a lot of code is added all over netdata, to log debug messages to `/var/log/netdata/debug.log`. However, nothing is printed by default. Netdata allows you to select which sections of Netdata you want to trace. Tracing is activated via the config option `debug flags`. It accepts a hex number, to enable or disable specific sections. You can find the options supported at [log.h](../libnetdata/log/log.h). They are the `D_*` defines. The value `0xffffffffffffffff` will enable all possible debug flags.
-Once netdata is compiled with debugging and tracing is enabled for a few sections, the file `/var/log/netdata/debug.log` will contain the messages.
+Once Netdata is compiled with debugging and tracing is enabled for a few sections, the file `/var/log/netdata/debug.log` will contain the messages.
> Do not forget to disable tracing (`debug flags = 0`) when you are done tracing. The file `debug.log` can grow too fast.
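
As an illustration, enabling every section and then switching tracing off again are both plain `netdata.conf` edits (the all-ones mask is the value mentioned above):

```
[global]
    debug flags = 0xffffffffffffffff
```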
-#### compiling netdata with debugging
+#### compiling Netdata with debugging
-To compile netdata with debugging, use this:
+To compile Netdata with debugging, use this:
```sh
-# step into the netdata source directory
+# step into the Netdata source directory
cd /usr/src/netdata.git
# run the installer with debugging enabled
CFLAGS="-O1 -ggdb -DNETDATA_INTERNAL_CHECKS=1" ./netdata-installer.sh
```
-The above will compile and install netdata with debugging info embedded. You can now use `debug flags` to set the section(s) you need to trace.
+The above will compile and install Netdata with debugging info embedded. You can now use `debug flags` to set the section(s) you need to trace.
#### debugging crashes
-We have made the most to make netdata crash free. If however, netdata crashes on your system, it would be very helpful to provide stack traces of the crash. Without them, is will be almost impossible to find the issue (the code base is quite large to find such an issue by just objerving it).
+We have done our best to make Netdata crash-free. If, however, Netdata crashes on your system, it would be very helpful to provide stack traces of the crash. Without them, it will be almost impossible to find the issue (the code base is too large to find such an issue by just observing it).
-To provide stack traces, **you need to have netdata compiled with debugging**. There is no need to enable any tracing (`debug flags`).
+To provide stack traces, **you need to have Netdata compiled with debugging**. There is no need to enable any tracing (`debug flags`).
Then you need to be in one of the following 2 cases:
-1. netdata crashes and you have a core dump
+1. Netdata crashes and you have a core dump
-2. you can reproduce the crash
+2. you can reproduce the crash
 If you are not in one of these cases, you need to find a way to be (i.e. if your system does not produce core dumps, check your distro documentation to enable them).
-#### netdata crashes and you have a core dump
+#### Netdata crashes and you have a core dump
-> you need to have netdata compiled with debugging info for this to work (check above)
+> you need to have Netdata compiled with debugging info for this to work (check above)
Run the following command and post the output on a github issue.
@@ -506,9 +499,9 @@ Run the following command and post the output on a github issue.
gdb $(which netdata) /path/to/core/dump
```
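
If you prefer a non-interactive run, `gdb` can print all thread backtraces in one shot (a sketch; the core path is whatever your system produced):

```sh
# batch-mode variant of the command above
gdb -batch -ex 'thread apply all bt' $(which netdata) /path/to/core/dump
```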
-#### you can reproduce a netdata crash on your system
+#### you can reproduce a Netdata crash on your system
-> you need to have netdata compiled with debugging info for this to work (check above)
+> you need to have Netdata compiled with debugging info for this to work (check above)
Install the package `valgrind` and run:
@@ -516,7 +509,6 @@ Install the package `valgrind` and run:
valgrind $(which netdata) -D
```
-netdata will start and it will be a lot slower. Now reproduce the crash and `valgrind` will dump on your console the stack trace. Open a new github issue and post the output.
-
+Netdata will start and it will be a lot slower. Now reproduce the crash and `valgrind` will dump on your console the stack trace. Open a new github issue and post the output.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/daemon/anonymous-statistics.sh b/daemon/anonymous-statistics.sh
new file mode 100644
index 000000000..8c5a77f5e
--- /dev/null
+++ b/daemon/anonymous-statistics.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env sh
+
+# Valid actions:
+
+# - FATAL - netdata exited due to a fatal condition
+# ACTION_RESULT -- program name and thread tag
+# ACTION_DATA -- fmt, args passed to fatal
+# - START - netdata started
+# ACTION_DATA -- nan
+# - EXIT - installation action
+# ACTION_DATA -- ret value of
+
+ACTION="${1}"
+ACTION_RESULT="${2}"
+ACTION_DATA="${3}"
+ACTION_DATA=$(echo "${ACTION_DATA}" | tr '"' "'")
+
+# -------------------------------------------------------------------------------------------------
+# check opt-out
+
+if [ -f "/etc/netdata/.opt-out-from-anonymous-statistics" ]; then
+ exit 0
+fi
+
+# Shorten version for easier reporting
+NETDATA_VERSION=$(echo "${NETDATA_VERSION}" | sed 's/-.*//g' | tr -d 'v')
+
+# -------------------------------------------------------------------------------------------------
+# send the anonymous statistics to GA
+# https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
+if [ -n "$(command -v curl 2>/dev/null)" ]; then
+ curl -X POST -Ss --max-time 2 \
+ --data "v=1" \
+ --data "tid=UA-64295674-3" \
+ --data "aip=1" \
+ --data "ds=shell" \
+ --data-urlencode "cid=${NETDATA_REGISTRY_UNIQUE_ID}" \
+ --data-urlencode "cs=${NETDATA_REGISTRY_UNIQUE_ID}" \
+ --data "t=event" \
+ --data "ni=1" \
+ --data "an=anonymous-statistics" \
+ --data-urlencode "av=${NETDATA_VERSION}" \
+ --data-urlencode "ec=${ACTION}" \
+ --data-urlencode "ea=${ACTION_RESULT}" \
+ --data-urlencode "el=${ACTION_DATA}" \
+ --data-urlencode "cd1=${NETDATA_SYSTEM_OS_NAME}" \
+ --data-urlencode "cd2=${NETDATA_SYSTEM_OS_ID}" \
+ --data-urlencode "cd3=${NETDATA_SYSTEM_OS_ID_LIKE}" \
+ --data-urlencode "cd4=${NETDATA_SYSTEM_OS_VERSION}" \
+ --data-urlencode "cd5=${NETDATA_SYSTEM_OS_VERSION_ID}" \
+ --data-urlencode "cd6=${NETDATA_SYSTEM_OS_DETECTION}" \
+ --data-urlencode "cd7=${NETDATA_SYSTEM_KERNEL_NAME}" \
+ --data-urlencode "cd8=${NETDATA_SYSTEM_KERNEL_VERSION}" \
+ --data-urlencode "cd9=${NETDATA_SYSTEM_ARCHITECTURE}" \
+ --data-urlencode "cd10=${NETDATA_SYSTEM_VIRTUALIZATION}" \
+ --data-urlencode "cd11=${NETDATA_SYSTEM_VIRT_DETECTION}" \
+ --data-urlencode "cd12=${NETDATA_SYSTEM_CONTAINER}" \
+ --data-urlencode "cd13=${NETDATA_SYSTEM_CONTAINER_DETECTION}" \
+ "https://www.google-analytics.com/collect" >/dev/null 2>&1
+else
+ wget -q -O - --timeout=1 "https://www.google-analytics.com/collect?\
+&v=1\
+&tid=UA-64295674-3\
+&aip=1\
+&ds=shell\
+&cid=${NETDATA_REGISTRY_UNIQUE_ID}\
+&cs=${NETDATA_REGISTRY_UNIQUE_ID}\
+&t=event\
+&ni=1\
+&an=anonymous-statistics\
+&av=${NETDATA_VERSION}\
+&ec=${ACTION}\
+&ea=${ACTION_RESULT}\
+&el=${ACTION_DATA}\
+&cd1=${NETDATA_SYSTEM_OS_NAME}\
+&cd2=${NETDATA_SYSTEM_OS_ID}\
+&cd3=${NETDATA_SYSTEM_OS_ID_LIKE}\
+&cd4=${NETDATA_SYSTEM_OS_VERSION}\
+&cd5=${NETDATA_SYSTEM_OS_VERSION_ID}\
+&cd6=${NETDATA_SYSTEM_OS_DETECTION}\
+&cd7=${NETDATA_SYSTEM_KERNEL_NAME}\
+&cd8=${NETDATA_SYSTEM_KERNEL_VERSION}\
+&cd9=${NETDATA_SYSTEM_ARCHITECTURE}\
+&cd10=${NETDATA_SYSTEM_VIRTUALIZATION}\
+&cd11=${NETDATA_SYSTEM_VIRT_DETECTION}\
+&cd12=${NETDATA_SYSTEM_CONTAINER}\
+&cd13=${NETDATA_SYSTEM_CONTAINER_DETECTION}\
+" > /dev/null 2>&1
+fi
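
Note the opt-out check at the top of the script: statistics can be disabled entirely by creating the flag file it looks for:

```sh
# opt out of anonymous statistics; the script exits early when this file exists
touch /etc/netdata/.opt-out-from-anonymous-statistics
```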
diff --git a/daemon/config/README.md b/daemon/config/README.md
index c36a5b6db..dd032dbb7 100644
--- a/daemon/config/README.md
+++ b/daemon/config/README.md
@@ -1,6 +1,5 @@
# Daemon configuration
-
<details markdown="1"><summary>The daemon configuration file is read from `/etc/netdata/netdata.conf`.</summary>
Depending on your installation method, Netdata will have been installed either directly under `/`, or under `/opt/netdata`. The paths mentioned here and in the documentation in general assume that your installation is under `/`. If it is not, you will find the exact same paths under `/opt/netdata` as well. (i.e. `/etc/netdata` will be `/opt/netdata/etc/netdata`).</details>
@@ -8,21 +7,21 @@ This config file **is not needed by default**. Netdata works fine out of the box
`netdata.conf` has sections stated with `[section]`. You will see the following sections:
-1. `[global]` to [configure](#global-section-options) the [netdata daemon](../).
-2. `[web]` to [configure the web server](../../web/server).
-3. `[plugins]` to [configure](#plugins-section-options) which [collectors](../../collectors) to use and PATH settings.
-4. `[health]` to [configure](#health-section-options) general settings for [health monitoring](../../health)
-5. `[registry]` for the [netdata registry](../../registry).
-6. `[backend]` to set up [streaming and replication](../../streaming) options.
-7. `[statsd]` for the general settings of the [stats.d.plugin](../../collectors/statsd.plugin).
-8. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
-9. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
+1. `[global]` to [configure](#global-section-options) the [Netdata daemon](../).
+2. `[web]` to [configure the web server](../../web/server).
+3. `[plugins]` to [configure](#plugins-section-options) which [collectors](../../collectors) to use and PATH settings.
+4. `[health]` to [configure](#health-section-options) general settings for [health monitoring](../../health)
+5. `[registry]` for the [Netdata registry](../../registry).
+6. `[backend]` to set up [streaming and replication](../../streaming) options.
+7. `[statsd]` for the general settings of the [stats.d.plugin](../../collectors/statsd.plugin).
+8. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
+9. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
-The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When you check the running configuration by accessing the URL `/netdata.conf` on your netdata server, netdata will add a comment on settings it does not currently use.
+The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When you check the running configuration by accessing the URL `/netdata.conf` on your Netdata server, Netdata will add a comment on settings it does not currently use.
## Applying changes
-After `netdata.conf` has been modified, netdata needs to be restarted for changes to apply:
+After `netdata.conf` has been modified, Netdata needs to be restarted for changes to apply:
```bash
sudo service netdata restart
@@ -40,41 +39,41 @@ Please note that your data history will be lost if you have modified `history` p
### [global] section options
-setting | default | info
-:------:|:-------:|:----
-process scheduling policy | `keep` | See [netdata process scheduling policy](../#netdata-process-scheduling-policy)
-OOM score | `1000` | See [OOM score](../#oom-score)
-glibc malloc arena max for plugins | `1` | See [Virtual memory](../#virtual-memory).
-glibc malloc arena max for netdata | `1` | See [Virtual memory](../#virtual-memory).
-hostname | auto-detected | The hostname of the computer running netdata.
-history | `3996` | The number of entries the netdata daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](../../database/#database) for more information.
-update every | `1` | The frequency in seconds, for data collection. For more information see [Performance](../../docs/Performance.md#performance).
-config directory | `/etc/netdata` | The directory configuration files are kept.
-stock config directory | `/usr/lib/netdata/conf.d` |
-log directory | `/var/log/netdata` | The directory in which the [log files](../#log-files) are kept.
-web files directory | `/usr/share/netdata/web` | The directory the web static files are kept.
-cache directory | `/var/cache/netdata` | The directory the memory database will be stored if and when netdata exits. Netdata will re-read the database when it will start again, to continue from the same point.
-lib directory | `/var/lib/netdata` | Contains the alarm log and the netdata instance guid.
-home directory | `/var/cache/netdata` | Contains the db files for the collected metrics
-plugins directory | `"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"` | The directory plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes.
-memory mode | `save` | When set to `save` netdata will save its round robin database on exit and load it on startup. When set to `map` the cache files will be updated in real time (check `man mmap` - do not set this on systems with heavy load or slow disks - the disks will continuously sync the in-memory database of netdata). When set to `dbengine` it behaves similarly to `map` but with much better disk and memory efficiency, however, with higher overhead. When set to `ram` the round robin database will be temporary and it will be lost when netdata exits. `none` disables the database at this host. This also disables health monitoring (there cannot be health monitoring without a database). host access prefix | | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this work. Check [issue 43](https://github.com/netdata/netdata/issues/43).
-memory deduplication (ksm) | `yes` | When set to `yes`, netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](../../database/#ksm)
-TZ environment variable | `:/etc/localtime` | Where to find the timezone
-timezone | auto-detected | The timezone retrieved from the environment variable
-debug flags | `0x0000000000000000` | Bitmap of debug options to enable. For more information check [Tracing Options](../#debugging).
-debug log | `/var/log/netdata/debug.log` | The filename to save debug information. This file will not be created if debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](../#debugging).
-error log | `/var/log/netdata/error.log` | The filename to save error messages for netdata daemon and all plugins (`stderr` is sent here for all netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log.
-access log | `/var/log/netdata/access.log` | The filename to save the log of web clients accessing netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log.
-errors flood protection period | `1200` | UNUSED - Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`.
-errors to trigger flood protection | `200` | UNUSED - Number of errors written to the log in `errors flood protection period` sec before flood protection is activated.
-run as user | `netdata` | The user netdata will run as.
-pthread stack size | auto-detected |
-cleanup obsolete charts after seconds | `3600` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions
-gap when lost iterations above | `1` |
-cleanup orphan hosts after seconds | `3600` | How long to wait until automatically removing from the DB a remote netdata host (slave) that is no longer sending data.
-delete obsolete charts files | `yes` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions
-delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal.
-enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero.
+| setting|default|info|||
+|:-----:|:-----:|:---|---|---|
+| process scheduling policy|`keep`|See [Netdata process scheduling policy](../#netdata-process-scheduling-policy)|||
+| OOM score|`1000`|See [OOM score](../#oom-score)|||
+| glibc malloc arena max for plugins|`1`|See [Virtual memory](../#virtual-memory).|||
+| glibc malloc arena max for Netdata|`1`|See [Virtual memory](../#virtual-memory).|||
+| hostname|auto-detected|The hostname of the computer running Netdata.|||
+| history|`3996`|The number of entries the `netdata` daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](../../database/#database) for more information.|||
+| update every|`1`|The frequency, in seconds, of data collection. For more information see [Performance](../../docs/Performance.md#performance).|||
+| config directory|`/etc/netdata`|The directory in which configuration files are kept.|||
+| stock config directory|`/usr/lib/netdata/conf.d`||||
+| log directory|`/var/log/netdata`|The directory in which the [log files](../#log-files) are kept.|||
+| web files directory|`/usr/share/netdata/web`|The directory in which the web static files are kept.|||
+| cache directory|`/var/cache/netdata`|The directory in which the memory database is stored if and when Netdata exits. Netdata will re-read the database when it starts again, to continue from the same point.|||
+| lib directory|`/var/lib/netdata`|Contains the alarm log and the Netdata instance GUID.|||
+| home directory|`/var/cache/netdata`|Contains the db files for the collected metrics.|||
+| plugins directory|`"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"`|The directory in which plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes.|||
+| memory mode|`save`|When set to `save` Netdata will save its round robin database on exit and load it on startup. When set to `map` the cache files will be updated in real time (check `man mmap` - do not set this on systems with heavy load or slow disks - the disks will continuously sync the in-memory database of Netdata). When set to `dbengine` it behaves similarly to `map`, but with much better disk and memory efficiency at the cost of higher overhead. When set to `ram` the round robin database will be temporary and will be lost when Netdata exits. `none` disables the database on this host; this also disables health monitoring (there cannot be health monitoring without a database).|||
+| host access prefix||This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set the SYS_PTRACE capability on the docker container for this to work. Check [issue 43](https://github.com/netdata/netdata/issues/43).|||
+| memory deduplication (ksm)|`yes`|When set to `yes`, Netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](../../database/#ksm).|||
+| TZ environment variable|`:/etc/localtime`|Where to find the timezone.|||
+| timezone|auto-detected|The timezone retrieved from the environment variable.|||
+| debug flags|`0x0000000000000000`|Bitmap of debug options to enable. For more information check [Tracing Options](../#debugging).|||
+| debug log|`/var/log/netdata/debug.log`|The filename to save debug information. This file will not be created if debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](../#debugging).|||
+| error log|`/var/log/netdata/error.log`|The filename to save error messages for Netdata daemon and all plugins (`stderr` is sent here for all Netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log.|||
+| access log|`/var/log/netdata/access.log`|The filename to save the log of web clients accessing Netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log.|||
+| errors flood protection period|`1200`|UNUSED - Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`.|||
+| errors to trigger flood protection|`200`|UNUSED - Number of errors written to the log in `errors flood protection period` sec before flood protection is activated.|||
+| run as user|`netdata`|The user Netdata will run as.|||
+| pthread stack size|auto-detected||||
+| cleanup obsolete charts after seconds|`3600`|See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers); also sets the timeout for cleaning up obsolete dimensions.|||
+| gap when lost iterations above|`1`||||
+| cleanup orphan hosts after seconds|`3600`|How long to wait until automatically removing from the DB a remote Netdata host (slave) that is no longer sending data.|||
+| delete obsolete charts files|`yes`|See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers); also affects the deletion of files for obsolete dimensions.|||
+| delete orphan hosts files|`yes`|Set to `no` to disable non-responsive host removal.|||
+| enable zero metrics|`no`|Set to `yes` to show charts when all their metrics are zero.|||
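
To make the `[global]` table concrete, here is a short, hypothetical `netdata.conf` excerpt; the option names and defaults come from the table above, while the values are purely illustrative:

```
# /etc/netdata/netdata.conf (illustrative excerpt, not a recommendation)
[global]
    # use the disk- and memory-efficient database engine
    memory mode = dbengine
    # collect metrics every 2 seconds instead of the default 1
    update every = 2
    # show charts even when all their metrics are zero
    enable zero metrics = yes
    # multiple plugin directories; the second is quoted because it contains a space
    plugins directory = "/usr/libexec/netdata/plugins.d" "/opt/my plugins.d"
```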
### [web] section options
@@ -86,13 +85,13 @@ In this section you will see a boolean (`yes`/`no`) option for each plugin (e
Additionally, there will be the following options:
-setting | default | info
-:------:|:-------:|:----
-PATH environment variable | `auto-detected` |
-PYTHONPATH environment variable | | Used to set a custom python path
-enable running new plugins | `yes` | When set to `yes`, netdata will enable detected plugins, even if they are not configured explicitly. Setting this to `no` will only enable plugins explicitly configirued in this file with a `yes`
-check for new plugins every | 60 | The time in seconds to check for new plugins in the plugins directory. This allows having other applications dynamically creating plugins for netdata.
-checks | `no` | This is a debugging plugin for the internal latency
+| setting|default|info|
+|:-----:|:-----:|:---|
+| PATH environment variable|`auto-detected`||
+| PYTHONPATH environment variable||Used to set a custom python path|
+| enable running new plugins|`yes`|When set to `yes`, Netdata will enable detected plugins, even if they are not configured explicitly. Setting this to `no` will only enable plugins explicitly configured in this file with a `yes`.|
+| check for new plugins every|60|The interval, in seconds, at which to check for new plugins in the plugins directory. This allows other applications to dynamically create plugins for Netdata.|
+| checks|`no`|This is a debugging plugin for measuring the internal latency of Netdata.|
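
As a sketch of how `enable running new plugins` interacts with the per-plugin booleans (the plugin names below are examples only):

```
[plugins]
    # newly detected plugins are no longer enabled automatically...
    enable running new plugins = no
    # ...only plugins explicitly set to yes will run
    go.d = yes
    apps = yes
    # rescan the plugins directory every 2 minutes instead of every 60 seconds
    check for new plugins every = 120
```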
### [health] section options
@@ -102,16 +101,16 @@ Specific alarms are configured in per-collector config files under the `health.d
[Alarm notifications](../../health/notifications/#netdata-alarm-notifications) are configured in `health_alarm_notify.conf`.
-setting | default | info
-:------:|:-------:|:----
-enabled | `yes` | Set to `no` to disable all alarms and notifications
-in memory max health log entries | 1000 | Size of the alarm history held in RAM
-script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alarm notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`).
-stock health configuration directory | `/usr/lib/netdata/conf.d/health.d` | Contains the stock alarm configuration files for each collector
-health configuration directory | `/etc/netdata/health.d` | The directory containing the user alarm configuration files, to override the stock configurations
-run at least every seconds | `10` | Controls how often all alarm conditions should be evaluated.
-postpone alarms during hibernation for seconds | `60` | Prevents false alarms. May need to be increased if you get alarms during hibernation.
-rotate log every lines | 2000 | Controls the number of alarm log entries stored in `<lib directory>/health-log.db`, where `<lib directory>` is the one configured in the [[global] section](#global-section-options)
+| setting|default|info|
+|:-----:|:-----:|:---|
+| enabled|`yes`|Set to `no` to disable all alarms and notifications|
+| in memory max health log entries|1000|Size of the alarm history held in RAM|
+| script to execute on alarm|`/usr/libexec/netdata/plugins.d/alarm-notify.sh`|The script that sends alarm notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`).|
+| stock health configuration directory|`/usr/lib/netdata/conf.d/health.d`|Contains the stock alarm configuration files for each collector|
+| health configuration directory|`/etc/netdata/health.d`|The directory containing the user alarm configuration files, to override the stock configurations|
+| run at least every seconds|`10`|Controls how often all alarm conditions should be evaluated.|
+| postpone alarms during hibernation for seconds|`60`|Prevents false alarms. May need to be increased if you get alarms during hibernation.|
+| rotate log every lines|2000|Controls the number of alarm log entries stored in `<lib directory>/health-log.db`, where `<lib directory>` is the one configured in the [\[global\] section](#global-section-options)|
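
For illustration, a hypothetical `[health]` section tuned for a laptop that suspends often might look like this (values are examples, not recommendations):

```
[health]
    enabled = yes
    # keep a larger alarm history in RAM
    in memory max health log entries = 5000
    # evaluate alarm conditions at most every 20 seconds
    run at least every seconds = 20
    # tolerate longer suspends before raising false alarms
    postpone alarms during hibernation for seconds = 120
```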
### [registry] section options
@@ -129,16 +128,16 @@ The configuration options for plugins appear in sections following the pattern `
Most internal plugins will provide additional options. Check [Internal Plugins](../../collectors/) for more information.
-Please note, that by default Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
+Please note that, by default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that start having values after Netdata has started will be detected and charts will be automatically added to the dashboard (though a refresh of the dashboard is needed for them to appear). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
#### External plugins
External plugins will have only 2 options at `netdata.conf`:
-setting | default | info
-:------:|:-------:|:----
-update every|the value of `[global].update every` setting|The frequency in seconds the plugin should collect values. For more information check [Performance](../../docs/Performance.md#performance).
-command options|*empty*|Additional command line options to pass to the plugin.
+| setting | default | info |
+| :-----:|:-----:|:---|
+| update every | the value of `[global].update every` setting|The frequency, in seconds, at which the plugin should collect values. For more information check [Performance](../../docs/Performance.md#performance).|
+| command options | _empty_ | Additional command line options to pass to the plugin.|
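
Following the `[plugin:...]` section pattern described above, a hypothetical override for one external plugin could look like the following sketch (plugin name and values are illustrative):

```
[plugin:python.d]
    # collect every 5 seconds instead of inheriting [global].update every
    update every = 5
    # extra command line options passed verbatim to the plugin
    command options = debug
```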
External plugins that need additional configuration may support a dedicated file in `/etc/netdata`. Check their documentation.
@@ -146,4 +145,4 @@ External plugins that need additional configuration may support a dedicated file
In this section you will find a separate subsection for each chart shown on the dashboard. You can control all aspects of a specific chart here. You can understand what each option does by reading [how charts are defined](../../collectors/plugins.d/#chart). If you don't know how to find the name of a chart, you can learn about it [here](../../docs/Charts.md).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/daemon/system-info.sh b/daemon/system-info.sh
index 1f5bc52b1..6fab741fd 100755
--- a/daemon/system-info.sh
+++ b/daemon/system-info.sh
@@ -22,8 +22,8 @@ if [ "${KERNEL_NAME}" = "Darwin" ]; then
OIFS="$IFS"
IFS=$'\n'
set $(sw_vers) > /dev/null
- NAME=$(echo $1 | tr "\n" ' ' | sed 's/ProductName:[ ]*//')
- VERSION=$(echo $2 | tr "\n" ' ' | sed 's/ProductVersion:[ ]*//')
+ NAME=$(echo $1 | tr "\n\t" ' ' | sed -e 's/ProductName:[ ]*//' -e 's/[ ]*$//')
+ VERSION=$(echo $2 | tr "\n\t" ' ' | sed -e 's/ProductVersion:[ ]*//' -e 's/[ ]*$//')
ID="mac"
ID_LIKE="mac"
OS_DETECTION="sw_vers"
@@ -44,7 +44,8 @@ else
if [ "${NAME}" = "unknown" ]; then NAME="${DISTRIB_ID}"; fi
if [ "${VERSION}" = "unknown" ]; then VERSION="${DISTRIB_RELEASE}"; fi
if [ "${ID}" = "unknown" ]; then ID="${DISTRIB_CODENAME}"; fi
- elif [ -n "$(command -v lsb_release 2>/dev/null)" ]; then
+ fi
+ if [ -n "$(command -v lsb_release 2>/dev/null)" ]; then
if [ "${OS_DETECTION}" = "unknown" ]; then OS_DETECTION="lsb_release"; else OS_DETECTION="Mixed"; fi
if [ "${NAME}" = "unknown" ]; then NAME="$(lsb_release -is 2>/dev/null)"; fi
if [ "${VERSION}" = "unknown" ]; then VERSION="$(lsb_release -rs 2>/dev/null)"; fi
diff --git a/daemon/unit_test.c b/daemon/unit_test.c
index f9b58ce6b..1c84022c0 100644
--- a/daemon/unit_test.c
+++ b/daemon/unit_test.c
@@ -1581,34 +1581,12 @@ static inline void rrddim_set_by_pointer_fake_time(RRDDIM *rd, collected_number
if(unlikely(v > rd->collected_value_max)) rd->collected_value_max = v;
}
-int test_dbengine(void)
+static RRDHOST *dbengine_rrdhost_find_or_create(char *name)
{
- const int CHARTS = 128;
- const int DIMS = 16; /* That gives us 2048 metrics */
- const int POINTS = 16384; /* This produces 128MiB of metric data */
- const int QUERY_BATCH = 4096;
- uint8_t same;
- int i, j, k, c, errors;
- RRDHOST *host = NULL;
- RRDSET *st[CHARTS];
- RRDDIM *rd[CHARTS][DIMS];
- char name[101];
- time_t time_now;
- collected_number last;
- struct rrddim_query_handle handle;
- calculated_number value, expected;
- storage_number n;
-
- error_log_limit_unlimited();
- fprintf(stderr, "\nRunning DB-engine test\n");
-
- default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
-
- debug(D_RRDHOST, "Initializing localhost with hostname 'unittest-dbengine'");
- host = rrdhost_find_or_create(
- "unittest-dbengine"
- , "unittest-dbengine"
- , "unittest-dbengine"
+ return rrdhost_find_or_create(
+ name
+ , name
+ , name
, os_type
, netdata_configured_timezone
, config_get(CONFIG_SECTION_BACKEND, "host tags", "")
@@ -1624,15 +1602,33 @@ int test_dbengine(void)
, default_rrdpush_send_charts_matching
, NULL
);
- if (NULL == host)
- return 1;
+}
+
+// constants for test_dbengine
+static const int CHARTS = 64;
+static const int DIMS = 16; // That gives us 64 * 16 = 1024 metrics
+#define REGIONS (3) // 3 regions of update_every
+// first region update_every is 2, second is 3, third is 1
+static const int REGION_UPDATE_EVERY[REGIONS] = {2, 3, 1};
+static const int REGION_POINTS[REGIONS] = {
+ 16384, // This produces 64MiB of metric data for the first region: update_every = 2
+ 16384, // This produces 64MiB of metric data for the second region: update_every = 3
+ 16384, // This produces 64MiB of metric data for the third region: update_every = 1
+};
+static const int QUERY_BATCH = 4096;
+
+static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int update_every)
+{
+ int i, j;
+ char name[101];
for (i = 0 ; i < CHARTS ; ++i) {
snprintfz(name, 100, "dbengine-chart-%d", i);
// create the chart
st[i] = rrdset_create(host, "netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest",
- NULL, 1, 1, RRDSET_TYPE_LINE);
+ NULL, 1, update_every, RRDSET_TYPE_LINE);
rrdset_flag_set(st[i], RRDSET_FLAG_DEBUG);
rrdset_flag_set(st[i], RRDSET_FLAG_STORE_FIRST);
for (j = 0 ; j < DIMS ; ++j) {
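
The 64 MiB figures in the `REGION_POINTS` comments are plain arithmetic: charts times dimensions times points times the size of one metric slot. A compile-time sanity check of that claim, assuming `storage_number` is a 4-byte value as in Netdata:

```c
#include <assert.h>

/* 64 charts * 16 dims * 16384 points * 4 bytes per slot = 64 MiB per region */
static_assert(64 * 16 * 16384 * 4 == 64 * 1024 * 1024,
              "each test region stores 64 MiB of raw metric slots");
```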
@@ -1642,50 +1638,103 @@ int test_dbengine(void)
}
}
+ // Initialize DB with the very first entries
+ for (i = 0 ; i < CHARTS ; ++i) {
+ for (j = 0 ; j < DIMS ; ++j) {
+ rd[i][j]->last_collected_time.tv_sec =
+ st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = 2 * API_RELATIVE_TIME_MAX - 1;
+ rd[i][j]->last_collected_time.tv_usec =
+ st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
+ }
+ }
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->usec_since_last_update = USEC_PER_SEC;
+
+ for (j = 0; j < DIMS; ++j) {
+ rrddim_set_by_pointer_fake_time(rd[i][j], 69, 2 * API_RELATIVE_TIME_MAX); // set first value to 69
+ }
+ rrdset_done(st[i]);
+ }
+    // Flush pages for subsequent real values
+ for (i = 0 ; i < CHARTS ; ++i) {
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+}
+
+// Feeds the database region with test data, returns last timestamp of region
+static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start)
+{
+ time_t time_now;
+ int i, j, c, update_every;
+ collected_number next;
+
+ update_every = REGION_UPDATE_EVERY[current_region];
+ time_now = time_start + update_every;
// feed it with the test data
- time_now = 1;
- last = 0;
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0 ; j < DIMS ; ++j) {
rd[i][j]->last_collected_time.tv_sec =
st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = time_now;
rd[i][j]->last_collected_time.tv_usec =
- st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
+ st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
}
}
- for(c = 0; c < POINTS ; ++c) {
- ++time_now; // time_now = c + 2
+ for (c = 0; c < REGION_POINTS[current_region] ; ++c) {
+ time_now += update_every; // time_now = start + (c + 2) * update_every
for (i = 0 ; i < CHARTS ; ++i) {
- st[i]->usec_since_last_update = USEC_PER_SEC;
+ st[i]->usec_since_last_update = USEC_PER_SEC * update_every;
for (j = 0; j < DIMS; ++j) {
- last = i * DIMS * POINTS + j * POINTS + c;
- rrddim_set_by_pointer_fake_time(rd[i][j], last, time_now);
+ next = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c;
+ rrddim_set_by_pointer_fake_time(rd[i][j], next, time_now);
}
rrdset_done(st[i]);
}
}
+    return time_now; // time_end
+}
- // check the result
+// Checks the metric data for the given region, returns number of errors
+static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start)
+{
+ uint8_t same;
+ time_t time_now, time_retrieved;
+ int i, j, k, c, errors, update_every;
+ collected_number last;
+ calculated_number value, expected;
+ storage_number n;
+ struct rrddim_query_handle handle;
+
+ update_every = REGION_UPDATE_EVERY[current_region];
errors = 0;
- for(c = 0; c < POINTS ; c += QUERY_BATCH) {
- time_now = c + 2;
+ // check the result
+ for (c = 0; c < REGION_POINTS[current_region] ; c += QUERY_BATCH) {
+ time_now = time_start + (c + 2) * update_every;
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0; j < DIMS; ++j) {
- rd[i][j]->state->query_ops.init(rd[i][j], &handle, time_now, time_now + QUERY_BATCH);
+ rd[i][j]->state->query_ops.init(rd[i][j], &handle, time_now, time_now + QUERY_BATCH * update_every);
for (k = 0; k < QUERY_BATCH; ++k) {
- last = i * DIMS * POINTS + j * POINTS + c + k;
+ last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c + k;
expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
- n = rd[i][j]->state->query_ops.next_metric(&handle);
+ n = rd[i][j]->state->query_ops.next_metric(&handle, &time_retrieved);
value = unpack_storage_number(n);
same = (calculated_number_round(value * 10000000.0) == calculated_number_round(expected * 10000000.0)) ? 1 : 0;
if(!same) {
fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
- st[i]->name, rd[i][j]->name, (unsigned long)time_now + k, expected, value);
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now + k * update_every, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now + k * update_every) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now + k * update_every, (unsigned long)time_retrieved);
errors++;
}
}
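
Condensed, the two invariants this checker verifies per region are simple closed forms. A sketch using the test's own names (`u` standing in for the region's `update_every`, `points` for `REGION_POINTS[current_region]`); the helper functions below are illustrative, not part of the source:

```c
#include <time.h>

/* value stored for chart i, dimension j at collection step c of a region */
static inline long expected_value(int i, int j, long c, long points, int dims) {
    return (long)i * dims * points + (long)j * points + c;
}

/* timestamp of step c: one interval seeds last_collected_time and the feed
 * loop advances before storing, hence the two leading intervals */
static inline time_t expected_time(time_t region_start, long c, int u) {
    return region_start + (time_t)(c + 2) * u;
}
```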
@@ -1693,7 +1742,184 @@ int test_dbengine(void)
}
}
}
+ return errors;
+}
+
+// Checks the RRDR transformations for the given region, returns number of errors
+static int test_dbengine_check_rrdr(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start, time_t time_end)
+{
+ uint8_t same;
+ time_t time_now, time_retrieved;
+ int i, j, errors, update_every;
+ long c;
+ collected_number last;
+ calculated_number value, expected;
+
+ errors = 0;
+ update_every = REGION_UPDATE_EVERY[current_region];
+ long points = (time_end - time_start) / update_every - 1;
+ for (i = 0 ; i < CHARTS ; ++i) {
+ RRDR *r = rrd2rrdr(st[i], points, time_start + update_every, time_end, RRDR_GROUPING_AVERAGE, 0, 0, NULL);
+ if (!r) {
+ fprintf(stderr, " DB-engine unittest %s: empty RRDR ### E R R O R ###\n", st[i]->name);
+ return ++errors;
+ } else {
+ assert(r->st == st[i]);
+ for (c = 0; c != rrdr_rows(r) ; ++c) {
+ RRDDIM *d;
+ time_now = time_start + (c + 2) * update_every;
+ time_retrieved = r->t[c];
+
+ // for each dimension
+ for (j = 0, d = r->st->dimensions ; d && j < r->d ; ++j, d = d->next) {
+ calculated_number *cn = &r->v[ c * r->d ];
+ value = cn[j];
+ assert(rd[i][j] == d);
+
+ last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c;
+ expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
+
+ same = (calculated_number_round(value * 10000000.0) == calculated_number_round(expected * 10000000.0)) ? 1 : 0;
+ if(!same) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", RRDR found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, (unsigned long)time_retrieved);
+ errors++;
+ }
+ }
+ }
+ rrdr_free(r);
+ }
+ }
+ return errors;
+}
+
+int test_dbengine(void)
+{
+ int i, j, errors, update_every, current_region;
+ RRDHOST *host = NULL;
+ RRDSET *st[CHARTS];
+ RRDDIM *rd[CHARTS][DIMS];
+ time_t time_start[REGIONS], time_end[REGIONS];
+
+ error_log_limit_unlimited();
+ fprintf(stderr, "\nRunning DB-engine test\n");
+
+ default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
+
+ debug(D_RRDHOST, "Initializing localhost with hostname 'unittest-dbengine'");
+ host = dbengine_rrdhost_find_or_create("unittest-dbengine");
+ if (NULL == host)
+ return 1;
+
+ current_region = 0; // this is the first region of data
+ update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 2 seconds
+ test_dbengine_create_charts(host, st, rd, update_every);
+
+ time_start[current_region] = 2 * API_RELATIVE_TIME_MAX;
+    time_end[current_region] = test_dbengine_create_metrics(st, rd, current_region, time_start[current_region]);
+
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+    current_region = 1; // this is the second region of data
+ update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 3 seconds
+ // Align pages for frequency change
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->update_every = update_every;
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+
+ time_start[current_region] = time_end[current_region - 1] + update_every;
+ if (0 != time_start[current_region] % update_every) // align to update_every
+ time_start[current_region] += update_every - time_start[current_region] % update_every;
+    time_end[current_region] = test_dbengine_create_metrics(st, rd, current_region, time_start[current_region]);
+
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+    current_region = 2; // this is the third region of data
+    update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 1 second
+ // Align pages for frequency change
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->update_every = update_every;
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+
+ time_start[current_region] = time_end[current_region - 1] + update_every;
+ if (0 != time_start[current_region] % update_every) // align to update_every
+ time_start[current_region] += update_every - time_start[current_region] % update_every;
+    time_end[current_region] = test_dbengine_create_metrics(st, rd, current_region, time_start[current_region]);
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+ for (current_region = 0 ; current_region < REGIONS ; ++current_region) {
+ errors = test_dbengine_check_rrdr(st, rd, current_region, time_start[current_region], time_end[current_region]);
+ if (errors)
+ goto error_out;
+ }
+
+ current_region = 1;
+ update_every = REGION_UPDATE_EVERY[current_region]; // use the maximum update_every = 3
+ errors = 0;
+ long points = (time_end[REGIONS - 1] - time_start[0]) / update_every - 1; // cover all time regions with RRDR
+ long point_offset = (time_start[current_region] - time_start[0]) / update_every;
+ for (i = 0 ; i < CHARTS ; ++i) {
+ RRDR *r = rrd2rrdr(st[i], points, time_start[0] + update_every, time_end[REGIONS - 1], RRDR_GROUPING_AVERAGE, 0, 0, NULL);
+ if (!r) {
+ fprintf(stderr, " DB-engine unittest %s: empty RRDR ### E R R O R ###\n", st[i]->name);
+ ++errors;
+ } else {
+ long c;
+
+ assert(r->st == st[i]);
+ // test current region values only, since they must be left unchanged
+ for (c = point_offset ; c < point_offset + rrdr_rows(r) / REGIONS / 2 ; ++c) {
+ RRDDIM *d;
+ time_t time_now = time_start[current_region] + (c - point_offset + 2) * update_every;
+ time_t time_retrieved = r->t[c];
+
+ // for each dimension
+ for(j = 0, d = r->st->dimensions ; d && j < r->d ; ++j, d = d->next) {
+ calculated_number *cn = &r->v[ c * r->d ];
+ calculated_number value = cn[j];
+ assert(rd[i][j] == d);
+
+ collected_number last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c - point_offset;
+ calculated_number expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
+
+ uint8_t same = (calculated_number_round(value * 10000000.0) == calculated_number_round(expected * 10000000.0)) ? 1 : 0;
+ if(!same) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", RRDR found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, (unsigned long)time_retrieved);
+ errors++;
+ }
+ }
+ }
+ rrdr_free(r);
+ }
+ }
+error_out:
rrdeng_exit(host->rrdeng_ctx);
rrd_wrlock();
rrdhost_delete_charts(host);
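
One note on the cross-region query at the end of `test_dbengine`: all three regions are fetched through a single RRDR on the coarsest grid (`update_every = 3`), and `point_offset` maps region 1's first sample onto that grid. A runnable toy with made-up timestamps standing in for the test's `time_start`/`time_end` arrays:

```c
#include <stdio.h>

int main(void) {
    long u = 3;                       /* REGION_UPDATE_EVERY[1] */
    long time_start0 = 0;             /* illustrative values only */
    long time_start1 = 99, time_end2 = 300;
    long points       = (time_end2 - time_start0) / u - 1;   /* rows in the RRDR */
    long point_offset = (time_start1 - time_start0) / u;     /* first row of region 1 */
    printf("points=%ld, region 1 starts at row %ld\n", points, point_offset);
    return 0;
}
```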
@@ -1704,43 +1930,25 @@ int test_dbengine(void)
void generate_dbengine_dataset(unsigned history_seconds)
{
- const int DIMS = 128;
+ const int DSET_DIMS = 128;
const uint64_t EXPECTED_COMPRESSION_RATIO = 94;
- int j;
+ int j, update_every = 1;
RRDHOST *host = NULL;
RRDSET *st;
- RRDDIM *rd[DIMS];
+ RRDDIM *rd[DSET_DIMS];
char name[101];
time_t time_current, time_present;
default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
default_rrdeng_page_cache_mb = 128;
- /* Worst case for uncompressible data */
- default_rrdeng_disk_quota_mb = (((uint64_t)DIMS) * sizeof(storage_number) * history_seconds) / (1024 * 1024);
+ // Worst case for uncompressible data
+ default_rrdeng_disk_quota_mb = (((uint64_t)DSET_DIMS) * sizeof(storage_number) * history_seconds) / (1024 * 1024);
default_rrdeng_disk_quota_mb -= default_rrdeng_disk_quota_mb * EXPECTED_COMPRESSION_RATIO / 100;
error_log_limit_unlimited();
debug(D_RRDHOST, "Initializing localhost with hostname 'dbengine-dataset'");
- host = rrdhost_find_or_create(
- "dbengine-dataset"
- , "dbengine-dataset"
- , "dbengine-dataset"
- , os_type
- , netdata_configured_timezone
- , config_get(CONFIG_SECTION_BACKEND, "host tags", "")
- , program_name
- , program_version
- , default_rrd_update_every
- , default_rrd_history_entries
- , RRD_MEMORY_MODE_DBENGINE
- , default_health_enabled
- , default_rrdpush_enabled
- , default_rrdpush_destination
- , default_rrdpush_api_key
- , default_rrdpush_send_charts_matching
- , NULL
- );
+ host = dbengine_rrdhost_find_or_create("dbengine-dataset");
if (NULL == host)
return;
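
To put numbers on the quota computation: the worst-case uncompressible size is reserved first, then reduced by the expected 94% compression. A standalone sketch for a hypothetical one-day dataset, assuming the 4-byte `storage_number` slot:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    unsigned history_seconds = 86400;  /* hypothetical: one day of history */
    /* 128 dims * 4 bytes * 86400 s = 44236800 bytes -> 42 MiB worst case */
    uint64_t quota_mb = (128ULL * 4 * history_seconds) / (1024 * 1024);
    quota_mb -= quota_mb * 94 / 100;   /* EXPECTED_COMPRESSION_RATIO = 94 */
    printf("%llu MiB\n", (unsigned long long)quota_mb); /* prints: 3 MiB */
    return 0;
}
```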
@@ -1748,8 +1956,8 @@ void generate_dbengine_dataset(unsigned history_seconds)
// create the chart
st = rrdset_create(host, "example", "random", "random", "example", NULL, "random", "random", "random",
- NULL, 1, 1, RRDSET_TYPE_LINE);
- for (j = 0 ; j < DIMS ; ++j) {
+ NULL, 1, update_every, RRDSET_TYPE_LINE);
+ for (j = 0 ; j < DSET_DIMS ; ++j) {
snprintfz(name, 100, "random%d", j);
rd[j] = rrddim_add(st, name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -1758,7 +1966,7 @@ void generate_dbengine_dataset(unsigned history_seconds)
time_present = now_realtime_sec();
// feed it with the test data
time_current = time_present - history_seconds;
- for (j = 0 ; j < DIMS ; ++j) {
+ for (j = 0 ; j < DSET_DIMS ; ++j) {
rd[j]->last_collected_time.tv_sec =
st->last_collected_time.tv_sec = st->last_updated.tv_sec = time_current;
rd[j]->last_collected_time.tv_usec =
@@ -1767,7 +1975,7 @@ void generate_dbengine_dataset(unsigned history_seconds)
for( ; time_current < time_present; ++time_current) {
st->usec_since_last_update = USEC_PER_SEC;
- for (j = 0; j < DIMS; ++j) {
+ for (j = 0; j < DSET_DIMS; ++j) {
rrddim_set_by_pointer_fake_time(rd[j], (time_current + j) % 128, time_current);
}
rrdset_done(st);