From cd7ed12292aef11d9062b64f61215174e8cc1860 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Tue, 3 Sep 2019 12:23:48 +0200 Subject: Merging upstream version 1.17.0. Signed-off-by: Daniel Baumann --- web/Makefile.in | 702 ++++++++++++++++++++ web/README.md | 10 +- web/api/Makefile.in | 760 +++++++++++++++++++++ web/api/README.md | 6 +- web/api/badges/Makefile.in | 514 +++++++++++++++ web/api/badges/README.md | 189 +++--- web/api/exporters/Makefile.in | 699 ++++++++++++++++++++ web/api/exporters/README.md | 2 +- web/api/exporters/prometheus/Makefile.in | 514 +++++++++++++++ web/api/exporters/prometheus/README.md | 4 +- web/api/exporters/shell/Makefile.in | 514 +++++++++++++++ web/api/exporters/shell/README.md | 10 +- web/api/formatters/Makefile.in | 701 ++++++++++++++++++++ web/api/formatters/README.md | 35 +- web/api/formatters/csv/Makefile.in | 514 +++++++++++++++ web/api/formatters/csv/README.md | 88 ++- web/api/formatters/json/Makefile.in | 514 +++++++++++++++ web/api/formatters/json/README.md | 37 +- web/api/formatters/json_wrapper.c | 13 +- web/api/formatters/rrdset2json.c | 5 +- web/api/formatters/ssv/Makefile.in | 514 +++++++++++++++ web/api/formatters/ssv/README.md | 26 +- web/api/formatters/value/Makefile.in | 514 +++++++++++++++ web/api/formatters/value/README.md | 12 +- web/api/health/Makefile.in | 514 +++++++++++++++ web/api/health/README.md | 66 +- web/api/netdata-swagger.json | 96 ++- web/api/netdata-swagger.yaml | 61 +- web/api/queries/Makefile.in | 706 ++++++++++++++++++++ web/api/queries/README.md | 137 ++-- web/api/queries/average/Makefile.in | 514 +++++++++++++++ web/api/queries/average/README.md | 10 +- web/api/queries/average/average.c | 9 +- web/api/queries/des/Makefile.in | 514 +++++++++++++++ web/api/queries/des/README.md | 14 +- web/api/queries/incremental_sum/Makefile.in | 514 +++++++++++++++ web/api/queries/incremental_sum/README.md | 12 +- web/api/queries/max/Makefile.in | 514 +++++++++++++++ web/api/queries/max/README.md | 10 +- web/api/queries/median/Makefile.in | 514 +++++++++++++++ web/api/queries/median/README.md | 12 +- web/api/queries/min/Makefile.in | 514 +++++++++++++++ web/api/queries/min/README.md | 10 +- web/api/queries/query.c | 876 ++++++++++++++++++++---- web/api/queries/rrdr.h | 12 +- web/api/queries/ses/Makefile.in | 514 +++++++++++++++ web/api/queries/ses/README.md | 14 +- web/api/queries/stddev/Makefile.in | 514 +++++++++++++++ web/api/queries/stddev/README.md | 23 +- web/api/queries/sum/Makefile.in | 514 +++++++++++++++ web/api/queries/sum/README.md | 12 +- web/api/web_api_v1.c | 46 ++ web/api/web_api_v1.h | 1 + web/gui/Makefile.in | 914 ++++++++++++++++++++++++++ web/gui/README.md | 32 +- web/gui/browserconfig.xml | 2 - web/gui/confluence/README.md | 50 +- web/gui/custom/README.md | 93 ++- web/gui/dashboard.css | 18 + web/gui/dashboard.js | 37 +- web/gui/dashboard.slate.css | 18 + web/gui/dashboard_info.js | 143 +++- web/gui/images/favicon-128.png | Bin 2436 -> 0 bytes web/gui/images/favicon-196x196.png | Bin 10025 -> 0 bytes web/gui/images/ms-icon-310x150.png | Bin 3632 -> 0 bytes web/gui/images/ms-icon-36x36.png | Bin 536 -> 0 bytes web/gui/images/packaging-beta-tag.svg | 42 -- web/gui/images/seo-performance-128.png | Bin 1828 -> 0 bytes web/gui/main.js | 14 +- web/gui/manifest.json | 41 -- web/gui/src/dashboard.js/charting/_c3.js | 114 ---- web/gui/src/dashboard.js/charting/_morris.js | 81 --- web/gui/src/dashboard.js/charting/_raphael.js | 48 -- web/gui/src/dashboard.js/charting/dygraph.js | 37 +- web/gui/version.txt | 1 + 
web/server/Makefile.in | 698 ++++++++++++++++++++ web/server/README.md | 134 ++-- web/server/static/Makefile.in | 697 ++++++++++++++++++++ web/server/static/README.md | 2 +- web/server/web_client.c | 10 +- 80 files changed, 16367 insertions(+), 1023 deletions(-) create mode 100644 web/Makefile.in create mode 100644 web/api/Makefile.in create mode 100644 web/api/badges/Makefile.in create mode 100644 web/api/exporters/Makefile.in create mode 100644 web/api/exporters/prometheus/Makefile.in create mode 100644 web/api/exporters/shell/Makefile.in create mode 100644 web/api/formatters/Makefile.in create mode 100644 web/api/formatters/csv/Makefile.in create mode 100644 web/api/formatters/json/Makefile.in create mode 100644 web/api/formatters/ssv/Makefile.in create mode 100644 web/api/formatters/value/Makefile.in create mode 100644 web/api/health/Makefile.in create mode 100644 web/api/queries/Makefile.in create mode 100644 web/api/queries/average/Makefile.in create mode 100644 web/api/queries/des/Makefile.in create mode 100644 web/api/queries/incremental_sum/Makefile.in create mode 100644 web/api/queries/max/Makefile.in create mode 100644 web/api/queries/median/Makefile.in create mode 100644 web/api/queries/min/Makefile.in create mode 100644 web/api/queries/ses/Makefile.in create mode 100644 web/api/queries/stddev/Makefile.in create mode 100644 web/api/queries/sum/Makefile.in create mode 100644 web/gui/Makefile.in delete mode 100644 web/gui/browserconfig.xml delete mode 100644 web/gui/images/favicon-128.png delete mode 100644 web/gui/images/favicon-196x196.png delete mode 100644 web/gui/images/ms-icon-310x150.png delete mode 100644 web/gui/images/ms-icon-36x36.png delete mode 100644 web/gui/images/packaging-beta-tag.svg delete mode 100644 web/gui/images/seo-performance-128.png delete mode 100644 web/gui/manifest.json delete mode 100644 web/gui/src/dashboard.js/charting/_c3.js delete mode 100644 web/gui/src/dashboard.js/charting/_morris.js delete mode 100644 web/gui/src/dashboard.js/charting/_raphael.js create mode 100644 web/gui/version.txt create mode 100644 web/server/Makefile.in create mode 100644 web/server/static/Makefile.in (limited to 'web') diff --git a/web/Makefile.in b/web/Makefile.in new file mode 100644 index 000000000..203be6d7a --- /dev/null +++ b/web/Makefile.in @@ -0,0 +1,702 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \ + $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = 
$(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CUPSCONFIG = @CUPSCONFIG@ +CXX = @CXX@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CXX_BINARY = @CXX_BINARY@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +JSON_CFLAGS = @JSON_CFLAGS@ +JSON_LIBS = @JSON_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@ +LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@ +LIBCURL_CFLAGS = @LIBCURL_CFLAGS@ +LIBCURL_LIBS = @LIBCURL_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@ +LIBMONGOC_LIBS = @LIBMONGOC_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBSSL_CFLAGS = @LIBSSL_CFLAGS@ +LIBSSL_LIBS = @LIBSSL_LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ 
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@ +OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@ +OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@ +OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@ +OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@ +OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@ +OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@ +OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@ +OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@ +OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@ +OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@ +OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@ +OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@ +PROTOBUF_LIBS = @PROTOBUF_LIBS@ +PROTOC = @PROTOC@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@ +XENLIGHT_LIBS = @XENLIGHT_LIBS@ +YAJL_CFLAGS = @YAJL_CFLAGS@ +YAJL_LIBS = @YAJL_LIBS@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = 
@oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + api \ + gui \ + server \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + gui/confluence/README.md \ + gui/custom/README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. 
+$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/README.md b/web/README.md index 5c1a06f59..fc230f51d 100644 --- a/web/README.md +++ b/web/README.md @@ -1,14 +1,14 @@ # Web dashboards overview -The default port is 19999; for example, to access the dashboard on localhost, use: http://localhost:19999 +The default port is 19999; for example, to access the dashboard on localhost, use: To view Netdata collected data you access its **[REST API v1](api/)**. For our convenience, Netdata provides 2 more layers: -1. The `dashboard.js` javascript library that allows us to design custom dashboards using plain HTML. For information on creating custom dashboards, see **[Custom Dashboards](gui/custom/)** and **[Atlassian Confluence Dashboards](gui/confluence/)** +1. The `dashboard.js` javascript library that allows us to design custom dashboards using plain HTML. For information on creating custom dashboards, see **[Custom Dashboards](gui/custom/)** and **[Atlassian Confluence Dashboards](gui/confluence/)** -2. Ready to be used web dashboards that render all the charts a Netdata server maintains. +2. Ready to be used web dashboards that render all the charts a Netdata server maintains. ## Customizing the standard dashboards @@ -18,11 +18,11 @@ If you change that file, your changes will be overwritten when Netdata is update You have to copy the example file under a new name, so that it will not be overwritten with Netdata updates. 
-To configure your info file set in netdata.conf: +To configure your info file set in `netdata.conf`: ``` [web] custom dashboard_info.js = your_file_name.js ``` -[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>) diff --git a/web/api/Makefile.in b/web/api/Makefile.in new file mode 100644 index 000000000..98533d8c1 --- /dev/null +++ b/web/api/Makefile.in @@ -0,0 +1,760 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \ + $(dist_web_DATA) $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 
2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(webdir)" +DATA = $(dist_noinst_DATA) $(dist_web_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CUPSCONFIG = @CUPSCONFIG@ +CXX = @CXX@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CXX_BINARY = @CXX_BINARY@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +JSON_CFLAGS = @JSON_CFLAGS@ +JSON_LIBS = @JSON_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@ +LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@ +LIBCURL_CFLAGS = @LIBCURL_CFLAGS@ +LIBCURL_LIBS = @LIBCURL_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@ +LIBMONGOC_LIBS = @LIBMONGOC_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBSSL_CFLAGS = @LIBSSL_CFLAGS@ +LIBSSL_LIBS = @LIBSSL_LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@ +OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@ +OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@ +OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@ +OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@ +OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@ +OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@ +OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = 
@OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@ +OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@ +OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@ +OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@ +OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@ +OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@ +PROTOBUF_LIBS = @PROTOBUF_LIBS@ +PROTOC = @PROTOC@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@ +XENLIGHT_LIBS = @XENLIGHT_LIBS@ +YAJL_CFLAGS = @YAJL_CFLAGS@ +YAJL_LIBS = @YAJL_LIBS@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + badges \ + queries \ + exporters \ + formatters \ + health \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_web_DATA = \ + netdata-swagger.yaml \ + netdata-swagger.json \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: 
@MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-dist_webDATA: $(dist_web_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(webdir)" || exit $$?; \ + done + +uninstall-dist_webDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. 
+$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(webdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-dist_webDATA + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-dist_webDATA + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dist_webDATA install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + installdirs-am maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags \ + tags-am uninstall uninstall-am uninstall-dist_webDATA + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/README.md b/web/api/README.md index 44afbc90d..958b1580c 100644 --- a/web/api/README.md +++ b/web/api/README.md @@ -2,13 +2,13 @@ ## Netdata REST API -The complete documentation of the netdata API is available at the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**. +The complete documentation of the Netdata API is available at the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**. If your prefer it over the Swagger Editor, you can also use **[Swagger UI](https://registry.my-netdata.io/swagger/#!/default/get_data)**. This however does not provide all the information available. ## Google charts API -netdata is a [Google Visualization API datatable and datasource provider](https://developers.google.com/chart/interactive/docs/reference), so it can directly be used with [Google Charts](https://developers.google.com/chart/interactive/docs/). +Netdata is a [Google Visualization API datatable and datasource provider](https://developers.google.com/chart/interactive/docs/reference), so it can directly be used with [Google Charts](https://developers.google.com/chart/interactive/docs/). 
Check this [single chart, jsfiddle example](https://jsfiddle.net/ktsaou/ensu4uws/9/): @@ -18,4 +18,4 @@ and this [multi chart, jsfiddle example](https://jsfiddle.net/ktsaou/L5y2eqp2/): ![image](https://cloud.githubusercontent.com/assets/2662304/23824766/31a4a68c-0685-11e7-8429-8327cab64be2.png) -[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>) diff --git a/web/api/badges/Makefile.in b/web/api/badges/Makefile.in new file mode 100644 index 000000000..ea8c6d605 --- /dev/null +++ b/web/api/badges/Makefile.in @@ -0,0 +1,514 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/api/badges +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \ + $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = 
@CPP@ +CPPFLAGS = @CPPFLAGS@ +CUPSCONFIG = @CUPSCONFIG@ +CXX = @CXX@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CXX_BINARY = @CXX_BINARY@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +JSON_CFLAGS = @JSON_CFLAGS@ +JSON_LIBS = @JSON_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@ +LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@ +LIBCURL_CFLAGS = @LIBCURL_CFLAGS@ +LIBCURL_LIBS = @LIBCURL_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@ +LIBMONGOC_LIBS = @LIBMONGOC_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBSSL_CFLAGS = @LIBSSL_CFLAGS@ +LIBSSL_LIBS = @LIBSSL_LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@ +OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@ +OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@ +OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@ +OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@ +OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@ +OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@ +OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@ +OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@ +OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@ +OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@ +OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@ +OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@ +PROTOBUF_LIBS = @PROTOBUF_LIBS@ +PROTOC = @PROTOC@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@ +XENLIGHT_LIBS = @XENLIGHT_LIBS@ +YAJL_CFLAGS = @YAJL_CFLAGS@ +YAJL_LIBS = 
@YAJL_LIBS@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/badges/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/api/badges/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ + ctags-am distclean distclean-generic distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/api/badges/README.md b/web/api/badges/README.md index 6884cc11a..75c30abff 100644 --- a/web/api/badges/README.md +++ b/web/api/badges/README.md @@ -6,11 +6,11 @@ Netdata can generate badges for any chart and any dimension at any time-frame. B **Netdata badges are powerful**! -Given that netdata collects from **1.000** to **5.000** metrics per server (depending on the number of network interfaces, disks, cpu cores, applications running, users logged in, containers running, etc) and that netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful. +Given that Netdata collects from **1.000** to **5.000** metrics per server (depending on the number of network interfaces, disks, cpu cores, applications running, users logged in, containers running, etc) and that Netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful. For each metric/dimension and for arbitrary time-frames badges can show **min**, **max** or **average** value, but also **sum** or **incremental-sum** to have their **volume**. -For example, there is [a chart in netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc): +For example, there is [a chart in Netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc): @@ -18,25 +18,25 @@ Similarly, there is [a chart that shows outbound bandwidth per class](http://lon -The right one is a **volume** calculation. 
Netdata calculated the total of the last 86.400 seconds (a day) which gives `kilobits`, then divided it by 8 to make it KB, then by 1024 to make it MB and then by 1024 to make it GB. Calculations like this are quite accurate, since for every value collected, every second, netdata interpolates it to second boundary using microsecond calculations. +The right one is a **volume** calculation. Netdata calculated the total of the last 86.400 seconds (a day) which gives `kilobits`, then divided it by 8 to make it KB, then by 1024 to make it MB and then by 1024 to make it GB. Calculations like this are quite accurate, since for every value collected, every second, Netdata interpolates it to second boundary using microsecond calculations. -Let's see a few more badge examples (they come from the [netdata registry](../../../registry/)): +Let's see a few more badge examples (they come from the [Netdata registry](../../../registry/)): -- **cpu usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define thresholds and colors on the URL). +- **cpu usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define thresholds and colors on the URL). - + -- **mysql queries per second** +- **mysql queries per second** - + - niche ones: **mysql SELECT statements with JOIN, which did full table scans**: + niche ones: **mysql SELECT statements with JOIN, which did full table scans**: - + --- -> So, every single line on the charts of a [netdata dashboard](http://london.my-netdata.io/), can become a badge and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! And you can also vary the badge color using conditions on the calculated value. +> So, every single line on the charts of a [Netdata dashboard](http://london.my-netdata.io/), can become a badge and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! And you can also vary the badge color using conditions on the calculated value. --- @@ -44,15 +44,15 @@ Let's see a few more badge examples (they come from the [netdata registry](../.. The basic URL is `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`. -Here is what you can put for `options` (these are standard netdata API options): +Here is what you can put for `options` (these are standard Netdata API options): -- `chart=CHART.NAME` +- `chart=CHART.NAME` - The chart to get the values from. + The chart to get the values from. - **This is the only parameter required** and with just this parameter, netdata will return the sum of the latest values of all chart dimensions. + **This is the only parameter required** and with just this parameter, Netdata will return the sum of the latest values of all chart dimensions. - Example: + Example: ```html @@ -66,21 +66,21 @@ Here is what you can put for `options` (these are standard netdata API options): -- `alarm=NAME` +- `alarm=NAME` - Render the current value and status of an alarm linked to the chart. This option can be ignored if the badge to be generated is not related to an alarm. + Render the current value and status of an alarm linked to the chart. This option can be ignored if the badge to be generated is not related to an alarm. - The current value of the alarm will be rendered. The color of the badge will indicate the status of the alarm. 
+ The current value of the alarm will be rendered. The color of the badge will indicate the status of the alarm. - For alarm badges, **both `chart` and `alarm` parameters are required**. + For alarm badges, **both `chart` and `alarm` parameters are required**. -- `dimensions=DIMENSION1|DIMENSION2|...` +- `dimensions=DIMENSION1|DIMENSION2|...` - The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, netdata will sum their values. You can append `options=absolute` if you want this sum to convert all values to positive before adding them. + The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, Netdata will sum their values. You can append `options=absolute` if you want this sum to convert all values to positive before adding them. - Pipes in HTML have to escaped with `%7C`. + Pipes in HTML have to escaped with `%7C`. - Example: + Example: ```html @@ -94,15 +94,15 @@ Here is what you can put for `options` (these are standard netdata API options): -- `before=SECONDS` and `after=SECONDS` +- `before=SECONDS` and `after=SECONDS` - The timeframe. These can be absolute unix timestamps, or relative to now, number of seconds. By default `before=0` and `after=-1` (1 second in the past). + The timeframe. These can be absolute unix timestamps, or relative to now, number of seconds. By default `before=0` and `after=-1` (1 second in the past). - To get the last minute set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59). + To get the last minute set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59). - To get the max of the last hour set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59) + To get the max of the last hour set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59) - Example: + Example: ```html @@ -125,109 +125,108 @@ Here is what you can put for `options` (these are standard netdata API options): ``` It produces this: - + -- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum` +- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum` - If netdata will have to reduce (aggregate) the data to calculate the value, which aggregation method to use. + If Netdata will have to reduce (aggregate) the data to calculate the value, which aggregation method to use. - - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value. + - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value. - - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero. + - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero. - - `average` will calculate the average value for the timeframe. + - `average` will calculate the average value for the timeframe. - - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. 
So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe, by adding its values in that timeframe. + - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe, by adding its values in that timeframe. - - `incremental-sum` will sum the difference of each value to its next. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1). + - `incremental-sum` will sum the difference of each value to its next. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1). -- `options=opt1|opt2|opt3|...` +- `options=opt1|opt2|opt3|...` - These fine tune various options of the API. Here is what you can use for badges (the API has more option, but only these are useful for badges): + These fine tune various options of the API. Here is what you can use for badges (the API has more option, but only these are useful for badges): - - `percentage`, instead of returning the value, calculate the percentage of the sum of the selected dimensions, versus the sum of all the dimensions of the chart. This also sets the units to `%`. + - `percentage`, instead of returning the value, calculate the percentage of the sum of the selected dimensions, versus the sum of all the dimensions of the chart. This also sets the units to `%`. - - `absolute` or `abs`, turn all values positive and then sum them. + - `absolute` or `abs`, turn all values positive and then sum them. - - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge. + - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge. - - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`. + - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`. - - `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), netdata by default aligns them so that the charts will have a constant shape (so average per minute returns always XX:XX:00 - XX:XX:59). Setting the `unaligned` option, netdata will aggregate data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data. + - `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), Netdata by default aligns them so that the charts will have a constant shape (so average per minute returns always XX:XX:00 - XX:XX:59). Setting the `unaligned` option, Netdata will aggregate data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data. 
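To tie the query options above together, here is a minimal sketch of a composite badge URL. The host name is a placeholder and `system.cpu` with its `user`/`system` dimensions is only an assumed example chart, so substitute names that exist on your own Netdata server:

```html
<!-- average of the user and system dimensions of system.cpu over the last hour,
     aggregated without alignment; the pipe between dimensions is escaped as %7C -->
<img src="http://your.netdata:19999/api/v1/badge.svg?chart=system.cpu&dimensions=user%7Csystem&after=-3600&group=average&options=unaligned"/>
```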
These are options dedicated to badges: -- `label=TEXT` - - The label of the badge. +- `label=TEXT` -- `units=TEXT` + The label of the badge. - The units of the badge. If you want to put a `/`, please put a `\`. This is because netdata allows badges parameters to be given as path in URL, instead of query string. You can also use `null` or `empty` to show it without any units. +- `units=TEXT` - The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and netdata will automatically change it to show a more pretty duration. + The units of the badge. If you want to put a `/`, please put a `\`. This is because Netdata allows badges parameters to be given as path in URL, instead of query string. You can also use `null` or `empty` to show it without any units. -- `multiply=NUMBER` + The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and Netdata will automatically change it to show a more pretty duration. - Multiply the value with this number. The default is `1`. +- `multiply=NUMBER` -- `divide=NUMBER` + Multiply the value with this number. The default is `1`. - Divide the value with this number. The default is `1`. +- `divide=NUMBER` -- `label_color=COLOR` + Divide the value with this number. The default is `1`. - The color of the label (the left part). You can use any HTML color, include `#NNN` and `#NNNNNN`. The following colors are defined in netdata (and you can use them by name): `green`, `brightgreen`, `yellow`, `yellowgreen`, `orange`, `red`, `blue`, `grey`, `gray`, `lightgrey`, `lightgray`. These are taken from https://github.com/badges/shields so they are compatible with standard badges. +- `label_color=COLOR` -- `value_color=COLOR:null|COLORVALUE|COLOR>=VALUE|COLOR<=VALUE|...` + The color of the label (the left part). You can use any HTML color, include `#NNN` and `#NNNNNN`. The following colors are defined in Netdata (and you can use them by name): `green`, `brightgreen`, `yellow`, `yellowgreen`, `orange`, `red`, `blue`, `grey`, `gray`, `lightgrey`, `lightgray`. These are taken from so they are compatible with standard badges. - You can add a pipe delimited list of conditions to pick the color. The first matching (left to right) will be used. +- `value_color=COLOR:null|COLORVALUE|COLOR>=VALUE|COLOR<=VALUE|...` - Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red` + You can add a pipe delimited list of conditions to pick the color. The first matching (left to right) will be used. - The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in netdata.conf for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, etc up to `red` which will be used if no other conditions match. + Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red` - The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`) and `!=` (or `<>`). + The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in `netdata.conf` for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, etc up to `red` which will be used if no other conditions match. -- `precision=NUMBER` + The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`) and `!=` (or `<>`). - The number of decimal digits of the value. 
By default netdata will add: +- `precision=NUMBER` - - no decimal digits for values > 1000 - - 1 decimal digit for values > 100 - - 2 decimal digits for values > 1 - - 3 decimal digits for values > 0.1 - - 4 decimal digits for values <= 0.1 + The number of decimal digits of the value. By default Netdata will add: - Using the `precision=NUMBER` you can set your preference per badge. + - no decimal digits for values > 1000 + - 1 decimal digit for values > 100 + - 2 decimal digits for values > 1 + - 3 decimal digits for values > 0.1 + - 4 decimal digits for values \<= 0.1 -- `scale=XXX` + Using the `precision=NUMBER` you can set your preference per badge. - This option scales the svg image. It accepts values above or equal to 100 (100% is the default scale). For example, lets get a few different sizes: +- `scale=XXX` - original
- `scale=125`
- `scale=150`
- `scale=175`
- `scale=200` + This option scales the svg image. It accepts values above or equal to 100 (100% is the default scale). For example, lets get a few different sizes: + original
+ `scale=125`
+ `scale=150`
+ `scale=175`
+ `scale=200` -- `refresh=auto` or `refresh=SECONDS` +- `refresh=auto` or `refresh=SECONDS` - This option enables auto-refreshing of images. netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals. + This option enables auto-refreshing of images. Netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals. - `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default). + `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default). - Auto-refreshing like this, works only if you access the badge directly. So, you may have to put it an `embed` or `iframe` for it to be auto-refreshed. Use something like this: + Auto-refreshing like this, works only if you access the badge directly. So, you may have to put it an `embed` or `iframe` for it to be auto-refreshed. Use something like this: ```html ``` - Another way is to use javascript to auto-refresh them. You can auto-refresh all the netdata badges on a page using javascript. You have to add a class to all the netdata badges, like this ``. Then add this javascript code to your page (it requires jquery): + Another way is to use javascript to auto-refresh them. You can auto-refresh all the Netdata badges on a page using javascript. You have to add a class to all the Netdata badges, like this ``. Then add this javascript code to your page (it requires jquery): ```html @@ -67,23 +65,23 @@ For example, if your netdata server listens at `http://box:19999/`, you will nee `dashboard.js` will automatically load the following: -1. `dashboard.css`, required for the netdata charts +1. `dashboard.css`, required for the Netdata charts -2. `jquery.min.js`, (only if jquery is not already loaded for this web page) +2. `jquery.min.js`, (only if jquery is not already loaded for this web page) -3. `bootstrap.min.js` (only if bootstrap is not already loaded) and `bootstrap.min.css`. +3. `bootstrap.min.js` (only if bootstrap is not already loaded) and `bootstrap.min.css`. - You can disable this by adding the following before loading `dashboard.js`: + You can disable this by adding the following before loading `dashboard.js`: ```html ``` -4. `jquery.nanoscroller.min.js`, required for the scrollbar of the chart legends. +4. `jquery.nanoscroller.min.js`, required for the scrollbar of the chart legends. -5. `bootstrap-toggle.min.js` and `bootstrap-toggle.min.css`, required for the settings toggle buttons. +5. `bootstrap-toggle.min.js` and `bootstrap-toggle.min.css`, required for the settings toggle buttons. -6. `font-awesome.min.css`, for icons. +6. `font-awesome.min.css`, for icons. When `dashboard.js` loads will scan the page for elements that define charts (see below) and immediately start refreshing them. Keep in mind more javascript modules may be loaded (every chart library is a different javascript file, that is loaded on first use). @@ -117,11 +115,11 @@ NETDATA.pause(function() { }); ``` -### The default netdata server +### The default Netdata server -`dashboard.js` will attempt to auto-detect the URL of the netdata server it is loaded from, and set this server as the default netdata server for all charts. 
+`dashboard.js` will attempt to auto-detect the URL of the Netdata server it is loaded from, and set this server as the default Netdata server for all charts. -If you need to set any other URL as the default netdata server for all charts that do not specify a netdata server, add this before loading `dashboard.js`: +If you need to set any other URL as the default Netdata server for all charts that do not specify a Netdata server, add this before loading `dashboard.js`: ```html @@ -129,13 +127,13 @@ If you need to set any other URL as the default netdata server for all charts th --- -# Adding charts +## Adding charts To add charts, you need to add a `div` for each of them. Each of these `div` elements accept a few `data-` attributes: ### The chart unique ID -The unique ID of a chart is shown at the title of the chart of the default netdata dashboard. You can also find all the charts available at your netdata server with this URL: `http://your.netdata.server:19999/api/v1/charts` ([example](http://netdata.firehol.org/api/v1/charts)). +The unique ID of a chart is shown at the title of the chart of the default Netdata dashboard. You can also find all the charts available at your Netdata server with this URL: `http://your.netdata.server:19999/api/v1/charts` ([example](http://netdata.firehol.org/api/v1/charts)). To specify the unique id, use this: @@ -160,9 +158,9 @@ You can specify the duration of the chart (how much time of data it will show) u The can be either: -- **absolute** unix timestamps (in javascript terms, they are `new Date().getTime() / 1000`. Using absolute timestamps you can have a chart showing always the same time-frame. +- **absolute** unix timestamps (in javascript terms, they are `new Date().getTime() / 1000`. Using absolute timestamps you can have a chart showing always the same time-frame. -- **relative** number of seconds to now. To show the last 10 minutes of data, `AFTER_SECONDS` must be `-600` (relative to now) and `BEFORE_SECONDS` must be `0` (meaning: now). If you want the chart to auto-refresh the current values, you need to specify **relative** values. +- **relative** number of seconds to now. To show the last 10 minutes of data, `AFTER_SECONDS` must be `-600` (relative to now) and `BEFORE_SECONDS` must be `0` (meaning: now). If you want the chart to auto-refresh the current values, you need to specify **relative** values. ### Chart sizes @@ -182,7 +180,7 @@ If you want `dashboard.js` to remember permanently (browser local storage) the d ### Netdata server -Each chart can get data from a different netdata server. You can give per chart the netdata server using: +Each chart can get data from a different Netdata server. You can give per chart the Netdata server using: ```html
``` + ### Chart library The default chart library is `dygraph`. You set a different chart library per chart using this: @@ -209,21 +208,20 @@ The default chart library is `dygraph`. You set a different chart library per ch Each chart library may support more chart-library specific settings. Please refer to the documentation of the chart library you are interested, in this wiki or the source code: -- options `data-dygraph-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L6251-L6361) -- options `data-easypiechart-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L7954-L7966) -- options `data-gauge-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L8182-L8189) -- options `data-d3pie-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L7394-L7561) -- options `data-sparkline-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L5940-L5985) -- options `data-peity-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L5892) - +- options `data-dygraph-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L6251-L6361) +- options `data-easypiechart-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L7954-L7966) +- options `data-gauge-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L8182-L8189) +- options `data-d3pie-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L7394-L7561) +- options `data-sparkline-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L5940-L5985) +- options `data-peity-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L5892) ### Data points For the time-frame requested, `dashboard.js` will use the chart dimensions and the settings of the chart library to find out how many data points it can show. -For example, most line chart libraries are using 3 pixels per data point. If the chart shows 10 minutes of data (600 seconds), its update frequency is 1 second, and the chart width is 1800 pixels, then `dashboard.js` will request from the netdata server: 10 minutes of data, represented in 600 points, and the chart will be refreshed per second. If the user resizes the window so that the chart becomes 600 pixels wide, then `dashboard.js` will request the same 10 minutes of data, represented in 200 points and the chart will be refreshed once every 3 seconds. +For example, most line chart libraries are using 3 pixels per data point. If the chart shows 10 minutes of data (600 seconds), its update frequency is 1 second, and the chart width is 1800 pixels, then `dashboard.js` will request from the Netdata server: 10 minutes of data, represented in 600 points, and the chart will be refreshed per second. If the user resizes the window so that the chart becomes 600 pixels wide, then `dashboard.js` will request the same 10 minutes of data, represented in 200 points and the chart will be refreshed once every 3 seconds. 
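As a quick sketch combining the attributes described above (the chart id, host and sizes are placeholder values; any of the chart libraries listed earlier could be substituted for `gauge`):

```html
<!-- last 10 minutes of an assumed system.cpu chart, drawn as a gauge;
     dashboard.js derives the number of data points from the element size -->
<div data-netdata="system.cpu"
     data-host="http://your.netdata:19999/"
     data-chart-library="gauge"
     data-after="-600"
     data-before="0"
     data-width="200px"
     data-height="200px"
></div>
```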
-If you need to have a fixed number of points in the data source retrieved from the netdata server, you can set: +If you need to have a fixed number of points in the data source retrieved from the Netdata server, you can set: ```html
``` -netdata supports coma (` , `) or pipe (` | `) separated [simple patterns](../../../libnetdata/simple_pattern/) for dimensions. By default it searches for both dimension IDs and dimension NAMEs. You can control the target of the match with: `data-append-options="match-ids"` or `data-append-options="match-names"`. Spaces in `data-dimensions=""` are matched in the dimension names and IDs. +Netdata supports coma (`,`) or pipe (`|`) separated [simple patterns](../../../libnetdata/simple_pattern/) for dimensions. By default it searches for both dimension IDs and dimension NAMEs. You can control the target of the match with: `data-append-options="match-ids"` or `data-append-options="match-names"`. Spaces in `data-dimensions=""` are matched in the dimension names and IDs. ### Chart title @@ -344,7 +342,7 @@ On charts that by default have a legend managed by `dashboard.js` you can remove ### API options -You can append netdata **[REST API v1](../../api)** data options, using this: +You can append Netdata **[REST API v1](../../api)** data options, using this: ```html
milliseconds! ### Syncing charts y-range -If you give the same `data-common-max="NAME"` to 2+ charts, then all of them will share the same max value of their y-range. If one spikes, all of them will be aligned to have the same scale. This is done for the cpu interrupts and and cpu softnet charts at the dashboard and also for the `gauge` and `easypiecharts` of the netdata home page. +If you give the same `data-common-max="NAME"` to 2+ charts, then all of them will share the same max value of their y-range. If one spikes, all of them will be aligned to have the same scale. This is done for the cpu interrupts and and cpu softnet charts at the dashboard and also for the `gauge` and `easypiecharts` of the Netdata home page. ```html
``` - -[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fcustom%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fcustom%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>) diff --git a/web/gui/dashboard.css b/web/gui/dashboard.css index 8062497d0..674131a1d 100644 --- a/web/gui/dashboard.css +++ b/web/gui/dashboard.css @@ -737,3 +737,21 @@ body { .ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y { background-color: #999; /* scrollbar color on hover */ } + +.dygraph__history-tip { + position: absolute; + top: 50%; + transform: translateY(-50%); + display: none; /* overriden in js */ + margin-right: 25px; + direction: rtl; + overflow: hidden; + pointer-events: none; +} + +.dygraph__history-tip-content { + display: inline-block; + white-space: nowrap; + direction: ltr; + pointer-events: auto; +} diff --git a/web/gui/dashboard.js b/web/gui/dashboard.js index a32a29be7..0c379dc05 100644 --- a/web/gui/dashboard.js +++ b/web/gui/dashboard.js @@ -2281,8 +2281,28 @@ NETDATA.dygraphChartCreate = function (state, data) { NETDATA.globalSelectionSync.stop(); }, underlayCallback: function (canvas, area, g) { - // the chart is about to be drawn + + // update history_tip_element + if (state.tmp.dygraph_history_tip_element) { + const xHookRightSide = g.toDomXCoord(state.netdata_first); + if (xHookRightSide > area.x) { + state.tmp.dygraph_history_tip_element_displayed = true; + // group the styles for possible better performance + state.tmp.dygraph_history_tip_element.setAttribute( + 'style', + `display: block; left: ${area.x}px; right: calc(100% - ${xHookRightSide}px);` + ) + } else { + if (state.tmp.dygraph_history_tip_element_displayed) { + // additional check just for performance + // don't update the DOM when it's not needed + state.tmp.dygraph_history_tip_element.style.display = 'none'; + state.tmp.dygraph_history_tip_element_displayed = false; + } + } + } + // this function renders global highlighted time-frame if (NETDATA.globalChartUnderlay.isActive()) { @@ -2751,6 +2771,21 @@ NETDATA.dygraphChartCreate = function (state, data) { state.tmp.dygraph_instance = new Dygraph(state.element_chart, data.result.data, state.tmp.dygraph_options); + + state.tmp.dygraph_history_tip_element = document.createElement('div'); + state.tmp.dygraph_history_tip_element.innerHTML = ` + + Want to extend your history of real-time metrics? +
+ + Configure Netdata's history + or use the DB engine. +
+ `; + state.tmp.dygraph_history_tip_element.className = 'dygraph__history-tip'; + state.element_chart.appendChild(state.tmp.dygraph_history_tip_element); + + state.tmp.dygraph_force_zoom = false; state.tmp.dygraph_user_action = false; state.tmp.dygraph_last_rendered = Date.now(); diff --git a/web/gui/dashboard.slate.css b/web/gui/dashboard.slate.css index f1c9c4101..af3382521 100644 --- a/web/gui/dashboard.slate.css +++ b/web/gui/dashboard.slate.css @@ -755,3 +755,21 @@ code { .ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y { background-color: #999; /* scrollbar color on hover */ } + +.dygraph__history-tip { + position: absolute; + top: 50%; + transform: translateY(-50%); + display: none; /* overriden in js */ + margin-right: 25px; + direction: rtl; + overflow: hidden; + pointer-events: none; +} + +.dygraph__history-tip-content { + display: inline-block; + white-space: nowrap; + direction: ltr; + pointer-events: auto; +} diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js index df9242646..130162be3 100644 --- a/web/gui/dashboard_info.js +++ b/web/gui/dashboard_info.js @@ -486,6 +486,12 @@ netdataDashboard.menu = { title: 'Perf Counters', icon: '', info: 'Performance Monitoring Counters (PMC). Data collected using perf_event_open() system call which utilises Hardware Performance Monitoring Units (PMU).' + }, + + 'vsphere': { + title: 'vSphere', + icon: '', + info: 'Performance statistics for ESXI hosts and virtual machines. Data collected from VMware vCenter Server using govmomi library.' } }; @@ -826,6 +832,32 @@ netdataDashboard.context = { ] }, + 'mem.zram_usage': { + info: 'ZRAM total RAM usage metrics. ZRAM uses some memory to store metadata about stored memory pages, thus introducing an overhead which is proportional to disk size. It excludes same-element-filled-pages since no memory is allocated for them.' + }, + + 'mem.zram_savings': { + info: 'Displays original and compressed memory data sizes.' + }, + + 'mem.zram_ratio': { + heads: [ + netdataDashboard.gaugeChart('Compression Ratio', '12%', 'ratio', '#0099CC') + ], + info: 'Compression ratio, calculated as 100 * original_size / compressed_size. More means better compression and more RAM savings.' + }, + + 'mem.zram_efficiency': { + heads: [ + netdataDashboard.gaugeChart('Efficiency', '12%', 'percent', NETDATA.colors[0]) + ], + commonMin: true, + commonMax: true, + valueRange: "[0, 100]", + info: 'Memory usage efficiency, calculated as 100 * compressed_size / total_mem_used.' + }, + + 'mem.pgfaults': { info: 'A page fault is a type of interrupt, called trap, raised by computer hardware when a running program accesses a memory page that is mapped into the virtual address space, but not actually loaded into main memory. If the page is loaded in memory at the time the fault is generated, but is not marked in the memory management unit as being loaded in memory, then it is called a minor or soft page fault. A major page fault is generated when the system needs to load the memory page from disk or swap memory.' }, @@ -953,6 +985,10 @@ netdataDashboard.context = { height: 2.0 }, + 'apps.uptime': { + info: 'Carried over process group uptime since the Netdata restart. The period of time within which at least one process in the group was running.' + }, + // ------------------------------------------------------------------------ // USERS @@ -976,6 +1012,10 @@ netdataDashboard.context = { height: 2.0 }, + 'users.uptime': { + info: 'Carried over process group uptime since the Netdata restart. 
The period of time within which at least one process in the group was running.' + }, + // ------------------------------------------------------------------------ // GROUPS @@ -988,7 +1028,7 @@ netdataDashboard.context = { }, 'groups.vmem': { - info: 'Virtual memory allocated per user group. Please check this article for more information.' + info: 'Virtual memory allocated per user group since the Netdata restart. Please check this article for more information.' }, 'groups.preads': { @@ -999,6 +1039,10 @@ netdataDashboard.context = { height: 2.0 }, + 'groups.uptime': { + info: 'Carried over process group uptime. The period of time within which at least one process in the group was running.' + }, + // ------------------------------------------------------------------------ // NETWORK QoS @@ -2296,7 +2340,7 @@ netdataDashboard.context = { }, 'spigotmc.users': { - info: 'THe number of currently connect users on the monitored Spigot server.' + info: 'The number of currently connect users on the monitored Spigot server.' }, 'unbound.queries': { @@ -2442,7 +2486,100 @@ netdataDashboard.context = { 'powersupply.voltage': { info: undefined - } + }, // ------------------------------------------------------------------------ + // VMware vSphere + + // Host specific + 'vsphere.host_mem_usage_percentage': { + info: 'Percentage of used machine memory: consumed / machine-memory-size.' + }, + + 'vsphere.host_mem_usage': { + info: + 'granted is amount of machine memory that is mapped for a host, '+ + 'it equals sum of all granted metrics for all powered-on virtual machines, plus machine memory for vSphere services on the host. ' + + 'consumed is amount of machine memory used on the host, it includes memory used by the Service Console, the VMkernel, vSphere services, plus the total consumed metrics for all running virtual machines. ' + + 'consumed = total host memory - free host memory.' + + 'active is sum of all active metrics for all powered-on virtual machines plus vSphere services (such as COS, vpxa) on the host.' + + 'shared is sum of all shared metrics for all powered-on virtual machines, plus amount for vSphere services on the host. ' + + 'sharedcommon is amount of machine memory that is shared by all powered-on virtual machines and vSphere services on the host. ' + + 'shared - sharedcommon = machine memory (host memory) savings (KB). ' + + 'For details see Measuring and Differentiating Types of Memory Usage and ' + + 'Memory Counters articles.' + }, + + 'vsphere.host_mem_swap_rate': { + info: + 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' + + 'in is sum of swapinRate values for all powered-on virtual machines on the host.' + + 'swapinRate is rate at which VMKernel reads data into machine memory from the swap file. ' + + 'out is sum of swapoutRate values for all powered-on virtual machines on the host.' + + 'swapoutRate is rate at which VMkernel writes to the virtual machine’s swap file from machine memory.' + }, + + // VM specific + 'vsphere.vm_mem_usage_percentage': { + info: 'Percentage of used virtual machine “physical” memory: actvive / virtual machine configured size.' + }, + + 'vsphere.vm_mem_usage': { + info: + 'granted is amount of guest “physical” memory that is mapped to machine memory, it includes shared memory amount. ' + + 'consumed is amount of guest “physical” memory consumed by the virtual machine for guest memory, ' + + 'consumed = granted - memory saved due to memory sharing. 
' + + 'active is amount of memory that is actively used, as estimated by VMkernel based on recently touched memory pages. ' + + 'shared is amount of guest “physical” memory shared with other virtual machines (through the VMkernel’s transparent page-sharing mechanism, a RAM de-duplication technique). ' + + 'For details see Measuring and Differentiating Types of Memory Usage and ' + + 'Memory Counters articles.' + + }, + + 'vsphere.vm_mem_swap_rate': { + info: + 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' + + 'in is rate at which VMKernel reads data into machine memory from the swap file. ' + + 'out is rate at which VMkernel writes to the virtual machine’s swap file from machine memory.' + }, + + 'vsphere.vm_mem_swap': { + info: + 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' + + 'swapped is amount of guest physical memory swapped out to the virtual machine\'s swap file by the VMkernel. ' + + 'Swapped memory stays on disk until the virtual machine needs it.' + }, + + // Common + 'vsphere.cpu_usage_total': { + info: 'Summary CPU usage statistics across all CPUs/cores.' + }, + + 'vsphere.net_bandwidth_total': { + info: 'Summary receive/transmit statistics across all network interfaces.' + }, + + 'vsphere.net_packets_total': { + info: 'Summary receive/transmit statistics across all network interfaces.' + }, + + 'vsphere.net_errors_total': { + info: 'Summary receive/transmit statistics across all network interfaces.' + }, + + 'vsphere.net_drops_total': { + info: 'Summary receive/transmit statistics across all network interfaces.' + }, + + 'vsphere.disk_usage_total': { + info: 'Summary read/write statistics across all disks.' + }, + + 'vsphere.disk_max_latency': { + info: 'latency is highest latency value across all disks.' + }, + + 'vsphere.overall_status': { + info: '0 is unknown, 1 is OK, 2 is might have a problem, 3 is definitely has a problem.' + } }; diff --git a/web/gui/images/favicon-128.png b/web/gui/images/favicon-128.png deleted file mode 100644 index 5371f920c..000000000 Binary files a/web/gui/images/favicon-128.png and /dev/null differ diff --git a/web/gui/images/favicon-196x196.png b/web/gui/images/favicon-196x196.png deleted file mode 100644 index a208c27fa..000000000 Binary files a/web/gui/images/favicon-196x196.png and /dev/null differ diff --git a/web/gui/images/ms-icon-310x150.png b/web/gui/images/ms-icon-310x150.png deleted file mode 100644 index 5d4ac57b6..000000000 Binary files a/web/gui/images/ms-icon-310x150.png and /dev/null differ diff --git a/web/gui/images/ms-icon-36x36.png b/web/gui/images/ms-icon-36x36.png deleted file mode 100644 index e251302ed..000000000 Binary files a/web/gui/images/ms-icon-36x36.png and /dev/null differ diff --git a/web/gui/images/packaging-beta-tag.svg b/web/gui/images/packaging-beta-tag.svg deleted file mode 100644 index cebdc0847..000000000 --- a/web/gui/images/packaging-beta-tag.svg +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/web/gui/images/seo-performance-128.png b/web/gui/images/seo-performance-128.png deleted file mode 100644 index 2a212a475..000000000 Binary files a/web/gui/images/seo-performance-128.png and /dev/null differ diff --git a/web/gui/main.js b/web/gui/main.js index 1214eba6f..6aebb8ed3 100644 --- a/web/gui/main.js +++ b/web/gui/main.js @@ -1788,7 +1788,19 @@ function renderPage(menus, data) { sidebar += '
  • add more charts
  • '; sidebar += '
  • add more alarms
  • '; - sidebar += '
  • netdata on ' + data.hostname.toString() + ', collects every ' + ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ' ' + data.dimensions_count.toLocaleString() + ' metrics, presented as ' + data.charts_count.toLocaleString() + ' charts and monitored by ' + data.alarms_count.toLocaleString() + ' alarms, using ' + Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory for ' + NETDATA.seconds4human(data.update_every * data.history, { space: ' ' }) + ' of real-time history.
     
    netdata
    ' + data.version.toString() + '
  • '; + sidebar += '
  • Every ' + + ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ', ' + + 'Netdata collects ' + data.dimensions_count.toLocaleString() + ' metrics, presents them in ' + + data.charts_count.toLocaleString() + ' charts and monitors them with ' + + data.alarms_count.toLocaleString() + ' alarms. Netdata is using ' + + Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory on ' + + data.hostname.toString() + ' for ' + + NETDATA.seconds4human(data.update_every * data.history, { + minute: 'minute', minutes: 'minutes', second: 'second', seconds: 'seconds', space: ' ', + }) + + ' of real-time history.
     
    ' + 'Get more history by ' + + 'configuring Netdata\'s history or using the DB engine.' + + '
     
    netdata
    ' + data.version.toString() + '
  • '; sidebar += ''; div.innerHTML = html; document.getElementById('sidebar').innerHTML = sidebar; diff --git a/web/gui/manifest.json b/web/gui/manifest.json deleted file mode 100644 index 52cb4831c..000000000 --- a/web/gui/manifest.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "name": "App", - "icons": [ - { - "src": "images\/android-icon-36x36.png", - "sizes": "36x36", - "type": "image\/png", - "density": "0.75" - }, - { - "src": "images\/android-icon-48x48.png", - "sizes": "48x48", - "type": "image\/png", - "density": "1.0" - }, - { - "src": "images\/android-icon-72x72.png", - "sizes": "72x72", - "type": "image\/png", - "density": "1.5" - }, - { - "src": "images\/android-icon-96x96.png", - "sizes": "96x96", - "type": "image\/png", - "density": "2.0" - }, - { - "src": "images\/android-icon-144x144.png", - "sizes": "144x144", - "type": "image\/png", - "density": "3.0" - }, - { - "src": "images\/android-icon-192x192.png", - "sizes": "192x192", - "type": "image\/png", - "density": "4.0" - } - ] -} diff --git a/web/gui/src/dashboard.js/charting/_c3.js b/web/gui/src/dashboard.js/charting/_c3.js deleted file mode 100644 index 6688bbcce..000000000 --- a/web/gui/src/dashboard.js/charting/_c3.js +++ /dev/null @@ -1,114 +0,0 @@ - -// DEPRECATED: will be removed! - -// c3 - -NETDATA.c3Initialize = function(callback) { - if (typeof netdataNoC3 === 'undefined' || !netdataNoC3) { - - // C3 requires D3 - if (!NETDATA.chartLibraries.d3.initialized) { - if (NETDATA.chartLibraries.d3.enabled) { - NETDATA.d3Initialize(function() { - NETDATA.c3Initialize(callback); - }); - } else { - NETDATA.chartLibraries.c3.enabled = false; - if (typeof callback === "function") - return callback(); - } - } else { - NETDATA._loadCSS(NETDATA.c3_css); - - $.ajax({ - url: NETDATA.c3_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('c3', NETDATA.c3_js); - }) - .fail(function() { - NETDATA.chartLibraries.c3.enabled = false; - NETDATA.error(100, NETDATA.c3_js); - }) - .always(function() { - if (typeof callback === "function") - return callback(); - }); - } - } else { - NETDATA.chartLibraries.c3.enabled = false; - if (typeof callback === "function") - return callback(); - } -}; - -NETDATA.c3ChartUpdate = function(state, data) { - state.c3_instance.destroy(); - return NETDATA.c3ChartCreate(state, data); - - //state.c3_instance.load({ - // rows: data.result, - // unload: true - //}); - - //return true; -}; - -NETDATA.c3ChartCreate = function(state, data) { - - state.element_chart.id = 'c3-' + state.uuid; - // console.log('id = ' + state.element_chart.id); - - state.c3_instance = c3.generate({ - bindto: '#' + state.element_chart.id, - size: { - width: state.chartWidth(), - height: state.chartHeight() - }, - color: { - pattern: state.chartColors() - }, - data: { - x: 'time', - rows: data.result, - type: (state.chart.chart_type === 'line')?'spline':'area-spline' - }, - axis: { - x: { - type: 'timeseries', - tick: { - format: function(x) { - return NETDATA.dateTime.xAxisTimeString(x); - } - } - } - }, - grid: { - x: { - show: true - }, - y: { - show: true - } - }, - point: { - show: false - }, - line: { - connectNull: false - }, - transition: { - duration: 0 - }, - interaction: { - enabled: true - } - }); - - // console.log(state.c3_instance); - - return true; -}; diff --git a/web/gui/src/dashboard.js/charting/_morris.js b/web/gui/src/dashboard.js/charting/_morris.js deleted file mode 100644 index 30789e4e2..000000000 --- 
a/web/gui/src/dashboard.js/charting/_morris.js +++ /dev/null @@ -1,81 +0,0 @@ - -// DEPRECATED: will be removed! - -// morris - -NETDATA.morrisInitialize = function(callback) { - if (typeof netdataNoMorris === 'undefined' || !netdataNoMorris) { - - // morris requires raphael - if (!NETDATA.chartLibraries.raphael.initialized) { - if (NETDATA.chartLibraries.raphael.enabled) { - NETDATA.raphaelInitialize(function() { - NETDATA.morrisInitialize(callback); - }); - } else { - NETDATA.chartLibraries.morris.enabled = false; - if (typeof callback === "function") - return callback(); - } - } else { - NETDATA._loadCSS(NETDATA.morris_css); - - $.ajax({ - url: NETDATA.morris_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('morris', NETDATA.morris_js); - }) - .fail(function() { - NETDATA.chartLibraries.morris.enabled = false; - NETDATA.error(100, NETDATA.morris_js); - }) - .always(function() { - if (typeof callback === "function") - return callback(); - }); - } - } else { - NETDATA.chartLibraries.morris.enabled = false; - if (typeof callback === "function") - return callback(); - } -}; - -NETDATA.morrisChartUpdate = function(state, data) { - state.morris_instance.setData(data.result.data); - return true; -}; - -NETDATA.morrisChartCreate = function(state, data) { - - state.morris_options = { - element: state.element_chart.id, - data: data.result.data, - xkey: 'time', - ykeys: data.dimension_names, - labels: data.dimension_names, - lineWidth: 2, - pointSize: 3, - smooth: true, - hideHover: 'auto', - parseTime: true, - continuousLine: false, - behaveLikeLine: false - }; - - if (state.chart.chart_type === 'line') - state.morris_instance = new Morris.Line(state.morris_options); - - else if (state.chart.chart_type === 'area') { - state.morris_options.behaveLikeLine = true; - state.morris_instance = new Morris.Area(state.morris_options); - } - else // stacked - state.morris_instance = new Morris.Area(state.morris_options); - - return true; -}; diff --git a/web/gui/src/dashboard.js/charting/_raphael.js b/web/gui/src/dashboard.js/charting/_raphael.js deleted file mode 100644 index 2d89a22a8..000000000 --- a/web/gui/src/dashboard.js/charting/_raphael.js +++ /dev/null @@ -1,48 +0,0 @@ - -// DEPRECATED: will be removed! 
- -// raphael - -NETDATA.raphaelInitialize = function(callback) { - if (typeof netdataStopRaphael === 'undefined' || !netdataStopRaphael) { - $.ajax({ - url: NETDATA.raphael_js, - cache: true, - dataType: "script", - xhrFields: { withCredentials: true } // required for the cookie - }) - .done(function() { - NETDATA.registerChartLibrary('raphael', NETDATA.raphael_js); - }) - .fail(function() { - NETDATA.chartLibraries.raphael.enabled = false; - NETDATA.error(100, NETDATA.raphael_js); - }) - .always(function() { - if (typeof callback === "function") - return callback(); - }); - } else { - NETDATA.chartLibraries.raphael.enabled = false; - if (typeof callback === "function") - return callback(); - } -}; - -NETDATA.raphaelChartUpdate = function(state, data) { - $(state.element_chart).raphael(data.result, { - width: state.chartWidth(), - height: state.chartHeight() - }); - - return false; -}; - -NETDATA.raphaelChartCreate = function(state, data) { - $(state.element_chart).raphael(data.result, { - width: state.chartWidth(), - height: state.chartHeight() - }); - - return false; -}; diff --git a/web/gui/src/dashboard.js/charting/dygraph.js b/web/gui/src/dashboard.js/charting/dygraph.js index a60af18b8..f34d2f4aa 100644 --- a/web/gui/src/dashboard.js/charting/dygraph.js +++ b/web/gui/src/dashboard.js/charting/dygraph.js @@ -483,8 +483,28 @@ NETDATA.dygraphChartCreate = function (state, data) { NETDATA.globalSelectionSync.stop(); }, underlayCallback: function (canvas, area, g) { - // the chart is about to be drawn + + // update history_tip_element + if (state.tmp.dygraph_history_tip_element) { + const xHookRightSide = g.toDomXCoord(state.netdata_first); + if (xHookRightSide > area.x) { + state.tmp.dygraph_history_tip_element_displayed = true; + // group the styles for possible better performance + state.tmp.dygraph_history_tip_element.setAttribute( + 'style', + `display: block; left: ${area.x}px; right: calc(100% - ${xHookRightSide}px);` + ) + } else { + if (state.tmp.dygraph_history_tip_element_displayed) { + // additional check just for performance + // don't update the DOM when it's not needed + state.tmp.dygraph_history_tip_element.style.display = 'none'; + state.tmp.dygraph_history_tip_element_displayed = false; + } + } + } + // this function renders global highlighted time-frame if (NETDATA.globalChartUnderlay.isActive()) { @@ -953,6 +973,21 @@ NETDATA.dygraphChartCreate = function (state, data) { state.tmp.dygraph_instance = new Dygraph(state.element_chart, data.result.data, state.tmp.dygraph_options); + + state.tmp.dygraph_history_tip_element = document.createElement('div'); + state.tmp.dygraph_history_tip_element.innerHTML = ` + + Want to extend your history of real-time metrics? +
    + + Configure Netdata's history + or use the DB engine. +
    + `; + state.tmp.dygraph_history_tip_element.className = 'dygraph__history-tip'; + state.element_chart.appendChild(state.tmp.dygraph_history_tip_element); + + state.tmp.dygraph_force_zoom = false; state.tmp.dygraph_user_action = false; state.tmp.dygraph_last_rendered = Date.now(); diff --git a/web/gui/version.txt b/web/gui/version.txt new file mode 100644 index 000000000..3c59cf509 --- /dev/null +++ b/web/gui/version.txt @@ -0,0 +1 @@ +588ce5a7b18999dfa66698cd3a2f005f7a3c31cf diff --git a/web/server/Makefile.in b/web/server/Makefile.in new file mode 100644 index 000000000..89ab7279b --- /dev/null +++ b/web/server/Makefile.in @@ -0,0 +1,698 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/server +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + 
$(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \ + $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CUPSCONFIG = @CUPSCONFIG@ +CXX = @CXX@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CXX_BINARY = @CXX_BINARY@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +JSON_CFLAGS = @JSON_CFLAGS@ +JSON_LIBS = @JSON_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@ +LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@ +LIBCURL_CFLAGS = @LIBCURL_CFLAGS@ +LIBCURL_LIBS = @LIBCURL_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@ +LIBMONGOC_LIBS = @LIBMONGOC_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBSSL_CFLAGS = @LIBSSL_CFLAGS@ +LIBSSL_LIBS = @LIBSSL_LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = @OBJEXT@ +OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@ +OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@ +OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@ +OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@ +OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@ +OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@ +OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@ +OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = 
@OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@ +OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@ +OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@ +OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@ +OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@ +OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@ +PROTOBUF_LIBS = @PROTOBUF_LIBS@ +PROTOC = @PROTOC@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@ +XENLIGHT_LIBS = @XENLIGHT_LIBS@ +YAJL_CFLAGS = @YAJL_CFLAGS@ +YAJL_LIBS = @YAJL_LIBS@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + static \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd 
$(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/server/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/server/README.md b/web/server/README.md index 173e89596..30cb00699 100644 --- a/web/server/README.md +++ b/web/server/README.md @@ -22,9 +22,9 @@ With the web server enabled, you can control the number of threads and sockets w The default number of processor threads is `min(cpu cores, 6)`. -The `web server max sockets` setting is automatically adjusted to 50% of the max number of open files netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection. +The `web server max sockets` setting is automatically adjusted to 50% of the max number of open files Netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection. -### Binding netdata to multiple ports +### Binding Netdata to multiple ports Netdata can bind to multiple IPs and ports, offering access to different services on each. Up to 100 sockets can be used (you can increase it at compile time with `CFLAGS="-DMAX_LISTEN_FDS=200" ./netdata-installer.sh ...`). @@ -36,15 +36,15 @@ The ports to bind are controlled via `[web].bind to`, like this: bind to = 127.0.0.1=dashboard^SSL=optional 10.1.1.1:19998=management|netdata.conf hostname:19997=badges [::]:19996=streaming^SSL=force localhost:19995=registry *:http=dashboard unix:/tmp/netdata.sock ``` -Using the above, netdata will bind to: +Using the above, Netdata will bind to: -- IPv4 127.0.0.1 at port 19999 (port was used from `default port`). Only the UI (dashboard) and the read API will be accessible on this port. 
Both HTTP and HTTPS requests will be accepted. -- IPv4 10.1.1.1 at port 19998. The management API and netdata.conf will be accessible on this port. -- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997. Only badges will be accessible on this port. -- All IPv6 IPs at port 19996. Only metric streaming requests from other netdata agents will be accepted on this port. Only encrypted streams will be allowed (i.e. slaves also need to be [configured for TLS](../../streaming). -- All the IPs `localhost` resolves to (both IPv4 and IPv6 depending the resolved IPs) at port 19996. This port will only accept registry API requests. -- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`. Only the UI (dashboard) and the read API will be accessible on this port. -- Unix domain socket `/tmp/netdata.sock`. All requests are serviceable on this socket. +- IPv4 127.0.0.1 at port 19999 (port was used from `default port`). Only the UI (dashboard) and the read API will be accessible on this port. Both HTTP and HTTPS requests will be accepted. +- IPv4 10.1.1.1 at port 19998. The management API and `netdata.conf` will be accessible on this port. +- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997. Only badges will be accessible on this port. +- All IPv6 IPs at port 19996. Only metric streaming requests from other Netdata agents will be accepted on this port. Only encrypted streams will be allowed (i.e. slaves also need to be [configured for TLS](../../streaming). +- All the IPs `localhost` resolves to (both IPv4 and IPv6 depending the resolved IPs) at port 19996. This port will only accept registry API requests. +- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`. Only the UI (dashboard) and the read API will be accessible on this port. +- Unix domain socket `/tmp/netdata.sock`. All requests are serviceable on this socket. The option `[web].default port` is used when an entries in `[web].bind to` do not specify a port. @@ -53,21 +53,22 @@ As shown in the example above, these permissions are optional, with the default The request types are strings identical to the `allow X from` directives of the access lists, i.e. `dashboard`, `streaming`, `registry`, `netdata.conf`, `badges` and `management`. The access lists themselves and the general setting `allow connections from` in the next section are applied regardless of the ports that are configured to provide these services. The API requests are serviced as follows: -- `dashboard` gives access to the UI, the read API and badges API calls. -- `badges` gives access only to the badges API calls. -- `management` gives access only to the management API calls. + +- `dashboard` gives access to the UI, the read API and badges API calls. +- `badges` gives access only to the badges API calls. +- `management` gives access only to the management API calls. ### Enabling TLS support Since v1.16.0, Netdata supports encrypted HTTP connections to the web server, plus encryption of streaming data between a slave and its master, via the TLS 1.2 protocol. -Inbound unix socket connections are unaffected, regardless of the TLS settings. +Inbound unix socket connections are unaffected, regardless of the TLS settings.\ ??? info "Differences in TLS and SSL terminology" While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol, it's still common practice to refer to encrypted web connections as `SSL`. 
Many vendors, like Nginx and even Netdata itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS` or `TLS/SSL`. To enable TLS, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`: -``` conf +```conf [web] ssl key = /etc/netdata/ssl/key.pem ssl certificate = /etc/netdata/ssl/cert.pem @@ -77,31 +78,31 @@ Both files must be readable by the `netdata` user. If either of these files do n For test purposes, you can generate self-signed certificates with the following command: -``` bash +```bash $ openssl req -newkey rsa:2048 -nodes -sha512 -x509 -days 365 -keyout key.pem -out cert.pem ``` !!! note If you use 4096 bits for your key and the certificate, Netdata will need more CPU to process the communication. `rsa4096` can be up to 4 times slower than `rsa2048`, so we recommend using 2048 bits. You can verify the difference by running: - - ``` - $ openssl speed rsa2048 rsa4096 - ``` + +```sh +$ openssl speed rsa2048 rsa4096 +``` #### TLS/SSL enforcement When the certificates are defined and unless any other options are provided, a Netdata server will: -- Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, netdata.conf and badges. -- Allow incoming slave connections to use both unencrypted and encrypted communications for streaming. - +- Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, `netdata.conf` and badges. +- Allow incoming slave connections to use both unencrypted and encrypted communications for streaming. + To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`. At the end of each port definition, you can append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming. -SSL setting | HTTP requests | HTTPS requests | Unencrypted Streams | Encrypted Streams -:------:|:-----:|:-----:|:-----:|:-------- -none | Redirected to HTTPS | Accepted | Accepted | Accepted -`force` | Redirected to HTTPS | Accepted | Denied | Accepted -`optional` | Accepted | Accepted | Accepted | Accepted +|SSL setting|HTTP requests|HTTPS requests|Unencrypted Streams|Encrypted Streams| +|:---------:|:-----------:|:------------:|:-----------------:|:----------------| +|none|Redirected to HTTPS|Accepted|Accepted|Accepted| +|`force`|Redirected to HTTPS|Accepted|Denied|Accepted| +|`optional`|Accepted|Accepted|Accepted|Accepted| Example: @@ -121,10 +122,10 @@ When we define the use of SSL in a Netdata agent for different ports, Netdata w Netdata will: -- Force all HTTP requests to the default port to be redirected to HTTPS (same port). -- Refuse unencrypted streaming connections from slaves on the default port. -- Allow both HTTP and HTTPS requests to port 20000 for netdata.conf -- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001. +- Force all HTTP requests to the default port to be redirected to HTTPS (same port). +- Refuse unencrypted streaming connections from slaves on the default port. +- Allow both HTTP and HTTPS requests to port 20000 for `netdata.conf` +- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001. 
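As a quick sanity check of the enforcement behaviour described above, `curl` can confirm that plain HTTP is redirected while HTTPS is answered directly. This is a minimal sketch, not part of the patch: it assumes the default port `19999` bound with `^SSL=force` (or no SSL option) and the self-signed test certificate generated with the `openssl` command shown earlier; adjust host and port to your setup.

```bash
# Plain HTTP request: per the table above, with ^SSL=force (or no SSL option)
# this should be answered with a redirect to the HTTPS URL on the same port.
curl -sSI http://localhost:19999/ | head -n 5

# HTTPS request: -k skips certificate verification, which is needed when
# testing with a self-signed certificate.
curl -sSkI https://localhost:19999/ | head -n 5
```

If the first command does not show a redirect, re-check the `bind to` line and the `ssl key` / `ssl certificate` paths in the `[web]` section.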
#### TLS/SSL errors @@ -150,51 +151,50 @@ Netdata supports access lists in `netdata.conf`: `*` does string matches on the IPs of the clients. -- `allow connections from` matches anyone that connects on the netdata port(s). - So, if someone is not allowed, it will be connected and disconnected immediately, without reading even - a single byte from its connection. This is a global settings with higher priority to any of the ones below. +- `allow connections from` matches anyone that connects on the Netdata port(s). + So, if someone is not allowed, it will be connected and disconnected immediately, without reading even + a single byte from its connection. This is a global settings with higher priority to any of the ones below. -- `allow dashboard from` receives the request and examines if it is a static dashboard file or an API call the - dashboards do. +- `allow dashboard from` receives the request and examines if it is a static dashboard file or an API call the + dashboards do. -- `allow badges from` checks if the API request is for a badge. Badges are not matched by `allow dashboard from`. +- `allow badges from` checks if the API request is for a badge. Badges are not matched by `allow dashboard from`. -- `allow streaming from` checks if the slave willing to stream metrics to this netdata is allowed. - This can be controlled per API KEY and MACHINE GUID in [stream.conf](../../streaming/stream.conf). - The setting in `netdata.conf` is checked before the ones in [stream.conf](../../streaming/stream.conf). +- `allow streaming from` checks if the slave willing to stream metrics to this Netdata is allowed. + This can be controlled per API KEY and MACHINE GUID in [stream.conf](../../streaming/stream.conf). + The setting in `netdata.conf` is checked before the ones in [stream.conf](../../streaming/stream.conf). -- `allow netdata.conf from` checks the IP to allow `http://netdata.host:19999/netdata.conf`. - The IPs listed are all the private IPv4 addresses, including link local IPv6 addresses. Keep in mind that connections to netdata API ports are filtered by `allow connections from`. So, IPs allowed by `allow netdata.conf from` should also be allowed by `allow connections from`. +- `allow netdata.conf from` checks the IP to allow `http://netdata.host:19999/netdata.conf`. + The IPs listed are all the private IPv4 addresses, including link local IPv6 addresses. Keep in mind that connections to Netdata API ports are filtered by `allow connections from`. So, IPs allowed by `allow netdata.conf from` should also be allowed by `allow connections from`. -- `allow management from` checks the IPs to allow API management calls. Management via the API is currently supported for [health](../api/health/#health-management-api) +- `allow management from` checks the IPs to allow API management calls. Management via the API is currently supported for [health](../api/health/#health-management-api) ### Other netdata.conf [web] section options -setting | default | info -:------:|:-------:|:---- -ses max window | `15` | See [single exponential smoothing](../api/queries/des/) -des max window | `15` | See [double exponential smoothing](../api/queries/des/) -listen backlog | `4096` | The port backlog. Check `man 2 listen`. -web files owner | `netdata` | The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, netdata will only serve files owned by user given in `run as user`. 
-web files group | `netdata` | If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not. -disconnect idle clients after seconds | `60` | The time in seconds to disconnect web clients after being totally idle. -timeout for first request | `60` | How long to wait for a client to send a request before closing the socket. Prevents slow request attacks. -accept a streaming request every seconds | `0` | Can be used to set a limit on how often a master Netdata server will accept streaming requests from the slaves in a [streaming and replication setup](../../streaming) -respect do not track policy | `no` | If set to `yes`, will respect the client's browser preferences on storing cookies. -x-frame-options response header | | [Avoid clickjacking attacks, by ensuring that the content is not embedded into other sites](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options). -enable gzip compression | `yes` | When set to `yes`, netdata web responses will be GZIP compressed, if the web client accepts such responses. -gzip compression strategy | `default` | Valid strategies are `default`, `filtered`, `huffman only`, `rle` and `fixed` -gzip compression level | `3` | Valid levels are 1 (fastest) to 9 (best ratio) +|setting|default|info| +|:-----:|:-----:|:---| +|ses max window|`15`|See [single exponential smoothing](../api/queries/des/)| +|des max window|`15`|See [double exponential smoothing](../api/queries/des/)| +|listen backlog|`4096`|The port backlog. Check `man 2 listen`.| +|web files owner|`netdata`|The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, Netdata will only serve files owned by user given in `run as user`.| +|web files group|`netdata`|If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not.| +|disconnect idle clients after seconds|`60`|The time in seconds to disconnect web clients after being totally idle.| +|timeout for first request|`60`|How long to wait for a client to send a request before closing the socket. Prevents slow request attacks.| +|accept a streaming request every seconds|`0`|Can be used to set a limit on how often a master Netdata server will accept streaming requests from the slaves in a [streaming and replication setup](../../streaming)| +|respect do not track policy|`no`|If set to `yes`, will respect the client's browser preferences on storing cookies.| +|x-frame-options response header||[Avoid clickjacking attacks, by ensuring that the content is not embedded into other sites](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options).| +|enable gzip compression|`yes`|When set to `yes`, Netdata web responses will be GZIP compressed, if the web client accepts such responses.| +|gzip compression strategy|`default`|Valid strategies are `default`, `filtered`, `huffman only`, `rle` and `fixed`| +|gzip compression level|`3`|Valid levels are 1 (fastest) to 9 (best ratio)| ## DDoS protection -If you publish your netdata to the internet, you may want to apply some protection against DDoS: - -1. Use the `static-threaded` web server (it is the default) -2. Use reasonable `[web].web server max sockets` (the default is) -3. Don't use all your cpu cores for netdata (lower `[web].web server threads`) -4. Run netdata with a low process scheduling priority (the default is the lowest) -5. 
If possible, proxy netdata via a full featured web server (nginx, apache, etc) +If you publish your Netdata to the internet, you may want to apply some protection against DDoS: +1. Use the `static-threaded` web server (it is the default) +2. Use reasonable `[web].web server max sockets` (the default is) +3. Don't use all your CPU cores for Netdata (lower `[web].web server threads`) +4. Run the `netdata` process with a low process scheduling priority (the default is the lowest) +5. If possible, proxy Netdata via a full featured web server (nginx, apache, etc) -[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>) diff --git a/web/server/static/Makefile.in b/web/server/static/Makefile.in new file mode 100644 index 000000000..8078caaf3 --- /dev/null +++ b/web/server/static/Makefile.in @@ -0,0 +1,697 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# SPDX-License-Identifier: GPL-3.0-or-later + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = web/server/static +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ + $(top_srcdir)/build/m4/ax_c__generic.m4 \ + $(top_srcdir)/build/m4/ax_c_lto.m4 \ + $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ + $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ + $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ + $(top_srcdir)/build/m4/ax_pthread.m4 \ + $(top_srcdir)/build/m4/jemalloc.m4 \ + $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \ + $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + 
esac +DATA = $(dist_noinst_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CUPSCONFIG = @CUPSCONFIG@ +CXX = @CXX@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CXX_BINARY = @CXX_BINARY@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ +IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ +JSON_CFLAGS = @JSON_CFLAGS@ +JSON_LIBS = @JSON_LIBS@ +LDFLAGS = @LDFLAGS@ +LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ +LIBCAP_LIBS = @LIBCAP_LIBS@ +LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@ +LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@ +LIBCURL_CFLAGS = @LIBCURL_CFLAGS@ +LIBCURL_LIBS = @LIBCURL_LIBS@ +LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ +LIBMNL_LIBS = @LIBMNL_LIBS@ +LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@ +LIBMONGOC_LIBS = @LIBMONGOC_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBSSL_CFLAGS = @LIBSSL_CFLAGS@ +LIBSSL_LIBS = @LIBSSL_LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MATH_CFLAGS = @MATH_CFLAGS@ +MATH_LIBS = @MATH_LIBS@ +MKDIR_P = @MKDIR_P@ +NFACCT_CFLAGS = @NFACCT_CFLAGS@ +NFACCT_LIBS = @NFACCT_LIBS@ +OBJEXT = 
@OBJEXT@ +OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@ +OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@ +OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ +OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ +OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@ +OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@ +OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@ +OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@ +OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ +OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ +OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@ +OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@ +OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ +OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@ +OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@ +OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@ +OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@ +OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@ +OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@ +OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@ +OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ +OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@ +OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@ +OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@ +OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@ +OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@ +PROTOBUF_LIBS = @PROTOBUF_LIBS@ +PROTOC = @PROTOC@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSE_CANDIDATE = @SSE_CANDIDATE@ +STRIP = @STRIP@ +UUID_CFLAGS = @UUID_CFLAGS@ +UUID_LIBS = @UUID_LIBS@ +VERSION = @VERSION@ +XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@ +XENLIGHT_LIBS = @XENLIGHT_LIBS@ +YAJL_CFLAGS = @YAJL_CFLAGS@ +YAJL_LIBS = @YAJL_LIBS@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_pthread_config = @ax_pthread_config@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_target = @build_target@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +chartsdir = @chartsdir@ +configdir = @configdir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +has_jemalloc = @has_jemalloc@ +has_tcmalloc = @has_tcmalloc@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libconfigdir = @libconfigdir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +nodedir = @nodedir@ 
+oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pluginsdir = @pluginsdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pythondir = @pythondir@ +registrydir = @registrydir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +varlibdir = @varlibdir@ +webdir = @webdir@ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +SUBDIRS = \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/static/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu web/server/static/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. 
+$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-generic distclean-tags distdir dvi dvi-am \ + html html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ + pdf-am ps ps-am tags tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/web/server/static/README.md b/web/server/static/README.md index 653b364bc..eeba0e94d 100644 --- a/web/server/static/README.md +++ b/web/server/static/README.md @@ -7,4 +7,4 @@ The kernel distributes the incoming requests to them. Each thread uses non-blocking I/O so it can serve any number of web requests in parallel. This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. 
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2Fstatic%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2Fstatic%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>) diff --git a/web/server/web_client.c b/web/server/web_client.c index 2da6c1dec..908e3a6a9 100644 --- a/web/server/web_client.c +++ b/web/server/web_client.c @@ -929,7 +929,6 @@ void web_client_split_path_query(struct web_client *w, char *s) { w->separator = 0x00; w->url_path_length = strlen(s); - w->url_search_path = NULL; } /** @@ -1035,20 +1034,22 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) { // a valid complete HTTP request found *ue = '\0'; + // Initialize here to avoid a crash when url_search_path is dereferenced below + w->url_search_path = NULL; if(w->mode != WEB_CLIENT_MODE_NORMAL) { if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1)) return HTTP_VALIDATION_MALFORMED_URL; } else { web_client_split_path_query(w, encoded_url); - if (w->separator) { + if (w->url_search_path && w->separator) { *w->url_search_path = 0x00; } if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1)) return HTTP_VALIDATION_MALFORMED_URL; - if (w->separator) { + if (w->url_search_path && w->separator) { *w->url_search_path = w->separator; char *from = (encoded_url + w->url_path_length); @@ -1064,9 +1065,6 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) { // copy the URL - we are going to overwrite parts of it // TODO -- ideally we we should avoid copying buffers around strncpyz(w->last_url, w->decoded_url, NETDATA_WEB_REQUEST_URL_SIZE); - if (w->separator) { - *w->url_search_path = 0x00; - } #ifdef ENABLE_HTTPS if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) { if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (web_client_is_using_ssl_force(w) || web_client_is_using_ssl_default(w)) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) { -- cgit v1.2.3
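
The web_client.c change above guards against dereferencing w->url_search_path when it was never set for the current request: the NULL initialization moves out of web_client_split_path_query() (which only runs for WEB_CLIENT_MODE_NORMAL requests) into http_request_validate(), before the mode branch, and every dereference now checks the pointer as well as the separator. The standalone C sketch below illustrates the same pattern under simplified assumptions; struct fake_client, split_path_query(), handle_request() and main() are hypothetical stand-ins for illustration only, not Netdata's struct web_client or its API.

/*
 * Minimal sketch of the NULL-pointer guard applied in the web_client.c hunk
 * above. struct fake_client is a simplified stand-in for Netdata's
 * struct web_client: only the fields touched by the patch are modelled, and
 * the function names and main() are hypothetical.
 */
#include <stdio.h>
#include <string.h>

struct fake_client {
    char   separator;        /* '?' or '#' when the URL carries a query part */
    char  *url_search_path;  /* points into the URL buffer, or NULL          */
    size_t url_path_length;
};

/* Split "path?query" in place: remember where the query starts, if any. */
static void split_path_query(struct fake_client *w, char *url) {
    char *q = strpbrk(url, "?#");
    w->separator = 0;
    if (q) {
        w->separator = *q;
        w->url_search_path = q;
        w->url_path_length = (size_t)(q - url);
    } else {
        w->url_path_length = strlen(url);
    }
}

static void handle_request(struct fake_client *w, char *url, int normal_mode) {
    /* As in the patch, initialize the pointer before branching: the splitting
     * function only runs in one of the two branches, so without this a stale
     * pointer from a previous request could be dereferenced. */
    w->url_search_path = NULL;

    if (normal_mode)
        split_path_query(w, url);

    /* Guard every dereference: checking separator alone is not enough
     * when the pointer was never set for this request. */
    if (w->url_search_path && w->separator)
        *w->url_search_path = '\0';          /* temporarily cut off the query */

    printf("path = '%s'\n", url);

    if (w->url_search_path && w->separator)
        *w->url_search_path = w->separator;  /* restore the full URL */
}

int main(void) {
    struct fake_client w = { 0 };
    char url1[] = "/api/v1/data?chart=system.cpu";
    char url2[] = "/host/child/api/v1/info";

    handle_request(&w, url1, 1);  /* prints: path = '/api/v1/data'                  */
    handle_request(&w, url2, 0);  /* pointer stays NULL despite stale separator: no crash */
    return 0;
}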