author     Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:19:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:20:17 +0000
commit     a64a253794ac64cb40befee54db53bde17dd0d49 (patch)
tree       c1024acc5f6e508814b944d99f112259bb28b1be /web/server
parent     New upstream version 1.10.0+dfsg (diff)
download   netdata-a64a253794ac64cb40befee54db53bde17dd0d49.tar.xz
           netdata-a64a253794ac64cb40befee54db53bde17dd0d49.zip
New upstream version 1.11.0+dfsg (upstream/1.11.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'web/server')
-rw-r--r--  web/server/Makefile.am              |   14
-rw-r--r--  web/server/Makefile.in              |  650
-rw-r--r--  web/server/README.md                |  107
-rw-r--r--  web/server/multi/Makefile.am        |   11
-rw-r--r--  web/server/multi/Makefile.in        |  647
-rw-r--r--  web/server/multi/README.md          |    8
-rw-r--r--  web/server/multi/multi-threaded.c   |  314
-rw-r--r--  web/server/multi/multi-threaded.h   |   10
-rw-r--r--  web/server/single/Makefile.am       |   11
-rw-r--r--  web/server/single/Makefile.in       |  647
-rw-r--r--  web/server/single/README.md         |    6
-rw-r--r--  web/server/single/single-threaded.c |  194
-rw-r--r--  web/server/single/single-threaded.h |   10
-rw-r--r--  web/server/static/Makefile.am       |   11
-rw-r--r--  web/server/static/Makefile.in       |  647
-rw-r--r--  web/server/static/README.md         |    9
-rw-r--r--  web/server/static/static-threaded.c |  424
-rw-r--r--  web/server/static/static-threaded.h |   10
-rw-r--r--  web/server/web_client.c             | 1665
-rw-r--r--  web/server/web_client.h             |  193
-rw-r--r--  web/server/web_client_cache.c       |  231
-rw-r--r--  web/server/web_client_cache.h       |   31
-rw-r--r--  web/server/web_server.c             |  146
-rw-r--r--  web/server/web_server.h             |   60
24 files changed, 6056 insertions, 0 deletions
diff --git a/web/server/Makefile.am b/web/server/Makefile.am
new file mode 100644
index 000000000..843c4cc9b
--- /dev/null
+++ b/web/server/Makefile.am
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ single \
+ multi \
+ static \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/Makefile.in b/web/server/Makefile.in
new file mode 100644
index 000000000..4b2614c5a
--- /dev/null
+++ b/web/server/Makefile.in
@@ -0,0 +1,650 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/server
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ single \
+ multi \
+ static \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/server/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/server/README.md b/web/server/README.md
new file mode 100644
index 000000000..8a6cad139
--- /dev/null
+++ b/web/server/README.md
@@ -0,0 +1,107 @@
+# netdata web server
+
+netdata supports 3 implementations of its internal web server, plus the option to disable it:
+
+- `static-threaded` is a web server with a fixed (configurable) number of threads
+- `single-threaded` is a simple web server running with a single thread
+- `multi-threaded` is a web server that spawns a thread for each client connection
+- `none` to disable the web server
+
+We suggest using the `static-threaded` web server, as it is the most efficient.
+
+All web server implementations use non-blocking I/O.
+
+All web servers respect the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection.
+
+
+## Configuration
+
+#### selecting the web server
+
+You can select the web server implementation by editing `netdata.conf` and setting:
+
+```
+[web]
+ mode = none | single-threaded | multi-threaded | static-threaded
+```
+
+The `static-threaded` web server also supports these settings:
+
+```
+[web]
+ mode = static-threaded
+ web server threads = 4
+ web server max sockets = 512
+```
+
+The default number of web server threads is `min(CPU cores, 6)`.
+
+The `web server max sockets` setting is automatically adjusted to 50% of the max number of open files
+netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors
+to be available for data collection.
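+
+If netdata runs as a systemd service and you need more sockets, the open-files limit can be raised with a
+systemd drop-in. This is a minimal sketch, not part of the documentation above; the unit name `netdata.service`
+and the chosen limit are assumptions to adapt to your setup:
+
+```
+# /etc/systemd/system/netdata.service.d/limits.conf
+# raise the open-files limit, which also raises the auto-adjusted 'web server max sockets'
+[Service]
+LimitNOFILE=30000
+```
+
+After adding it, run `systemctl daemon-reload` and restart netdata so the new limit takes effect.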
+
+#### binding netdata to multiple ports
+
+netdata can bind to multiple IPs and ports. Up to 100 sockets can be used
+(you can increase it at compile time with `CFLAGS="-DMAX_LISTEN_FDS=200" ./netdata-installer.sh ...`).
+
+The ports to bind are controlled via `[web].bind to`, like this:
+
+```
+[web]
+ default port = 19999
+ bind to = 127.0.0.1 10.1.1.1:19998 hostname:19997 [::]:19996 localhost:19995 *:http unix:/tmp/netdata.sock
+```
+
+Using the above, netdata will bind to:
+ - IPv4 127.0.0.1 at port 19999 (the port is taken from `default port`)
+ - IPv4 10.1.1.1 at port 19998
+ - All the IPs `hostname` resolves to (both IPv4 and IPv6, depending on the resolved IPs) at port 19997
+ - All IPv6 IPs at port 19996
+ - All the IPs `localhost` resolves to (both IPv4 and IPv6, depending on the resolved IPs) at port 19995
+ - All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`
+ - Unix domain socket `/tmp/netdata.sock`
+
+The option `[web].default port` is used when an entry in `[web].bind to` does not specify a port.
+
+#### access lists
+
+Netdata supports access lists in `netdata.conf`:
+
+```
+[web]
+ allow connections from = localhost *
+ allow dashboard from = localhost *
+ allow badges from = *
+ allow streaming from = *
+ allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*
+```
+
+These settings are string patterns matched against the IPs of the clients; `*` matches any IP.
+
+- `allow connections from` matches anyone that connects on the netdata port(s).
+  So, if a client is not allowed, it will be connected and disconnected immediately, without even a single
+  byte being read from its connection. This is a global setting with higher priority than any of the ones below.
+
+- `allow dashboard from` is checked after the request has been received, and applies to the static dashboard
+  files and the API calls the dashboards make.
+
+- `allow badges from` checks if the API request is for a badge. Badges are not matched by `allow dashboard from`.
+
+- `allow streaming from` checks if the slave that is willing to stream metrics to this netdata is allowed.
+ This can be controlled per API KEY and MACHINE GUID in [stream.conf](../../streaming/stream.conf).
+ The setting in `netdata.conf` is checked before the ones in [stream.conf](../../streaming/stream.conf).
+
+- `allow netdata.conf from` checks whether the client IP is allowed to fetch `http://netdata.host:19999/netdata.conf`.
+  By default it allows only private LANs.
+
+## DDoS protection
+
+If you publish your netdata to the internet, you may want to apply some protection against DDoS:
+
+1. Use the `static-threaded` web server (it is the default)
+2. Use a reasonable `[web].web server max sockets` (the default is)
+3. Don't use all your CPU cores for netdata (lower `[web].web server threads`)
+4. Run netdata with a low process scheduling priority (the default is the lowest)
+5. If possible, proxy netdata via a full-featured web server (nginx, apache, etc.), as in the sketch below
+
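+As a rough illustration of the last item, a minimal nginx reverse-proxy sketch could look like the following.
+The server name, listen port and upstream address are assumptions for illustration only; adapt them to your
+deployment (and prefer HTTPS in production):
+
+```
+server {
+    listen 80;
+    server_name netdata.example.com;
+
+    location / {
+        # forward everything to the local netdata web server
+        proxy_pass http://127.0.0.1:19999;
+        proxy_http_version 1.1;
+        proxy_set_header Host $host;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+    }
+}
+```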
diff --git a/web/server/multi/Makefile.am b/web/server/multi/Makefile.am
new file mode 100644
index 000000000..90cc9ca1e
--- /dev/null
+++ b/web/server/multi/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/multi/Makefile.in b/web/server/multi/Makefile.in
new file mode 100644
index 000000000..61ef9455f
--- /dev/null
+++ b/web/server/multi/Makefile.in
@@ -0,0 +1,647 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/server/multi
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/multi/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/server/multi/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/server/multi/README.md b/web/server/multi/README.md
new file mode 100644
index 000000000..f51073e93
--- /dev/null
+++ b/web/server/multi/README.md
@@ -0,0 +1,8 @@
+# `multi-threaded` web server
+
+The `multi-threaded` web server spawns a thread for each connection it receives.
+
+Each thread uses non-blocking I/O, so in principle it could serve any number of web requests in parallel.
+However, HTTP does not support this on a single connection, so in practice each thread serves its requests sequentially.
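+
+To select this implementation, set the web server mode in `netdata.conf` (a minimal sketch; the mode names
+are the ones documented in `web/server/README.md`):
+
+```
+[web]
+    mode = multi-threaded
+```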
+
+Each thread respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. \ No newline at end of file
diff --git a/web/server/multi/multi-threaded.c b/web/server/multi/multi-threaded.c
new file mode 100644
index 000000000..37bdd38ad
--- /dev/null
+++ b/web/server/multi/multi-threaded.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "multi-threaded.h"
+
+// --------------------------------------------------------------------------------------
+// the thread of a single client - for the MULTI-THREADED web server
+
+// 1. waits for input and output, using async I/O
+// 2. it processes HTTP requests
+// 3. it generates HTTP responses
+// 4. it copies data from input to output if mode is FILECOPY
+
+int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS;
+int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST;
+long web_client_streaming_rate_t = 0L;
+
+static void multi_threaded_web_client_worker_main_cleanup(void *ptr) {
+ struct web_client *w = ptr;
+ WEB_CLIENT_IS_DEAD(w);
+ w->running = 0;
+}
+
+static void *multi_threaded_web_client_worker_main(void *ptr) {
+ netdata_thread_cleanup_push(multi_threaded_web_client_worker_main_cleanup, ptr);
+
+ struct web_client *w = ptr;
+ w->running = 1;
+
+ struct pollfd fds[2], *ifd, *ofd;
+ int retval, timeout_ms;
+ nfds_t fdmax = 0;
+
+ while(!netdata_exit) {
+ if(unlikely(web_client_check_dead(w))) {
+ debug(D_WEB_CLIENT, "%llu: client is dead.", w->id);
+ break;
+ }
+ else if(unlikely(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))) {
+ debug(D_WEB_CLIENT, "%llu: client is not set for neither receiving nor sending data.", w->id);
+ break;
+ }
+
+ if(unlikely(w->ifd < 0 || w->ofd < 0)) {
+ error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd", w->id, w->ifd, w->ofd);
+ break;
+ }
+
+ if(w->ifd == w->ofd) {
+ fds[0].fd = w->ifd;
+ fds[0].events = 0;
+ fds[0].revents = 0;
+
+ if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN;
+ if(web_client_has_wait_send(w)) fds[0].events |= POLLOUT;
+
+ fds[1].fd = -1;
+ fds[1].events = 0;
+ fds[1].revents = 0;
+
+ ifd = ofd = &fds[0];
+
+ fdmax = 1;
+ }
+ else {
+ fds[0].fd = w->ifd;
+ fds[0].events = 0;
+ fds[0].revents = 0;
+ if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN;
+ ifd = &fds[0];
+
+ fds[1].fd = w->ofd;
+ fds[1].events = 0;
+ fds[1].revents = 0;
+ if(web_client_has_wait_send(w)) fds[1].events |= POLLOUT;
+ ofd = &fds[1];
+
+ fdmax = 2;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":"");
+ errno = 0;
+ timeout_ms = web_client_timeout * 1000;
+ retval = poll(fds, fdmax, timeout_ms);
+
+ if(unlikely(netdata_exit)) break;
+
+ if(unlikely(retval == -1)) {
+ if(errno == EAGAIN || errno == EINTR) {
+ debug(D_WEB_CLIENT, "%llu: EAGAIN received.", w->id);
+ continue;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: LISTENER: poll() failed (input fd = %d, output fd = %d). Closing client.", w->id, w->ifd, w->ofd);
+ break;
+ }
+ else if(unlikely(!retval)) {
+ debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":"");
+ break;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ int used = 0;
+ if(web_client_has_wait_send(w) && ofd->revents & POLLOUT) {
+ used++;
+ if(web_client_send(w) < 0) {
+ debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id);
+ break;
+ }
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ if(web_client_has_wait_receive(w) && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) {
+ used++;
+ if(web_client_receive(w) < 0) {
+ debug(D_WEB_CLIENT, "%llu: Cannot receive data from client. Closing client.", w->id);
+ break;
+ }
+
+ if(w->mode == WEB_CLIENT_MODE_NORMAL) {
+ debug(D_WEB_CLIENT, "%llu: Attempting to process received data.", w->id);
+ web_client_process_request(w);
+
+ // if the sockets are closed, may have transferred this client
+ // to plugins.d
+ if(unlikely(w->mode == WEB_CLIENT_MODE_STREAM))
+ break;
+ }
+ }
+
+ if(unlikely(!used)) {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Received error on socket.", w->id);
+ break;
+ }
+ }
+
+ if(w->mode != WEB_CLIENT_MODE_STREAM)
+ web_server_log_connection(w, "DISCONNECTED");
+
+ web_client_request_done(w);
+
+ debug(D_WEB_CLIENT, "%llu: done...", w->id);
+
+ // close the sockets/files now
+ // to free file descriptors
+ if(w->ifd == w->ofd) {
+ if(w->ifd != -1) close(w->ifd);
+ }
+ else {
+ if(w->ifd != -1) close(w->ifd);
+ if(w->ofd != -1) close(w->ofd);
+ }
+ w->ifd = -1;
+ w->ofd = -1;
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - MULTI-THREADED
+
+// 1. it accepts new incoming requests on our port
+// 2. creates a new web_client for each connection received
+// 3. spawns a new netdata_thread to serve the client (this is optimal for keep-alive clients)
+// 4. cleans up old web_clients whose netdata_threads have exited
+
+static void web_client_multi_threaded_web_server_release_clients(void) {
+ struct web_client *w;
+ for(w = web_clients_cache.used; w ; ) {
+ if(unlikely(!w->running && web_client_check_dead(w))) {
+ struct web_client *t = w->next;
+ web_client_release(w);
+ w = t;
+ }
+ else
+ w = w->next;
+ }
+}
+
+static void web_client_multi_threaded_web_server_stop_all_threads(void) {
+ struct web_client *w;
+
+ int found = 1;
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+ for(w = web_clients_cache.used; w ; w = w->next) {
+ if(w->running) {
+ found++;
+ info("stopping web client %s, id %llu", w->client_ip, w->id);
+ netdata_thread_cancel(w->thread);
+ }
+ }
+
+ while(found && max > 0) {
+ max -= step;
+ info("Waiting %d web threads to finish...", found);
+ sleep_usec(step);
+ found = 0;
+ for(w = web_clients_cache.used; w ; w = w->next)
+ if(w->running) found++;
+ }
+
+ if(found)
+ error("%d web threads are taking too long to finish. Giving up.", found);
+}
+
+static struct pollfd *socket_listen_main_multi_threaded_fds = NULL;
+
+static void socket_listen_main_multi_threaded_cleanup(void *data) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ info("releasing allocated memory...");
+ freez(socket_listen_main_multi_threaded_fds);
+
+ info("closing all sockets...");
+ listen_sockets_close(&api_sockets);
+
+ info("stopping all running web server threads...");
+ web_client_multi_threaded_web_server_stop_all_threads();
+
+ info("freeing web clients cache...");
+ web_client_cache_destroy();
+
+ info("cleanup completed.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+#define CLEANUP_EVERY_EVENTS 60
+void *socket_listen_main_multi_threaded(void *ptr) {
+ netdata_thread_cleanup_push(socket_listen_main_multi_threaded_cleanup, ptr);
+
+ web_server_mode = WEB_SERVER_MODE_MULTI_THREADED;
+ web_server_is_multithreaded = 1;
+
+ struct web_client *w;
+ int retval, counter = 0;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: No sockets to listen to.");
+
+ socket_listen_main_multi_threaded_fds = callocz(sizeof(struct pollfd), api_sockets.opened);
+
+ size_t i;
+ for(i = 0; i < api_sockets.opened ;i++) {
+ socket_listen_main_multi_threaded_fds[i].fd = api_sockets.fds[i];
+ socket_listen_main_multi_threaded_fds[i].events = POLLIN;
+ socket_listen_main_multi_threaded_fds[i].revents = 0;
+
+ info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN");
+ }
+
+ int timeout_ms = 1 * 1000;
+
+ while(!netdata_exit) {
+
+ // debug(D_WEB_CLIENT, "LISTENER: Waiting...");
+ retval = poll(socket_listen_main_multi_threaded_fds, api_sockets.opened, timeout_ms);
+
+ if(unlikely(retval == -1)) {
+ error("LISTENER: poll() failed.");
+ continue;
+ }
+ else if(unlikely(!retval)) {
+ debug(D_WEB_CLIENT, "LISTENER: poll() timeout.");
+ counter++;
+ continue;
+ }
+
+ for(i = 0 ; i < api_sockets.opened ; i++) {
+ short int revents = socket_listen_main_multi_threaded_fds[i].revents;
+
+ // check for new incoming connections
+ if(revents & POLLIN || revents & POLLPRI) {
+ socket_listen_main_multi_threaded_fds[i].revents = 0;
+
+ w = web_client_create_on_listenfd(socket_listen_main_multi_threaded_fds[i].fd);
+ if(unlikely(!w)) {
+ // no need for error log - web_client_create_on_listenfd already logged the error
+ continue;
+ }
+
+ if(api_sockets.fds_families[i] == AF_UNIX)
+ web_client_set_unix(w);
+ else
+ web_client_set_tcp(w);
+
+ char tag[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_TAG_MAX, "WEB_CLIENT[%llu,[%s]:%s]", w->id, w->client_ip, w->client_port);
+
+ w->running = 1;
+ if(netdata_thread_create(&w->thread, tag, NETDATA_THREAD_OPTION_DONT_LOG, multi_threaded_web_client_worker_main, w) != 0) {
+ w->running = 0;
+ web_client_release(w);
+ }
+ }
+ }
+
+ counter++;
+ if(counter > CLEANUP_EVERY_EVENTS) {
+ counter = 0;
+ web_client_multi_threaded_web_server_release_clients();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
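The shutdown code at the top of this file (web_client_multi_threaded_web_server_stop_all_threads) cancels every running client thread and then polls their `running` flags in 50ms steps for up to two seconds before giving up. A minimal standalone sketch of the same cancel-then-poll pattern, using plain pthreads; the names, sizes and timings here are illustrative, not netdata's:

/* sketch: cancel all running workers, then wait (bounded) for their
 * running flags to clear - illustrative names, not netdata code */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define WORKERS 4

static volatile int worker_running[WORKERS];
static pthread_t worker_thread[WORKERS];

static void stop_all_workers(void) {
    long remaining_us = 2 * 1000 * 1000;        /* 2 second deadline */
    const long step_us = 50 * 1000;             /* re-check every 50ms */
    int i, found = 0;

    for(i = 0; i < WORKERS; i++) {
        if(worker_running[i]) {
            found++;
            pthread_cancel(worker_thread[i]);   /* ask the worker to stop */
        }
    }

    while(found && remaining_us > 0) {
        remaining_us -= step_us;
        usleep(step_us);                        /* give the workers time to exit */

        found = 0;
        for(i = 0; i < WORKERS; i++)
            if(worker_running[i]) found++;
    }

    if(found)
        fprintf(stderr, "%d workers are taking too long to finish, giving up\n", found);
}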
diff --git a/web/server/multi/multi-threaded.h b/web/server/multi/multi-threaded.h
new file mode 100644
index 000000000..d7ebf3c54
--- /dev/null
+++ b/web/server/multi/multi-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_MULTI_THREADED_H
+#define NETDATA_WEB_SERVER_MULTI_THREADED_H
+
+#include "web/server/web_server.h"
+
+extern void *socket_listen_main_multi_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_MULTI_THREADED_H
diff --git a/web/server/single/Makefile.am b/web/server/single/Makefile.am
new file mode 100644
index 000000000..90cc9ca1e
--- /dev/null
+++ b/web/server/single/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/single/Makefile.in b/web/server/single/Makefile.in
new file mode 100644
index 000000000..a8ae2a35b
--- /dev/null
+++ b/web/server/single/Makefile.in
@@ -0,0 +1,647 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/server/single
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/single/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/server/single/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/server/single/README.md b/web/server/single/README.md
new file mode 100644
index 000000000..df5fe56e3
--- /dev/null
+++ b/web/server/single/README.md
@@ -0,0 +1,6 @@
+# `single-threaded` web server
+
+The `single-threaded` web server runs as a single thread inside netdata.
+It uses non-blocking I/O so it can serve any number of web requests in parallel.
+
+This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. \ No newline at end of file
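A minimal standalone sketch of the select()-based, non-blocking approach this README describes: a single thread multiplexing the listening socket and all connected clients in one loop. The names and buffer sizes are illustrative, not netdata's API:

/* sketch: one thread, non-blocking clients, one select() loop */
#include <sys/types.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <unistd.h>

static void serve_forever(int listen_fd) {
    fd_set all_fds, read_fds;
    FD_ZERO(&all_fds);
    FD_SET(listen_fd, &all_fds);
    int fdmax = listen_fd;

    for(;;) {
        read_fds = all_fds;                          /* select() modifies its sets */
        if(select(fdmax + 1, &read_fds, NULL, NULL, NULL) == -1)
            continue;

        for(int fd = 0; fd <= fdmax; fd++) {
            if(!FD_ISSET(fd, &read_fds)) continue;

            if(fd == listen_fd) {
                int client = accept(listen_fd, NULL, NULL);
                if(client == -1) continue;
                if(client >= FD_SETSIZE) { close(client); continue; }

                fcntl(client, F_SETFL, O_NONBLOCK);  /* never block the single thread */
                FD_SET(client, &all_fds);
                if(client > fdmax) fdmax = client;
            }
            else {
                char buf[4096];
                ssize_t n = recv(fd, buf, sizeof(buf), 0);
                if(n <= 0) {                         /* error or peer closed */
                    close(fd);
                    FD_CLR(fd, &all_fds);
                }
                /* a real server would parse the HTTP request here and keep the
                 * connection open for keep-alive, writing when the socket is writable */
            }
        }
    }
}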
diff --git a/web/server/single/single-threaded.c b/web/server/single/single-threaded.c
new file mode 100644
index 000000000..7e89ee683
--- /dev/null
+++ b/web/server/single/single-threaded.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "single-threaded.h"
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - SINGLE-THREADED
+
+struct web_client *single_threaded_clients[FD_SETSIZE];
+
+static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds, int *max) {
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) {
+ return 1;
+ }
+
+ if(unlikely(w->ifd < 0 || w->ifd >= (int)FD_SETSIZE || w->ofd < 0 || w->ofd >= (int)FD_SETSIZE)) {
+ error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd < FD_SETSIZE (%d)", w->id, w->ifd, w->ofd, (int)FD_SETSIZE);
+ return 1;
+ }
+
+ FD_SET(w->ifd, efds);
+ if(unlikely(*max < w->ifd)) *max = w->ifd;
+
+ if(unlikely(w->ifd != w->ofd)) {
+ if(*max < w->ofd) *max = w->ofd;
+ FD_SET(w->ofd, efds);
+ }
+
+ if(web_client_has_wait_receive(w)) FD_SET(w->ifd, ifds);
+ if(web_client_has_wait_send(w)) FD_SET(w->ofd, ofds);
+
+ single_threaded_clients[w->ifd] = w;
+ single_threaded_clients[w->ofd] = w;
+
+ return 0;
+}
+
+static inline int single_threaded_unlink_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds) {
+ FD_CLR(w->ifd, efds);
+ if(unlikely(w->ifd != w->ofd)) FD_CLR(w->ofd, efds);
+
+ if(web_client_has_wait_receive(w)) FD_CLR(w->ifd, ifds);
+ if(web_client_has_wait_send(w)) FD_CLR(w->ofd, ofds);
+
+ single_threaded_clients[w->ifd] = NULL;
+ single_threaded_clients[w->ofd] = NULL;
+
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static void socket_listen_main_single_threaded_cleanup(void *data) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("closing all sockets...");
+ listen_sockets_close(&api_sockets);
+
+ info("freeing web clients cache...");
+ web_client_cache_destroy();
+
+ info("cleanup completed.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *socket_listen_main_single_threaded(void *ptr) {
+ netdata_thread_cleanup_push(socket_listen_main_single_threaded_cleanup, ptr);
+ web_server_mode = WEB_SERVER_MODE_SINGLE_THREADED;
+ web_server_is_multithreaded = 0;
+
+ struct web_client *w;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: no listen sockets available.");
+
+ size_t i;
+ for(i = 0; i < (size_t)FD_SETSIZE ; i++)
+ single_threaded_clients[i] = NULL;
+
+ fd_set ifds, ofds, efds, rifds, rofds, refds;
+ FD_ZERO (&ifds);
+ FD_ZERO (&ofds);
+ FD_ZERO (&efds);
+ int fdmax = 0;
+
+ for(i = 0; i < api_sockets.opened ; i++) {
+ if (api_sockets.fds[i] < 0 || api_sockets.fds[i] >= (int)FD_SETSIZE)
+ fatal("LISTENER: Listen socket %d is not ready, or invalid.", api_sockets.fds[i]);
+
+ info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN");
+
+ FD_SET(api_sockets.fds[i], &ifds);
+ FD_SET(api_sockets.fds[i], &efds);
+ if(fdmax < api_sockets.fds[i])
+ fdmax = api_sockets.fds[i];
+ }
+
+ while(!netdata_exit) {
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server waiting (fdmax = %d)...", fdmax);
+
+ struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
+ rifds = ifds;
+ rofds = ofds;
+ refds = efds;
+ int retval = select(fdmax+1, &rifds, &rofds, &refds, &tv);
+
+ if(unlikely(retval == -1)) {
+ error("LISTENER: select() failed.");
+ continue;
+ }
+ else if(likely(retval)) {
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER: got something.");
+
+ for(i = 0; i < api_sockets.opened ; i++) {
+ if (FD_ISSET(api_sockets.fds[i], &rifds)) {
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection.");
+ w = web_client_create_on_listenfd(api_sockets.fds[i]);
+ if(unlikely(!w))
+ continue;
+
+ if(api_sockets.fds_families[i] == AF_UNIX)
+ web_client_set_unix(w);
+ else
+ web_client_set_tcp(w);
+
+ if (single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0) {
+ web_client_release(w);
+ }
+ }
+ }
+
+ for(i = 0 ; i <= (size_t)fdmax ; i++) {
+ if(likely(!FD_ISSET(i, &rifds) && !FD_ISSET(i, &rofds) && !FD_ISSET(i, &refds)))
+ continue;
+
+ w = single_threaded_clients[i];
+ if(unlikely(!w)) {
+ // error("no client on slot %zu", i);
+ continue;
+ }
+
+ if(unlikely(single_threaded_unlink_client(w, &ifds, &ofds, &efds) != 0)) {
+ // error("failed to unlink client %zu", i);
+ web_client_release(w);
+ continue;
+ }
+
+ if (unlikely(FD_ISSET(w->ifd, &refds) || FD_ISSET(w->ofd, &refds))) {
+ // error("no input on client %zu", i);
+ web_client_release(w);
+ continue;
+ }
+
+ if (unlikely(web_client_has_wait_receive(w) && FD_ISSET(w->ifd, &rifds))) {
+ if (unlikely(web_client_receive(w) < 0)) {
+ // error("cannot read from client %zu", i);
+ web_client_release(w);
+ continue;
+ }
+
+ if (w->mode != WEB_CLIENT_MODE_FILECOPY) {
+ debug(D_WEB_CLIENT, "%llu: Processing received data.", w->id);
+ web_client_process_request(w);
+ }
+ }
+
+ if (unlikely(web_client_has_wait_send(w) && FD_ISSET(w->ofd, &rofds))) {
+ if (unlikely(web_client_send(w) < 0)) {
+ // error("cannot send data to client %zu", i);
+ debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id);
+ web_client_release(w);
+ continue;
+ }
+ }
+
+ if(unlikely(single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0)) {
+ // error("failed to link client %zu", i);
+ web_client_release(w);
+ }
+ }
+ }
+ else {
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server timeout.");
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
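single-threaded.c above keeps an fd-indexed array (single_threaded_clients[FD_SETSIZE]) so that every descriptor select() reports as ready can be mapped back to its web client. A condensed sketch of that bookkeeping, with hypothetical names:

/* sketch: map file descriptors back to clients for a select() loop */
#include <sys/select.h>
#include <stddef.h>

struct client;                                /* opaque here */
static struct client *clients[FD_SETSIZE];

static int link_client(int fd, struct client *c, fd_set *ifds, int *fdmax) {
    if(fd < 0 || fd >= (int)FD_SETSIZE)
        return -1;                            /* select() cannot watch this fd */

    clients[fd] = c;
    FD_SET(fd, ifds);
    if(fd > *fdmax) *fdmax = fd;
    return 0;
}

static void unlink_client(int fd, fd_set *ifds) {
    if(fd < 0 || fd >= (int)FD_SETSIZE)
        return;

    FD_CLR(fd, ifds);
    clients[fd] = NULL;
}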
diff --git a/web/server/single/single-threaded.h b/web/server/single/single-threaded.h
new file mode 100644
index 000000000..fab4ceba1
--- /dev/null
+++ b/web/server/single/single-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_SINGLE_THREADED_H
+#define NETDATA_WEB_SERVER_SINGLE_THREADED_H
+
+#include "web/server/web_server.h"
+
+extern void *socket_listen_main_single_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_SINGLE_THREADED_H
diff --git a/web/server/static/Makefile.am b/web/server/static/Makefile.am
new file mode 100644
index 000000000..90cc9ca1e
--- /dev/null
+++ b/web/server/static/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/static/Makefile.in b/web/server/static/Makefile.in
new file mode 100644
index 000000000..f9dda4fa2
--- /dev/null
+++ b/web/server/static/Makefile.in
@@ -0,0 +1,647 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/server/static
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/static/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/server/static/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/server/static/README.md b/web/server/static/README.md
new file mode 100644
index 000000000..16febdb32
--- /dev/null
+++ b/web/server/static/README.md
@@ -0,0 +1,9 @@
+# `static-threaded` web server
+
+The `static-threaded` web server spawns a fixed number of threads.
+All the threads are concurrently listening for web requests on the same sockets.
+The kernel distributes the incoming requests to them.
+
+Each thread uses non-blocking I/O so it can serve any number of web requests in parallel.
+
+This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. \ No newline at end of file
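A standalone sketch of the idea this README describes: several worker threads share the same already-bound listening socket and the kernel hands each incoming connection to exactly one of them. The blocking accept() loop and the names are illustrative only; the implementation below uses a non-blocking poll() loop per worker instead.

/* sketch: N threads accepting from one shared listening socket */
#include <pthread.h>
#include <stdint.h>
#include <sys/socket.h>
#include <unistd.h>

#define WORKER_THREADS 4

static void *worker(void *arg) {
    int listen_fd = (int)(intptr_t)arg;

    for(;;) {
        int client = accept(listen_fd, NULL, NULL);  /* kernel wakes one waiting thread */
        if(client == -1) continue;

        /* serve the request here, honouring keep-alive, then: */
        close(client);
    }
    return NULL;
}

static void start_workers(int listen_fd) {
    pthread_t tid[WORKER_THREADS];               /* handles intentionally not joined in this sketch */
    for(int i = 0; i < WORKER_THREADS; i++)
        pthread_create(&tid[i], NULL, worker, (void *)(intptr_t)listen_fd);
}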
diff --git a/web/server/static/static-threaded.c b/web/server/static/static-threaded.c
new file mode 100644
index 000000000..56b8dbf8d
--- /dev/null
+++ b/web/server/static/static-threaded.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "static-threaded.h"
+
+// ----------------------------------------------------------------------------
+// high level web clients connection management
+
+static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, const char *client_port) {
+ struct web_client *w;
+
+ w = web_client_get_from_cache_or_allocate();
+ w->ifd = w->ofd = fd;
+
+ strncpyz(w->client_ip, client_ip, sizeof(w->client_ip) - 1);
+ strncpyz(w->client_port, client_port, sizeof(w->client_port) - 1);
+
+ if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
+ if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
+
+ web_client_initialize_connection(w);
+ return(w);
+}
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - STATIC-THREADED
+
+struct web_server_static_threaded_worker {
+ netdata_thread_t thread;
+
+ int id;
+ int running;
+
+ size_t max_sockets;
+
+ volatile size_t connected;
+ volatile size_t disconnected;
+ volatile size_t receptions;
+ volatile size_t sends;
+ volatile size_t max_concurrent;
+
+ volatile size_t files_read;
+ volatile size_t file_reads;
+};
+
+static long long static_threaded_workers_count = 1;
+static struct web_server_static_threaded_worker *static_workers_private_data = NULL;
+static __thread struct web_server_static_threaded_worker *worker_private = NULL;
+
+// ----------------------------------------------------------------------------
+
+static inline int web_server_check_client_status(struct web_client *w) {
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))))
+ return -1;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// web server files
+
+static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) {
+ struct web_client *w = (struct web_client *)data;
+
+ worker_private->files_read++;
+
+ debug(D_WEB_CLIENT, "%llu: ADDED FILE READ ON FD %d", w->id, pi->fd);
+ *events = POLLIN;
+ pi->data = w;
+ return w;
+}
+
+static void web_server_file_del_callback(POLLINFO *pi) {
+ struct web_client *w = (struct web_client *)pi->data;
+ debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd);
+
+ w->pollinfo_filecopy_slot = 0;
+
+ if(unlikely(!w->pollinfo_slot)) {
+ debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd);
+ web_client_release(w);
+ }
+}
+
+static int web_server_file_read_callback(POLLINFO *pi, short int *events) {
+ struct web_client *w = (struct web_client *)pi->data;
+
+ // if there is no POLLINFO linked to this, it means the client disconnected
+ // stop the file reading too
+ if(unlikely(!w->pollinfo_slot)) {
+ debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd);
+ return -1;
+ }
+
+ if(unlikely(w->mode != WEB_CLIENT_MODE_FILECOPY || w->ifd == w->ofd)) {
+ debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd);
+ return -1;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd);
+
+ worker_private->file_reads++;
+ ssize_t ret = web_client_read_file(w);
+
+ if(likely(web_client_has_wait_send(w))) {
+ POLLJOB *p = pi->p; // our POLLJOB
+ POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket
+
+ debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd);
+ p->fds[wpi->slot].events |= POLLOUT;
+ }
+
+ if(unlikely(ret <= 0 || w->ifd == w->ofd)) {
+ debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd);
+ return -1;
+ }
+
+ *events = POLLIN;
+ return 0;
+}
+
+static int web_server_file_write_callback(POLLINFO *pi, short int *events) {
+ (void)pi;
+ (void)events;
+
+ error("Writing to web files is not supported!");
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// web server clients
+
+static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) {
+ (void)data;
+
+ worker_private->connected++;
+
+ size_t concurrent = worker_private->connected - worker_private->disconnected;
+ if(unlikely(concurrent > worker_private->max_concurrent))
+ worker_private->max_concurrent = concurrent;
+
+ *events = POLLIN;
+
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd);
+ struct web_client *w = web_client_create_on_fd(pi->fd, pi->client_ip, pi->client_port);
+ w->pollinfo_slot = pi->slot;
+
+ if(unlikely(pi->socktype == AF_UNIX))
+ web_client_set_unix(w);
+ else
+ web_client_set_tcp(w);
+
+ debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd);
+ return w;
+}
+
+// TCP client disconnected
+static void web_server_del_callback(POLLINFO *pi) {
+ worker_private->disconnected++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+
+ w->pollinfo_slot = 0;
+ if(unlikely(w->pollinfo_filecopy_slot)) {
+ POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the client socket
+ (void)fpi;
+
+ debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FRED BY READING FILE JOB ON FD %d", w->id, fpi->fd);
+ }
+ else {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET))
+ pi->flags |= POLLINFO_FLAG_DONT_CLOSE;
+
+ debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd);
+ web_client_release(w);
+ }
+}
+
+static int web_server_rcv_callback(POLLINFO *pi, short int *events) {
+ worker_private->receptions++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ if(unlikely(web_client_receive(w) < 0))
+ return -1;
+
+ debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd);
+ web_client_process_request(w);
+
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) {
+ if(w->pollinfo_filecopy_slot == 0) {
+ debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD %d", w->id, pi->fd);
+
+ if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) {
+ // add a new socket to poll_events, with the same
+ debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd);
+
+ POLLINFO *fpi = poll_add_fd(
+ pi->p
+ , w->ifd
+ , 0
+ , POLLINFO_FLAG_CLIENT_SOCKET
+ , "FILENAME"
+ , ""
+ , web_server_file_add_callback
+ , web_server_file_del_callback
+ , web_server_file_read_callback
+ , web_server_file_write_callback
+ , (void *) w
+ );
+
+ if(fpi)
+ w->pollinfo_filecopy_slot = fpi->slot;
+ else {
+ error("Failed to add filecopy fd. Closing client.");
+ return -1;
+ }
+ }
+ }
+ }
+ else {
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+ }
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ return web_server_check_client_status(w);
+}
+
+static int web_server_snd_callback(POLLINFO *pi, short int *events) {
+ worker_private->sends++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd);
+
+ if(unlikely(web_client_send(w) < 0))
+ return -1;
+
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ return web_server_check_client_status(w);
+}
+
+static void web_server_tmr_callback(void *timer_data) {
+ worker_private = (struct web_server_static_threaded_worker *)timer_data;
+
+ static __thread RRDSET *st = NULL;
+ static __thread RRDDIM *rd_user = NULL, *rd_system = NULL;
+
+ if(unlikely(!st)) {
+ char id[100 + 1];
+ char title[100 + 1];
+
+ snprintfz(id, 100, "web_thread%d_cpu", worker_private->id + 1);
+ snprintfz(title, 100, "NetData web server thread No %d CPU usage", worker_private->id + 1);
+
+ st = rrdset_create_localhost(
+ "netdata"
+ , id
+ , NULL
+ , "web"
+ , "netdata.web_cpu"
+ , title
+ , "milliseconds/s"
+ , "web"
+ , "stats"
+ , 132000 + worker_private->id
+ , default_rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_user = rrddim_add(st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_system = rrddim_add(st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st);
+
+ struct rusage rusage;
+ getrusage(RUSAGE_THREAD, &rusage);
+ rrddim_set_by_pointer(st, rd_user, rusage.ru_utime.tv_sec * 1000000ULL + rusage.ru_utime.tv_usec);
+ rrddim_set_by_pointer(st, rd_system, rusage.ru_stime.tv_sec * 1000000ULL + rusage.ru_stime.tv_usec);
+ rrdset_done(st);
+}
+
+// ----------------------------------------------------------------------------
+// web server worker thread
+
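+// each worker runs poll_events() on the shared listen sockets and keeps a
+// local cache of web clients; the cleanup handler below releases that cache
+// and reports the worker's lifetime statistics.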
+static void socket_listen_main_static_threaded_worker_cleanup(void *ptr) {
+ worker_private = (struct web_server_static_threaded_worker *)ptr;
+
+ info("freeing local web clients cache...");
+ web_client_cache_destroy();
+
+ info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends",
+ worker_private->connected,
+ worker_private->disconnected,
+ worker_private->max_concurrent,
+ worker_private->receptions,
+ worker_private->sends
+ );
+
+ worker_private->running = 0;
+}
+
+void *socket_listen_main_static_threaded_worker(void *ptr) {
+ worker_private = (struct web_server_static_threaded_worker *)ptr;
+ worker_private->running = 1;
+
+ netdata_thread_cleanup_push(socket_listen_main_static_threaded_worker_cleanup, ptr);
+
+ poll_events(&api_sockets
+ , web_server_add_callback
+ , web_server_del_callback
+ , web_server_rcv_callback
+ , web_server_snd_callback
+ , web_server_tmr_callback
+ , web_allow_connections_from
+ , NULL
+ , web_client_first_request_timeout
+ , web_client_timeout
+ , default_rrd_update_every * 1000 // timer_milliseconds
+ , ptr // timer_data
+ , worker_private->max_sockets
+ );
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// web server main thread - also becomes a worker
+
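+// the main thread spawns workers 1..N-1 and then becomes worker 0 itself.
+// both options are read from CONFIG_SECTION_WEB in netdata.conf:
+//   "web server threads"     - number of workers (default: min(processors, 6))
+//   "web server max sockets" - total sockets, split evenly across the workers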
+static void socket_listen_main_static_threaded_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ int i, found = 0;
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+
+    // we start from 1 - slot 0 is ourselves
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ if(static_workers_private_data[i].running) {
+ found++;
+ info("stopping worker %d", i + 1);
+ netdata_thread_cancel(static_workers_private_data[i].thread);
+ }
+ else
+ info("found stopped worker %d", i + 1);
+ }
+
+ while(found && max > 0) {
+ max -= step;
+ info("Waiting %d static web threads to finish...", found);
+ sleep_usec(step);
+ found = 0;
+
+        // we start from 1 - slot 0 is ourselves
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ if (static_workers_private_data[i].running)
+ found++;
+ }
+ }
+
+ if(found)
+ error("%d static web threads are taking too long to finish. Giving up.", found);
+
+ info("closing all web server sockets...");
+ listen_sockets_close(&api_sockets);
+
+ info("all static web threads stopped.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *socket_listen_main_static_threaded(void *ptr) {
+ netdata_thread_cleanup_push(socket_listen_main_static_threaded_cleanup, ptr);
+ web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: no listen sockets available.");
+
+    // 6 threads is the optimal value,
+    // since browsers open at most 6 parallel connections per host;
+    // so, if the machine has more CPUs, avoid using resources unnecessarily
+ int def_thread_count = (processors > 6)?6:processors;
+
+ static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count);
+ if(static_threaded_workers_count < 1) static_threaded_workers_count = 1;
+
+ size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 2));
+
+ static_workers_private_data = callocz((size_t)static_threaded_workers_count, sizeof(struct web_server_static_threaded_worker));
+
+ web_server_is_multithreaded = (static_threaded_workers_count > 1);
+
+ int i;
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ static_workers_private_data[i].id = i;
+ static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count;
+
+ char tag[50 + 1];
+ snprintfz(tag, 50, "WEB_SERVER[static%d]", i+1);
+
+ info("starting worker %d", i+1);
+ netdata_thread_create(&static_workers_private_data[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, socket_listen_main_static_threaded_worker, (void *)&static_workers_private_data[i]);
+ }
+
+ // and the main one
+ static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count;
+ socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]);
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/web/server/static/static-threaded.h b/web/server/static/static-threaded.h
new file mode 100644
index 000000000..5f4862e5b
--- /dev/null
+++ b/web/server/static/static-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_STATIC_THREADED_H
+#define NETDATA_WEB_SERVER_STATIC_THREADED_H
+
+#include "web/server/web_server.h"
+
+extern void *socket_listen_main_static_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_STATIC_THREADED_H
diff --git a/web/server/web_client.c b/web/server/web_client.c
new file mode 100644
index 000000000..282cfcd1a
--- /dev/null
+++ b/web/server/web_client.c
@@ -0,0 +1,1665 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_client.h"
+
+// this is an async I/O implementation of the web server request parser
+// it is used by all netdata web servers
+
+int respect_web_browser_do_not_track_policy = 0;
+char *web_x_frame_options = NULL;
+
+#ifdef NETDATA_WITH_ZLIB
+int web_enable_gzip = 1, web_gzip_level = 3, web_gzip_strategy = Z_DEFAULT_STRATEGY;
+#endif /* NETDATA_WITH_ZLIB */
+
+inline int web_client_permission_denied(struct web_client *w) {
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "You are not allowed to access this resource.");
+ w->response.code = 403;
+ return 403;
+}
+
+static inline int web_client_crock_socket(struct web_client *w) {
+#ifdef TCP_CORK
+ if(likely(web_client_is_corkable(w) && !w->tcp_cork && w->ofd != -1)) {
+ w->tcp_cork = 1;
+ if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
+ error("%llu: failed to enable TCP_CORK on socket.", w->id);
+
+ w->tcp_cork = 0;
+ return -1;
+ }
+ }
+#else
+ (void)w;
+#endif /* TCP_CORK */
+
+ return 0;
+}
+
+static inline int web_client_uncrock_socket(struct web_client *w) {
+#ifdef TCP_CORK
+ if(likely(w->tcp_cork && w->ofd != -1)) {
+ w->tcp_cork = 0;
+ if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
+ error("%llu: failed to disable TCP_CORK on socket.", w->id);
+ w->tcp_cork = 1;
+ return -1;
+ }
+ }
+#else
+ (void)w;
+#endif /* TCP_CORK */
+
+ return 0;
+}
+
+static inline char *strip_control_characters(char *url) {
+ char *s = url;
+ if(!s) return "";
+
+ if(iscntrl(*s)) *s = ' ';
+ while(*++s) {
+ if(iscntrl(*s)) *s = ' ';
+ }
+
+ return url;
+}
+
+void web_client_request_done(struct web_client *w) {
+ web_client_uncrock_socket(w);
+
+ debug(D_WEB_CLIENT, "%llu: Resetting client.", w->id);
+
+ if(likely(w->last_url[0])) {
+ struct timeval tv;
+ now_realtime_timeval(&tv);
+
+ size_t size = (w->mode == WEB_CLIENT_MODE_FILECOPY)?w->response.rlen:w->response.data->len;
+ size_t sent = size;
+#ifdef NETDATA_WITH_ZLIB
+ if(likely(w->response.zoutput)) sent = (size_t)w->response.zstream.total_out;
+#endif
+
+ // --------------------------------------------------------------------
+ // global statistics
+
+ finished_web_request_statistics(dt_usec(&tv, &w->tv_in),
+ w->stats_received_bytes,
+ w->stats_sent_bytes,
+ size,
+ sent);
+
+ w->stats_received_bytes = 0;
+ w->stats_sent_bytes = 0;
+
+
+ // --------------------------------------------------------------------
+
+ const char *mode;
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_FILECOPY:
+ mode = "FILECOPY";
+ break;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ mode = "OPTIONS";
+ break;
+
+ case WEB_CLIENT_MODE_STREAM:
+ mode = "STREAM";
+ break;
+
+ case WEB_CLIENT_MODE_NORMAL:
+ mode = "DATA";
+ break;
+
+ default:
+ mode = "UNKNOWN";
+ break;
+ }
+
+ // access log
+ log_access("%llu: %d '[%s]:%s' '%s' (sent/all = %zu/%zu bytes %0.0f%%, prep/sent/total = %0.2f/%0.2f/%0.2f ms) %d '%s'",
+ w->id
+ , gettid()
+ , w->client_ip
+ , w->client_port
+ , mode
+ , sent
+ , size
+ , -((size > 0) ? ((size - sent) / (double) size * 100.0) : 0.0)
+ , dt_usec(&w->tv_ready, &w->tv_in) / 1000.0
+ , dt_usec(&tv, &w->tv_ready) / 1000.0
+ , dt_usec(&tv, &w->tv_in) / 1000.0
+ , w->response.code
+ , strip_control_characters(w->last_url)
+ );
+ }
+
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) {
+ if(w->ifd != w->ofd) {
+ debug(D_WEB_CLIENT, "%llu: Closing filecopy input file descriptor %d.", w->id, w->ifd);
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != -1) close(w->ifd);
+ }
+
+ w->ifd = w->ofd;
+ }
+ }
+
+ w->last_url[0] = '\0';
+ w->cookie1[0] = '\0';
+ w->cookie2[0] = '\0';
+ w->origin[0] = '*';
+ w->origin[1] = '\0';
+
+ freez(w->user_agent); w->user_agent = NULL;
+
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+
+ w->tcp_cork = 0;
+ web_client_disable_donottrack(w);
+ web_client_disable_tracking_required(w);
+ web_client_disable_keepalive(w);
+ w->decoded_url[0] = '\0';
+
+ buffer_reset(w->response.header_output);
+ buffer_reset(w->response.header);
+ buffer_reset(w->response.data);
+ w->response.rlen = 0;
+ w->response.sent = 0;
+ w->response.code = 0;
+
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
+
+ w->response.zoutput = 0;
+
+ // if we had enabled compression, release it
+#ifdef NETDATA_WITH_ZLIB
+ if(w->response.zinitialized) {
+ debug(D_DEFLATE, "%llu: Freeing compression resources.", w->id);
+ deflateEnd(&w->response.zstream);
+ w->response.zsent = 0;
+ w->response.zhave = 0;
+ w->response.zstream.avail_in = 0;
+ w->response.zstream.avail_out = 0;
+ w->response.zstream.total_in = 0;
+ w->response.zstream.total_out = 0;
+ w->response.zinitialized = 0;
+ }
+#endif // NETDATA_WITH_ZLIB
+}
+
+uid_t web_files_uid(void) {
+ static char *web_owner = NULL;
+ static uid_t owner_uid = 0;
+
+ if(unlikely(!web_owner)) {
+        // getpwuid() is not thread safe,
+        // but this is only called once,
+        // while still single threaded
+ struct passwd *pw = getpwuid(geteuid());
+ web_owner = config_get(CONFIG_SECTION_WEB, "web files owner", (pw)?(pw->pw_name?pw->pw_name:""):"");
+ if(!web_owner || !*web_owner)
+ owner_uid = geteuid();
+ else {
+            // getpwnam() is not thread safe,
+            // but this is only called once,
+            // while still single threaded
+ pw = getpwnam(web_owner);
+ if(!pw) {
+ error("User '%s' is not present. Ignoring option.", web_owner);
+ owner_uid = geteuid();
+ }
+ else {
+ debug(D_WEB_CLIENT, "Web files owner set to %s.", web_owner);
+ owner_uid = pw->pw_uid;
+ }
+ }
+ }
+
+ return(owner_uid);
+}
+
+gid_t web_files_gid(void) {
+ static char *web_group = NULL;
+ static gid_t owner_gid = 0;
+
+ if(unlikely(!web_group)) {
+        // getgrgid() is not thread safe,
+        // but this is only called once,
+        // while still single threaded
+ struct group *gr = getgrgid(getegid());
+ web_group = config_get(CONFIG_SECTION_WEB, "web files group", (gr)?(gr->gr_name?gr->gr_name:""):"");
+ if(!web_group || !*web_group)
+ owner_gid = getegid();
+ else {
+            // getgrnam() is not thread safe,
+            // but this is only called once,
+            // while still single threaded
+ gr = getgrnam(web_group);
+ if(!gr) {
+ error("Group '%s' is not present. Ignoring option.", web_group);
+ owner_gid = getegid();
+ }
+ else {
+ debug(D_WEB_CLIENT, "Web files group set to %s.", web_group);
+ owner_gid = gr->gr_gid;
+ }
+ }
+ }
+
+ return(owner_gid);
+}
+
+static struct {
+ const char *extension;
+ uint32_t hash;
+ uint8_t contenttype;
+} mime_types[] = {
+ { "html" , 0 , CT_TEXT_HTML}
+ , {"js" , 0 , CT_APPLICATION_X_JAVASCRIPT}
+ , {"css" , 0 , CT_TEXT_CSS}
+ , {"xml" , 0 , CT_TEXT_XML}
+ , {"xsl" , 0 , CT_TEXT_XSL}
+ , {"txt" , 0 , CT_TEXT_PLAIN}
+ , {"svg" , 0 , CT_IMAGE_SVG_XML}
+ , {"ttf" , 0 , CT_APPLICATION_X_FONT_TRUETYPE}
+ , {"otf" , 0 , CT_APPLICATION_X_FONT_OPENTYPE}
+ , {"woff2", 0 , CT_APPLICATION_FONT_WOFF2}
+ , {"woff" , 0 , CT_APPLICATION_FONT_WOFF}
+ , {"eot" , 0 , CT_APPLICATION_VND_MS_FONTOBJ}
+ , {"png" , 0 , CT_IMAGE_PNG}
+ , {"jpg" , 0 , CT_IMAGE_JPG}
+ , {"jpeg" , 0 , CT_IMAGE_JPG}
+ , {"gif" , 0 , CT_IMAGE_GIF}
+ , {"bmp" , 0 , CT_IMAGE_BMP}
+ , {"ico" , 0 , CT_IMAGE_XICON}
+ , {"icns" , 0 , CT_IMAGE_ICNS}
+ , { NULL, 0, 0}
+};
+
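+// contenttype_for_filename() maps a filename to one of the CT_* ids above
+// by hashing the extension after the last dot, e.g.:
+//   "dashboard.js"    -> CT_APPLICATION_X_JAVASCRIPT
+//   "index.html"      -> CT_TEXT_HTML
+//   "README" (no dot) -> CT_APPLICATION_OCTET_STREAM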
+static inline uint8_t contenttype_for_filename(const char *filename) {
+ // info("checking filename '%s'", filename);
+
+ static int initialized = 0;
+ int i;
+
+ if(unlikely(!initialized)) {
+ for (i = 0; mime_types[i].extension; i++)
+ mime_types[i].hash = simple_hash(mime_types[i].extension);
+
+ initialized = 1;
+ }
+
+ const char *s = filename, *last_dot = NULL;
+
+ // find the last dot
+ while(*s) {
+ if(unlikely(*s == '.')) last_dot = s;
+ s++;
+ }
+
+ if(unlikely(!last_dot || !*last_dot || !last_dot[1])) {
+ // info("no extension for filename '%s'", filename);
+ return CT_APPLICATION_OCTET_STREAM;
+ }
+ last_dot++;
+
+ // info("extension for filename '%s' is '%s'", filename, last_dot);
+
+ uint32_t hash = simple_hash(last_dot);
+ for(i = 0; mime_types[i].extension ; i++) {
+ if(unlikely(hash == mime_types[i].hash && !strcmp(last_dot, mime_types[i].extension))) {
+ // info("matched extension for filename '%s': '%s'", filename, last_dot);
+ return mime_types[i].contenttype;
+ }
+ }
+
+ // info("not matched extension for filename '%s': '%s'", filename, last_dot);
+ return CT_APPLICATION_OCTET_STREAM;
+}
+
+static inline int access_to_file_is_not_permitted(struct web_client *w, const char *filename) {
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Access to file is not permitted: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return 403;
+}
+
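+// serve a static file from netdata_configured_web_dir:
+// only [alphanumerics / . - _] are accepted in the path, ".." is rejected,
+// the target must be a regular file owned by the expected web files
+// user/group, and on success the client is switched to FILECOPY mode so
+// the file is streamed asynchronously.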
+int mysendfile(struct web_client *w, char *filename) {
+ debug(D_WEB_CLIENT, "%llu: Looking for file '%s/%s'", w->id, netdata_configured_web_dir, filename);
+
+ if(!web_client_can_access_dashboard(w))
+ return web_client_permission_denied(w);
+
+ // skip leading slashes
+ while (*filename == '/') filename++;
+
+ // if the filename contains "strange" characters, refuse to serve it
+ char *s;
+ for(s = filename; *s ;s++) {
+ if( !isalnum(*s) && *s != '/' && *s != '.' && *s != '-' && *s != '_') {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.data, "Filename contains invalid characters: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return 400;
+ }
+ }
+
+ // if the filename contains a .. refuse to serve it
+ if(strstr(filename, "..") != 0) {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Relative filenames are not supported: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return 400;
+ }
+
+ // find the physical file on disk
+ char webfilename[FILENAME_MAX + 1];
+ snprintfz(webfilename, FILENAME_MAX, "%s/%s", netdata_configured_web_dir, filename);
+
+ struct stat statbuf;
+ int done = 0;
+ while(!done) {
+ // check if the file exists
+ if (lstat(webfilename, &statbuf) != 0) {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not found.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "File does not exist, or is not accessible: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return 404;
+ }
+
+ if ((statbuf.st_mode & S_IFMT) == S_IFDIR) {
+ snprintfz(webfilename, FILENAME_MAX, "%s/%s/index.html", netdata_configured_web_dir, filename);
+ continue;
+ }
+
+ if ((statbuf.st_mode & S_IFMT) != S_IFREG) {
+ error("%llu: File '%s' is not a regular file. Access Denied.", w->id, webfilename);
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ // check if the file is owned by expected user
+ if (statbuf.st_uid != web_files_uid()) {
+ error("%llu: File '%s' is owned by user %u (expected user %u). Access Denied.", w->id, webfilename, statbuf.st_uid, web_files_uid());
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ // check if the file is owned by expected group
+ if (statbuf.st_gid != web_files_gid()) {
+ error("%llu: File '%s' is owned by group %u (expected group %u). Access Denied.", w->id, webfilename, statbuf.st_gid, web_files_gid());
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ done = 1;
+ }
+
+ // open the file
+    w->ifd = open(webfilename, O_RDONLY | O_NONBLOCK);
+ if(w->ifd == -1) {
+ w->ifd = w->ofd;
+
+ if(errno == EBUSY || errno == EAGAIN) {
+ error("%llu: File '%s' is busy, sending 307 Moved Temporarily to force retry.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.header, "Location: /%s\r\n", filename);
+ buffer_strcat(w->response.data, "File is currently busy, please try again later: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return 307;
+ }
+ else {
+ error("%llu: Cannot open file '%s'.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Cannot open file: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return 404;
+ }
+ }
+
+ sock_setnonblock(w->ifd);
+
+ w->response.data->contenttype = contenttype_for_filename(webfilename);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Sending file '%s' (%ld bytes, ifd %d, ofd %d).", w->id, webfilename, statbuf.st_size, w->ifd, w->ofd);
+
+ w->mode = WEB_CLIENT_MODE_FILECOPY;
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
+ buffer_flush(w->response.data);
+ buffer_need_bytes(w->response.data, (size_t)statbuf.st_size);
+ w->response.rlen = (size_t)statbuf.st_size;
+#ifdef __APPLE__
+ w->response.data->date = statbuf.st_mtimespec.tv_sec;
+#else
+ w->response.data->date = statbuf.st_mtim.tv_sec;
+#endif /* __APPLE__ */
+ buffer_cacheable(w->response.data);
+
+ return 200;
+}
+
+
+#ifdef NETDATA_WITH_ZLIB
+void web_client_enable_deflate(struct web_client *w, int gzip) {
+ if(unlikely(w->response.zinitialized)) {
+ debug(D_DEFLATE, "%llu: Compression has already be initialized for this client.", w->id);
+ return;
+ }
+
+ if(unlikely(w->response.sent)) {
+ error("%llu: Cannot enable compression in the middle of a conversation.", w->id);
+ return;
+ }
+
+ w->response.zstream.zalloc = Z_NULL;
+ w->response.zstream.zfree = Z_NULL;
+ w->response.zstream.opaque = Z_NULL;
+
+ w->response.zstream.next_in = (Bytef *)w->response.data->buffer;
+ w->response.zstream.avail_in = 0;
+ w->response.zstream.total_in = 0;
+
+ w->response.zstream.next_out = w->response.zbuffer;
+ w->response.zstream.avail_out = 0;
+ w->response.zstream.total_out = 0;
+
+ w->response.zstream.zalloc = Z_NULL;
+ w->response.zstream.zfree = Z_NULL;
+ w->response.zstream.opaque = Z_NULL;
+
+// if(deflateInit(&w->response.zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+// error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
+// return;
+// }
+
+ // Select GZIP compression: windowbits = 15 + 16 = 31
+ if(deflateInit2(&w->response.zstream, web_gzip_level, Z_DEFLATED, 15 + ((gzip)?16:0), 8, web_gzip_strategy) != Z_OK) {
+ error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
+ return;
+ }
+
+ w->response.zsent = 0;
+ w->response.zoutput = 1;
+ w->response.zinitialized = 1;
+
+ debug(D_DEFLATE, "%llu: Initialized compression.", w->id);
+}
+#endif // NETDATA_WITH_ZLIB
+
+void buffer_data_options2string(BUFFER *wb, uint32_t options) {
+ int count = 0;
+
+ if(options & RRDR_OPTION_NONZERO) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "nonzero");
+ }
+
+ if(options & RRDR_OPTION_REVERSED) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "flip");
+ }
+
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "jsonwrap");
+ }
+
+ if(options & RRDR_OPTION_MIN2MAX) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "min2max");
+ }
+
+ if(options & RRDR_OPTION_MILLISECONDS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "ms");
+ }
+
+ if(options & RRDR_OPTION_ABSOLUTE) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "absolute");
+ }
+
+ if(options & RRDR_OPTION_SECONDS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "seconds");
+ }
+
+ if(options & RRDR_OPTION_NULL2ZERO) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "null2zero");
+ }
+
+ if(options & RRDR_OPTION_OBJECTSROWS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "objectrows");
+ }
+
+ if(options & RRDR_OPTION_GOOGLE_JSON) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "google_json");
+ }
+
+ if(options & RRDR_OPTION_PERCENTAGE) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "percentage");
+ }
+
+ if(options & RRDR_OPTION_NOT_ALIGNED) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "unaligned");
+ }
+}
+
+static inline int check_host_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ if(unlikely(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) {
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "This host does not maintain a database");
+ return 400;
+ }
+
+ return func(host, w, url);
+}
+
+static inline int check_host_and_dashboard_acl_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ if(!web_client_can_access_dashboard(w))
+ return web_client_permission_denied(w);
+
+ return check_host_and_call(host, w, url, func);
+}
+
+int web_client_api_request(RRDHOST *host, struct web_client *w, char *url)
+{
+ // get the api version
+ char *tok = mystrsep(&url, "/?&");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for API version '%s'.", w->id, tok);
+ if(strcmp(tok, "v1") == 0)
+ return web_client_api_request_v1(host, w, url);
+ else {
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Unsupported API version: ");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ return 404;
+ }
+ }
+ else {
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Which API version?");
+ return 400;
+ }
+}
+
+const char *web_content_type_to_string(uint8_t contenttype) {
+ switch(contenttype) {
+ case CT_TEXT_HTML:
+ return "text/html; charset=utf-8";
+
+ case CT_APPLICATION_XML:
+ return "application/xml; charset=utf-8";
+
+ case CT_APPLICATION_JSON:
+ return "application/json; charset=utf-8";
+
+ case CT_APPLICATION_X_JAVASCRIPT:
+ return "application/x-javascript; charset=utf-8";
+
+ case CT_TEXT_CSS:
+ return "text/css; charset=utf-8";
+
+ case CT_TEXT_XML:
+ return "text/xml; charset=utf-8";
+
+ case CT_TEXT_XSL:
+ return "text/xsl; charset=utf-8";
+
+ case CT_APPLICATION_OCTET_STREAM:
+ return "application/octet-stream";
+
+ case CT_IMAGE_SVG_XML:
+ return "image/svg+xml";
+
+ case CT_APPLICATION_X_FONT_TRUETYPE:
+ return "application/x-font-truetype";
+
+ case CT_APPLICATION_X_FONT_OPENTYPE:
+ return "application/x-font-opentype";
+
+ case CT_APPLICATION_FONT_WOFF:
+ return "application/font-woff";
+
+ case CT_APPLICATION_FONT_WOFF2:
+ return "application/font-woff2";
+
+ case CT_APPLICATION_VND_MS_FONTOBJ:
+ return "application/vnd.ms-fontobject";
+
+ case CT_IMAGE_PNG:
+ return "image/png";
+
+ case CT_IMAGE_JPG:
+ return "image/jpeg";
+
+ case CT_IMAGE_GIF:
+ return "image/gif";
+
+ case CT_IMAGE_XICON:
+ return "image/x-icon";
+
+ case CT_IMAGE_BMP:
+ return "image/bmp";
+
+ case CT_IMAGE_ICNS:
+ return "image/icns";
+
+ case CT_PROMETHEUS:
+ return "text/plain; version=0.0.4";
+
+ default:
+ case CT_TEXT_PLAIN:
+ return "text/plain; charset=utf-8";
+ }
+}
+
+
+const char *web_response_code_to_string(int code) {
+ switch(code) {
+ case 200:
+ return "OK";
+
+ case 307:
+ return "Temporary Redirect";
+
+ case 400:
+ return "Bad Request";
+
+ case 403:
+ return "Forbidden";
+
+ case 404:
+ return "Not Found";
+
+ case 412:
+ return "Preconditions Failed";
+
+ default:
+ if(code >= 100 && code < 200)
+ return "Informational";
+
+ if(code >= 200 && code < 300)
+ return "Successful";
+
+ if(code >= 300 && code < 400)
+ return "Redirection";
+
+ if(code >= 400 && code < 500)
+ return "Bad Request";
+
+ if(code >= 500 && code < 600)
+ return "Server Error";
+
+ return "Undefined Error";
+ }
+}
+
+static inline char *http_header_parse(struct web_client *w, char *s, int parse_useragent) {
+ static uint32_t hash_origin = 0, hash_connection = 0, hash_accept_encoding = 0, hash_donottrack = 0, hash_useragent = 0;
+
+ if(unlikely(!hash_origin)) {
+ hash_origin = simple_uhash("Origin");
+ hash_connection = simple_uhash("Connection");
+ hash_accept_encoding = simple_uhash("Accept-Encoding");
+ hash_donottrack = simple_uhash("DNT");
+ hash_useragent = simple_uhash("User-Agent");
+ }
+
+ char *e = s;
+
+ // find the :
+ while(*e && *e != ':') e++;
+ if(!*e) return e;
+
+ // get the name
+ *e = '\0';
+
+ // find the value
+ char *v = e + 1, *ve;
+
+ // skip leading spaces from value
+ while(*v == ' ') v++;
+ ve = v;
+
+ // find the \r
+ while(*ve && *ve != '\r') ve++;
+ if(!*ve || ve[1] != '\n') {
+ *e = ':';
+ return ve;
+ }
+
+ // terminate the value
+ *ve = '\0';
+
+ // fprintf(stderr, "HEADER: '%s' = '%s'\n", s, v);
+ uint32_t hash = simple_uhash(s);
+
+ if(hash == hash_origin && !strcasecmp(s, "Origin"))
+ strncpyz(w->origin, v, NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE);
+
+ else if(hash == hash_connection && !strcasecmp(s, "Connection")) {
+ if(strcasestr(v, "keep-alive"))
+ web_client_enable_keepalive(w);
+ }
+ else if(respect_web_browser_do_not_track_policy && hash == hash_donottrack && !strcasecmp(s, "DNT")) {
+ if(*v == '0') web_client_disable_donottrack(w);
+ else if(*v == '1') web_client_enable_donottrack(w);
+ }
+ else if(parse_useragent && hash == hash_useragent && !strcasecmp(s, "User-Agent")) {
+ w->user_agent = strdupz(v);
+ }
+#ifdef NETDATA_WITH_ZLIB
+ else if(hash == hash_accept_encoding && !strcasecmp(s, "Accept-Encoding")) {
+ if(web_enable_gzip) {
+ if(strcasestr(v, "gzip"))
+ web_client_enable_deflate(w, 1);
+ //
+ // does not seem to work
+ // else if(strcasestr(v, "deflate"))
+ // web_client_enable_deflate(w, 0);
+ }
+ }
+#endif /* NETDATA_WITH_ZLIB */
+
+ *e = ':';
+ *ve = '\r';
+ return ve;
+}
+
+// http_request_validate()
+// returns:
+// HTTP_VALIDATION_OK            : all good, process the request
+// HTTP_VALIDATION_NOT_SUPPORTED : the request is not supported
+// HTTP_VALIDATION_INCOMPLETE    : the request is incomplete - wait for more data
+
+typedef enum {
+ HTTP_VALIDATION_OK,
+ HTTP_VALIDATION_NOT_SUPPORTED,
+ HTTP_VALIDATION_INCOMPLETE
+} HTTP_VALIDATION;
+
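+// a minimal request that validates, for reference:
+//   "GET /index.html HTTP/1.1\r\n\r\n"
+// OPTIONS and STREAM requests follow the same structure with a different verb.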
+static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
+ char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL;
+
+ size_t last_pos = w->header_parse_last_size;
+ if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n
+ else last_pos = 0;
+
+ w->header_parse_tries++;
+ w->header_parse_last_size = buffer_strlen(w->response.data);
+
+ if(w->header_parse_tries > 1) {
+ if(w->header_parse_last_size < last_pos)
+ last_pos = 0;
+
+ if(strstr(&s[last_pos], "\r\n\r\n") == NULL) {
+ if(w->header_parse_tries > 10) {
+ info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data));
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ }
+
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+ }
+
+    // is it a valid request?
+ if(!strncmp(s, "GET ", 4)) {
+ encoded_url = s = &s[4];
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+ }
+ else if(!strncmp(s, "OPTIONS ", 8)) {
+ encoded_url = s = &s[8];
+ w->mode = WEB_CLIENT_MODE_OPTIONS;
+ }
+ else if(!strncmp(s, "STREAM ", 7)) {
+ encoded_url = s = &s[7];
+ w->mode = WEB_CLIENT_MODE_STREAM;
+ }
+ else {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ }
+
+ // find the SPACE + "HTTP/"
+ while(*s) {
+ // find the next space
+ while (*s && *s != ' ') s++;
+
+ // is it SPACE + "HTTP/" ?
+ if(*s && !strncmp(s, " HTTP/", 6)) break;
+ else s++;
+ }
+
+ // incomplete requests
+ if(unlikely(!*s)) {
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ // we have the end of encoded_url - remember it
+ char *ue = s;
+
+    // make sure we have a complete request
+ // complete requests contain: \r\n\r\n
+ while(*s) {
+        // find the next carriage return
+ while(*s && *s++ != '\r');
+
+ // did we reach the end?
+ if(unlikely(!*s)) break;
+
+ // is it \r\n ?
+ if(likely(*s++ == '\n')) {
+
+ // is it again \r\n ? (header end)
+ if(unlikely(*s == '\r' && s[1] == '\n')) {
+ // a valid complete HTTP request found
+
+ *ue = '\0';
+ url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1);
+ *ue = ' ';
+
+ // copy the URL - we are going to overwrite parts of it
+                // TODO -- ideally we should avoid copying buffers around
+ strncpyz(w->last_url, w->decoded_url, NETDATA_WEB_REQUEST_URL_SIZE);
+
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_OK;
+ }
+
+ // another header line
+ s = http_header_parse(w, s,
+ (w->mode == WEB_CLIENT_MODE_STREAM) // parse user agent
+ );
+ }
+ }
+
+ // incomplete request
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+}
+
+static inline void web_client_send_http_header(struct web_client *w) {
+ if(unlikely(w->response.code != 200))
+ buffer_no_cacheable(w->response.data);
+
+ // set a proper expiration date, if not already set
+ if(unlikely(!w->response.data->expires)) {
+ if(w->response.data->options & WB_CONTENT_NO_CACHEABLE)
+ w->response.data->expires = w->tv_ready.tv_sec + localhost->rrd_update_every;
+ else
+ w->response.data->expires = w->tv_ready.tv_sec + 86400;
+ }
+
+ // prepare the HTTP response header
+ debug(D_WEB_CLIENT, "%llu: Generating HTTP header with response %d.", w->id, w->response.code);
+
+ const char *content_type_string = web_content_type_to_string(w->response.data->contenttype);
+ const char *code_msg = web_response_code_to_string(w->response.code);
+
+ // prepare the last modified and expiration dates
+ char date[32], edate[32];
+ {
+ struct tm tmbuf, *tm;
+
+ tm = gmtime_r(&w->response.data->date, &tmbuf);
+ strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S %Z", tm);
+
+ tm = gmtime_r(&w->response.data->expires, &tmbuf);
+ strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", tm);
+ }
+
+ buffer_sprintf(w->response.header_output,
+ "HTTP/1.1 %d %s\r\n"
+ "Connection: %s\r\n"
+ "Server: NetData Embedded HTTP Server v%s\r\n"
+ "Access-Control-Allow-Origin: %s\r\n"
+ "Access-Control-Allow-Credentials: true\r\n"
+ "Content-Type: %s\r\n"
+ "Date: %s\r\n"
+ , w->response.code, code_msg
+ , web_client_has_keepalive(w)?"keep-alive":"close"
+ , VERSION
+ , w->origin
+ , content_type_string
+ , date
+ );
+
+ if(unlikely(web_x_frame_options))
+ buffer_sprintf(w->response.header_output, "X-Frame-Options: %s\r\n", web_x_frame_options);
+
+ if(w->cookie1[0] || w->cookie2[0]) {
+ if(w->cookie1[0]) {
+ buffer_sprintf(w->response.header_output,
+ "Set-Cookie: %s\r\n",
+ w->cookie1);
+ }
+
+ if(w->cookie2[0]) {
+ buffer_sprintf(w->response.header_output,
+ "Set-Cookie: %s\r\n",
+ w->cookie2);
+ }
+
+ if(respect_web_browser_do_not_track_policy)
+ buffer_sprintf(w->response.header_output,
+ "Tk: T;cookies\r\n");
+ }
+ else {
+ if(respect_web_browser_do_not_track_policy) {
+ if(web_client_has_tracking_required(w))
+ buffer_sprintf(w->response.header_output,
+ "Tk: T;cookies\r\n");
+ else
+ buffer_sprintf(w->response.header_output,
+ "Tk: N\r\n");
+ }
+ }
+
+ if(w->mode == WEB_CLIENT_MODE_OPTIONS) {
+ buffer_strcat(w->response.header_output,
+ "Access-Control-Allow-Methods: GET, OPTIONS\r\n"
+ "Access-Control-Allow-Headers: accept, x-requested-with, origin, content-type, cookie, pragma, cache-control\r\n"
+ "Access-Control-Max-Age: 1209600\r\n" // 86400 * 14
+ );
+ }
+ else {
+ buffer_sprintf(w->response.header_output,
+ "Cache-Control: %s\r\n"
+ "Expires: %s\r\n",
+ (w->response.data->options & WB_CONTENT_NO_CACHEABLE)?"no-cache":"public",
+ edate);
+ }
+
+ // copy a possibly available custom header
+ if(unlikely(buffer_strlen(w->response.header)))
+ buffer_strcat(w->response.header_output, buffer_tostring(w->response.header));
+
+ // headers related to the transfer method
+ if(likely(w->response.zoutput)) {
+ buffer_strcat(w->response.header_output,
+ "Content-Encoding: gzip\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ );
+ }
+ else {
+ if(likely((w->response.data->len || w->response.rlen))) {
+ // we know the content length, put it
+ buffer_sprintf(w->response.header_output, "Content-Length: %zu\r\n", w->response.data->len? w->response.data->len: w->response.rlen);
+ }
+ else {
+ // we don't know the content length, disable keep-alive
+ web_client_disable_keepalive(w);
+ }
+ }
+
+ // end of HTTP header
+ buffer_strcat(w->response.header_output, "\r\n");
+
+    // send the HTTP header
+ debug(D_WEB_DATA, "%llu: Sending response HTTP header of size %zu: '%s'"
+ , w->id
+ , buffer_strlen(w->response.header_output)
+ , buffer_tostring(w->response.header_output)
+ );
+
+ web_client_crock_socket(w);
+
+ size_t count = 0;
+ ssize_t bytes;
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+
+ if(bytes != (ssize_t) buffer_strlen(w->response.header_output)) {
+ if(bytes > 0)
+ w->stats_sent_bytes += bytes;
+
+ error("HTTP headers failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client."
+ , buffer_strlen(w->response.header_output)
+ , bytes);
+
+ WEB_CLIENT_IS_DEAD(w);
+ return;
+ }
+ else
+ w->stats_sent_bytes += bytes;
+}
+
+static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url);
+
+static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, char *url) {
+ static uint32_t hash_localhost = 0;
+
+ if(unlikely(!hash_localhost)) {
+ hash_localhost = simple_hash("localhost");
+ }
+
+ if(host != localhost) {
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Nesting of hosts is not allowed.");
+ return 400;
+ }
+
+ char *tok = mystrsep(&url, "/?&");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for host with name '%s'.", w->id, tok);
+
+ // copy the URL, we need it to serve files
+ w->last_url[0] = '/';
+ if(url && *url) strncpyz(&w->last_url[1], url, NETDATA_WEB_REQUEST_URL_SIZE - 1);
+ else w->last_url[1] = '\0';
+
+ uint32_t hash = simple_hash(tok);
+
+ host = rrdhost_find_by_hostname(tok, hash);
+ if(!host) host = rrdhost_find_by_guid(tok, hash);
+
+ if(host) return web_client_process_url(host, w, url);
+ }
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "This netdata does not maintain a database for host: ");
+ buffer_strcat_htmlescape(w->response.data, tok?tok:"");
+ return 404;
+}
+
+static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url) {
+ static uint32_t
+ hash_api = 0,
+ hash_netdata_conf = 0,
+ hash_host = 0;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ static uint32_t hash_exit = 0, hash_debug = 0, hash_mirror = 0;
+#endif
+
+ if(unlikely(!hash_api)) {
+ hash_api = simple_hash("api");
+ hash_netdata_conf = simple_hash("netdata.conf");
+ hash_host = simple_hash("host");
+#ifdef NETDATA_INTERNAL_CHECKS
+ hash_exit = simple_hash("exit");
+ hash_debug = simple_hash("debug");
+ hash_mirror = simple_hash("mirror");
+#endif
+ }
+
+ char *tok = mystrsep(&url, "/?");
+ if(likely(tok && *tok)) {
+ uint32_t hash = simple_hash(tok);
+ debug(D_WEB_CLIENT, "%llu: Processing command '%s'.", w->id, tok);
+
+ if(unlikely(hash == hash_api && strcmp(tok, "api") == 0)) { // current API
+ debug(D_WEB_CLIENT_ACCESS, "%llu: API request ...", w->id);
+ return check_host_and_call(host, w, url, web_client_api_request);
+ }
+ else if(unlikely(hash == hash_host && strcmp(tok, "host") == 0)) { // host switching
+ debug(D_WEB_CLIENT_ACCESS, "%llu: host switch request ...", w->id);
+ return web_client_switch_host(host, w, url);
+ }
+ else if(unlikely(hash == hash_netdata_conf && strcmp(tok, "netdata.conf") == 0)) { // netdata.conf
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: generating netdata.conf ...", w->id);
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ config_generate(w->response.data, 0);
+ return 200;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ else if(unlikely(hash == hash_exit && strcmp(tok, "exit") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+
+ if(!netdata_exit)
+ buffer_strcat(w->response.data, "ok, will do...");
+ else
+ buffer_strcat(w->response.data, "I am doing it already");
+
+ error("web request to exit received.");
+ netdata_cleanup_and_exit(0);
+ return 200;
+ }
+ else if(unlikely(hash == hash_debug && strcmp(tok, "debug") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ buffer_flush(w->response.data);
+
+ // get the name of the data to show
+ tok = mystrsep(&url, "/?&");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for RRD data with name '%s'.", w->id, tok);
+
+ // do we have such a data set?
+ RRDSET *st = rrdset_find_byname(host, tok);
+ if(!st) st = rrdset_find(host, tok);
+ if(!st) {
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Chart is not found: ");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: %s is not found.", w->id, tok);
+ return 404;
+ }
+
+ debug_flags |= D_RRD_STATS;
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))
+ rrdset_flag_clear(st, RRDSET_FLAG_DEBUG);
+ else
+ rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.data, "Chart has now debug %s: ", rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: debug for %s is %s.", w->id, tok, rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
+ return 200;
+ }
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "debug which chart?\r\n");
+ return 400;
+ }
+ else if(unlikely(hash == hash_mirror && strcmp(tok, "mirror") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Mirroring...", w->id);
+
+ // replace the zero bytes with spaces
+ buffer_char_replace(w->response.data, '\0', ' ');
+
+ // just leave the buffer as is
+ // it will be copied back to the client
+
+ return 200;
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+
+ char filename[FILENAME_MAX+1];
+ url = filename;
+ strncpyz(filename, w->last_url, FILENAME_MAX);
+ tok = mystrsep(&url, "?");
+ buffer_flush(w->response.data);
+ return mysendfile(w, (tok && *tok)?tok:"/");
+}
+
+void web_client_process_request(struct web_client *w) {
+
+ // start timing us
+ now_realtime_timeval(&w->tv_in);
+
+ switch(http_request_validate(w)) {
+ case HTTP_VALIDATION_OK:
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_STREAM:
+ if(unlikely(!web_client_can_access_stream(w))) {
+ web_client_permission_denied(w);
+ return;
+ }
+
+ w->response.code = rrdpush_receiver_thread_spawn(localhost, w, w->decoded_url);
+ return;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ if(unlikely(!web_client_can_access_dashboard(w) && !web_client_can_access_registry(w) && !web_client_can_access_badges(w))) {
+ web_client_permission_denied(w);
+ return;
+ }
+
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "OK");
+ w->response.code = 200;
+ break;
+
+ case WEB_CLIENT_MODE_FILECOPY:
+ case WEB_CLIENT_MODE_NORMAL:
+ if(unlikely(!web_client_can_access_dashboard(w) && !web_client_can_access_registry(w) && !web_client_can_access_badges(w))) {
+ web_client_permission_denied(w);
+ return;
+ }
+
+ w->response.code = web_client_process_url(localhost, w, w->decoded_url);
+ break;
+ }
+ break;
+
+ case HTTP_VALIDATION_INCOMPLETE:
+ if(w->response.data->len > NETDATA_WEB_REQUEST_MAX_SIZE) {
+ strcpy(w->last_url, "too big request");
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Received request is too big (%zu bytes).", w->id, w->response.data->len);
+
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Received request is too big (%zu bytes).\r\n", w->response.data->len);
+ w->response.code = 400;
+ }
+ else {
+ // wait for more data
+ return;
+ }
+ break;
+
+ case HTTP_VALIDATION_NOT_SUPPORTED:
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Cannot understand '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "I don't understand you...\r\n");
+ w->response.code = 400;
+ break;
+ }
+
+ // keep track of the time we done processing
+ now_realtime_timeval(&w->tv_ready);
+
+ w->response.sent = 0;
+
+ // set a proper last modified date
+ if(unlikely(!w->response.data->date))
+ w->response.data->date = w->tv_ready.tv_sec;
+
+ web_client_send_http_header(w);
+
+ // enable sending immediately if we have data
+ if(w->response.data->len) web_client_enable_wait_send(w);
+ else web_client_disable_wait_send(w);
+
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_STREAM:
+ debug(D_WEB_CLIENT, "%llu: STREAM done.", w->id);
+ break;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ debug(D_WEB_CLIENT, "%llu: Done preparing the OPTIONS response. Sending data (%zu bytes) to client.", w->id, w->response.data->len);
+ break;
+
+ case WEB_CLIENT_MODE_NORMAL:
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Sending data (%zu bytes) to client.", w->id, w->response.data->len);
+ break;
+
+ case WEB_CLIENT_MODE_FILECOPY:
+ if(w->response.rlen) {
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending data file of %zu bytes to client.", w->id, w->response.rlen);
+ web_client_enable_wait_receive(w);
+
+ /*
+ // utilize the kernel sendfile() for copying the file to the socket.
+ // this block of code can be commented, without anything missing.
+ // when it is commented, the program will copy the data using async I/O.
+ {
+ long len = sendfile(w->ofd, w->ifd, NULL, w->response.data->rbytes);
+ if(len != w->response.data->rbytes)
+ error("%llu: sendfile() should copy %ld bytes, but copied %ld. Falling back to manual copy.", w->id, w->response.data->rbytes, len);
+ else
+ web_client_request_done(w);
+ }
+ */
+ }
+ else
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending an unknown amount of bytes to client.", w->id);
+ break;
+
+ default:
+ fatal("%llu: Unknown client mode %u.", w->id, w->mode);
+ break;
+ }
+}
+
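+// chunked transfer encoding helpers (used only for compressed output):
+// every chunk goes out as "<size-in-hex>\r\n<data>\r\n" and the response is
+// terminated with a zero-length chunk, e.g.:
+//   "400\r\n" + 1024 bytes of gzip data + "\r\n" ... "0\r\n\r\n"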
+ssize_t web_client_send_chunk_header(struct web_client *w, size_t len)
+{
+ debug(D_DEFLATE, "%llu: OPEN CHUNK of %zu bytes (hex: %zx).", w->id, len, len);
+ char buf[24];
+ sprintf(buf, "%zX\r\n", len);
+
+ ssize_t bytes = send(w->ofd, buf, strlen(buf), 0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk header %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk header to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk header to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_chunk_close(struct web_client *w)
+{
+ //debug(D_DEFLATE, "%llu: CLOSE CHUNK.", w->id);
+
+ ssize_t bytes = send(w->ofd, "\r\n", 2, 0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk suffix to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk suffix to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_chunk_finalize(struct web_client *w)
+{
+ //debug(D_DEFLATE, "%llu: FINALIZE CHUNK.", w->id);
+
+ ssize_t bytes = send(w->ofd, "\r\n0\r\n\r\n", 7, 0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk finalize suffix to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk finalize suffix to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+#ifdef NETDATA_WITH_ZLIB
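+// compressed sending works in rounds:
+//   1. when the previous zlib output is fully sent (zhave == zsent), feed the
+//      bytes not yet compressed to zlib, deflate them into zbuffer and open a
+//      new HTTP chunk for the result;
+//   2. otherwise, keep sending the remainder of zbuffer;
+//   3. when everything is compressed and sent, finalize the chunked response
+//      and either keep the connection alive or close it.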
+ssize_t web_client_send_deflate(struct web_client *w)
+{
+ ssize_t len = 0, t = 0;
+
+ // when using compression,
+ // w->response.sent is the amount of bytes passed through compression
+
+ debug(D_DEFLATE, "%llu: web_client_send_deflate(): w->response.data->len = %zu, w->response.sent = %zu, w->response.zhave = %zu, w->response.zsent = %zu, w->response.zstream.avail_in = %u, w->response.zstream.avail_out = %u, w->response.zstream.total_in = %lu, w->response.zstream.total_out = %lu.",
+ w->id, w->response.data->len, w->response.sent, w->response.zhave, w->response.zsent, w->response.zstream.avail_in, w->response.zstream.avail_out, w->response.zstream.total_in, w->response.zstream.total_out);
+
+ if(w->response.data->len - w->response.sent == 0 && w->response.zstream.avail_in == 0 && w->response.zhave == w->response.zsent && w->response.zstream.avail_out != 0) {
+ // there is nothing to send
+
+ debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id);
+
+ // finalize the chunk
+ if(w->response.sent != 0) {
+ t = web_client_send_chunk_finalize(w);
+ if(t < 0) return t;
+ }
+
+ if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
+ // we have to wait, more data will come
+ debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
+ web_client_disable_wait_send(w);
+ return t;
+ }
+
+ if(unlikely(!web_client_has_keepalive(w))) {
+ debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
+ WEB_CLIENT_IS_DEAD(w);
+ return t;
+ }
+
+ // reset the client
+ web_client_request_done(w);
+ debug(D_WEB_CLIENT, "%llu: Done sending all data on socket.", w->id);
+ return t;
+ }
+
+ if(w->response.zhave == w->response.zsent) {
+ // compress more input data
+
+ // close the previous open chunk
+ if(w->response.sent != 0) {
+ t = web_client_send_chunk_close(w);
+ if(t < 0) return t;
+ }
+
+ debug(D_DEFLATE, "%llu: Compressing %zu new bytes starting from %zu (and %u left behind).", w->id, (w->response.data->len - w->response.sent), w->response.sent, w->response.zstream.avail_in);
+
+ // give the compressor all the data not passed through the compressor yet
+ if(w->response.data->len > w->response.sent) {
+ w->response.zstream.next_in = (Bytef *)&w->response.data->buffer[w->response.sent - w->response.zstream.avail_in];
+ w->response.zstream.avail_in += (uInt) (w->response.data->len - w->response.sent);
+ }
+
+ // reset the compressor output buffer
+ w->response.zstream.next_out = w->response.zbuffer;
+ w->response.zstream.avail_out = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE;
+
+ // ask for FINISH if we have all the input
+ int flush = Z_SYNC_FLUSH;
+ if(w->mode == WEB_CLIENT_MODE_NORMAL
+ || (w->mode == WEB_CLIENT_MODE_FILECOPY && !web_client_has_wait_receive(w) && w->response.data->len == w->response.rlen)) {
+ flush = Z_FINISH;
+ debug(D_DEFLATE, "%llu: Requesting Z_FINISH, if possible.", w->id);
+ }
+ else {
+ debug(D_DEFLATE, "%llu: Requesting Z_SYNC_FLUSH.", w->id);
+ }
+
+ // compress
+ if(deflate(&w->response.zstream, flush) == Z_STREAM_ERROR) {
+ error("%llu: Compression failed. Closing down client.", w->id);
+ web_client_request_done(w);
+ return(-1);
+ }
+
+ w->response.zhave = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE - w->response.zstream.avail_out;
+ w->response.zsent = 0;
+
+ // keep track of the bytes passed through the compressor
+ w->response.sent = w->response.data->len;
+
+ debug(D_DEFLATE, "%llu: Compression produced %zu bytes.", w->id, w->response.zhave);
+
+ // open a new chunk
+ ssize_t t2 = web_client_send_chunk_header(w, w->response.zhave);
+ if(t2 < 0) return t2;
+ t += t2;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: Sending %zu bytes of data (+%zd of chunk header).", w->id, w->response.zhave - w->response.zsent, t);
+
+ len = send(w->ofd, &w->response.zbuffer[w->response.zsent], (size_t) (w->response.zhave - w->response.zsent), MSG_DONTWAIT);
+ if(len > 0) {
+ w->stats_sent_bytes += len;
+ w->response.zsent += len;
+ len += t;
+ debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, len);
+ }
+ else if(len == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client (zhave = %zu, zsent = %zu, need to send = %zu).",
+ w->id, w->response.zhave, w->response.zsent, w->response.zhave - w->response.zsent);
+
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(len);
+}
+#endif // NETDATA_WITH_ZLIB
+
+ssize_t web_client_send(struct web_client *w) {
+#ifdef NETDATA_WITH_ZLIB
+ if(likely(w->response.zoutput)) return web_client_send_deflate(w);
+#endif // NETDATA_WITH_ZLIB
+
+ ssize_t bytes;
+
+ if(unlikely(w->response.data->len - w->response.sent == 0)) {
+ // there is nothing to send
+
+ debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id);
+
+ // there can be two cases for this
+ // A. we have done everything
+ // B. we temporarily have nothing to send, waiting for the buffer to be filled by ifd
+
+ if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
+ // we have to wait, more data will come
+ debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
+ web_client_disable_wait_send(w);
+ return 0;
+ }
+
+ if(unlikely(!web_client_has_keepalive(w))) {
+ debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
+ WEB_CLIENT_IS_DEAD(w);
+ return 0;
+ }
+
+ web_client_request_done(w);
+ debug(D_WEB_CLIENT, "%llu: Done sending all data on socket. Waiting for next request on the same socket.", w->id);
+ return 0;
+ }
+
+ bytes = send(w->ofd, &w->response.data->buffer[w->response.sent], w->response.data->len - w->response.sent, MSG_DONTWAIT);
+ if(likely(bytes > 0)) {
+ w->stats_sent_bytes += bytes;
+ w->response.sent += bytes;
+ debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, bytes);
+ }
+ else if(likely(bytes == 0)) {
+ debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
+
+ssize_t web_client_read_file(struct web_client *w)
+{
+ if(unlikely(w->response.rlen > w->response.data->size))
+ buffer_need_bytes(w->response.data, w->response.rlen - w->response.data->size);
+
+ if(unlikely(w->response.rlen <= w->response.data->len))
+ return 0;
+
+ ssize_t left = w->response.rlen - w->response.data->len;
+ ssize_t bytes = read(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t)left);
+ if(likely(bytes > 0)) {
+ size_t old = w->response.data->len;
+ (void)old;
+
+ w->response.data->len += bytes;
+ w->response.data->buffer[w->response.data->len] = '\0';
+
+ debug(D_WEB_CLIENT, "%llu: Read %zd bytes.", w->id, bytes);
+ debug(D_WEB_DATA, "%llu: Read data: '%s'.", w->id, &w->response.data->buffer[old]);
+
+ web_client_enable_wait_send(w);
+
+ if(w->response.rlen && w->response.data->len >= w->response.rlen)
+ web_client_disable_wait_receive(w);
+ }
+ else if(likely(bytes == 0)) {
+ debug(D_WEB_CLIENT, "%llu: Out of input file data.", w->id);
+
+ // if we cannot read, it means we have an error on input.
+ // if however, we are copying a file from ifd to ofd, we should not return an error.
+ // in this case, the error should be generated when the file has been sent to the client.
+
+ // we are copying data from ifd to ofd
+ // let it finish copying...
+ web_client_disable_wait_receive(w);
+
+ debug(D_WEB_CLIENT, "%llu: Read the whole file.", w->id);
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != w->ofd) close(w->ifd);
+ }
+
+ w->ifd = w->ofd;
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: read data failed.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
+
+ssize_t web_client_receive(struct web_client *w)
+{
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY))
+ return web_client_read_file(w);
+
+ // do we have any space for more data?
+ buffer_need_bytes(w->response.data, NETDATA_WEB_REQUEST_RECEIVE_SIZE);
+
+ ssize_t left = w->response.data->size - w->response.data->len;
+ ssize_t bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+
+ if(likely(bytes > 0)) {
+ w->stats_received_bytes += bytes;
+
+ size_t old = w->response.data->len;
+ (void)old;
+
+ w->response.data->len += bytes;
+ w->response.data->buffer[w->response.data->len] = '\0';
+
+ debug(D_WEB_CLIENT, "%llu: Received %zd bytes.", w->id, bytes);
+ debug(D_WEB_DATA, "%llu: Received data: '%s'.", w->id, &w->response.data->buffer[old]);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: receive data failed.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
diff --git a/web/server/web_client.h b/web/server/web_client.h
new file mode 100644
index 000000000..b9e528fca
--- /dev/null
+++ b/web/server/web_client.h
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_H
+#define NETDATA_WEB_CLIENT_H 1
+
+#include "libnetdata/libnetdata.h"
+
+#ifdef NETDATA_WITH_ZLIB
+extern int web_enable_gzip,
+ web_gzip_level,
+ web_gzip_strategy;
+#endif /* NETDATA_WITH_ZLIB */
+
+extern int respect_web_browser_do_not_track_policy;
+extern char *web_x_frame_options;
+
+typedef enum web_client_mode {
+ WEB_CLIENT_MODE_NORMAL = 0,
+ WEB_CLIENT_MODE_FILECOPY = 1,
+ WEB_CLIENT_MODE_OPTIONS = 2,
+ WEB_CLIENT_MODE_STREAM = 3
+} WEB_CLIENT_MODE;
+
+typedef enum web_client_flags {
+ WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead
+
+ WEB_CLIENT_FLAG_KEEPALIVE = 1 << 2, // if set, the web client will be re-used
+
+ WEB_CLIENT_FLAG_WAIT_RECEIVE = 1 << 3, // if set, we are waiting more input data
+ WEB_CLIENT_FLAG_WAIT_SEND = 1 << 4, // if set, we have data to send to the client
+
+ WEB_CLIENT_FLAG_DO_NOT_TRACK = 1 << 5, // if set, we should not set cookies on this client
+ WEB_CLIENT_FLAG_TRACKING_REQUIRED = 1 << 6, // if set, we need to send cookies
+
+ WEB_CLIENT_FLAG_TCP_CLIENT = 1 << 7, // if set, the client is using a TCP socket
+ WEB_CLIENT_FLAG_UNIX_CLIENT = 1 << 8, // if set, the client is using a UNIX socket
+
+ WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET = 1 << 9, // don't close the socket when cleaning up (static-threaded web server)
+} WEB_CLIENT_FLAGS;
+
+//#ifdef HAVE_C___ATOMIC
+//#define web_client_flag_check(w, flag) (__atomic_load_n(&((w)->flags), __ATOMIC_SEQ_CST) & flag)
+//#define web_client_flag_set(w, flag) __atomic_or_fetch(&((w)->flags), flag, __ATOMIC_SEQ_CST)
+//#define web_client_flag_clear(w, flag) __atomic_and_fetch(&((w)->flags), ~flag, __ATOMIC_SEQ_CST)
+//#else
+#define web_client_flag_check(w, flag) ((w)->flags & (flag))
+#define web_client_flag_set(w, flag) (w)->flags |= flag
+#define web_client_flag_clear(w, flag) (w)->flags &= ~flag
+//#endif
+
+#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD)
+#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD)
+
+#define web_client_has_keepalive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_enable_keepalive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_disable_keepalive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_KEEPALIVE)
+
+#define web_client_has_donottrack(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_enable_donottrack(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_disable_donottrack(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+
+#define web_client_has_tracking_required(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_enable_tracking_required(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_disable_tracking_required(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+
+#define web_client_has_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_enable_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_disable_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+
+#define web_client_has_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_enable_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_disable_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_SEND)
+
+#define web_client_set_tcp(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+#define web_client_set_unix(w) web_client_flag_set(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+#define web_client_check_unix(w) web_client_flag_check(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+#define web_client_check_tcp(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+
+#define web_client_is_corkable(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
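+
+// Example (illustrative, not upstream code): servers are expected to change
+// client state only through the helpers above, never by touching ->flags
+// directly, e.g.
+//
+//     web_client_enable_wait_send(w);      // there is response data to write
+//     if(web_client_check_dead(w)) {
+//         // the connection must be torn down; stop serving this client
+//     }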
+
+#define NETDATA_WEB_REQUEST_URL_SIZE 8192
+#define NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE 16384
+#define NETDATA_WEB_RESPONSE_HEADER_SIZE 4096
+#define NETDATA_WEB_REQUEST_COOKIE_SIZE 1024
+#define NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE 1024
+#define NETDATA_WEB_RESPONSE_INITIAL_SIZE 16384
+#define NETDATA_WEB_REQUEST_RECEIVE_SIZE 16384
+#define NETDATA_WEB_REQUEST_MAX_SIZE 16384
+
+struct response {
+ BUFFER *header; // our response header
+ BUFFER *header_output; // internal use
+ BUFFER *data; // our response data buffer
+
+ int code; // the HTTP response code
+
+    size_t rlen;                    // if non-zero, the expected size of ifd (input of filecopy)
+ size_t sent; // current data length sent to output
+
+ int zoutput; // if set to 1, web_client_send() will send compressed data
+#ifdef NETDATA_WITH_ZLIB
+ z_stream zstream; // zlib stream for sending compressed output to client
+ Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]; // temporary buffer for storing compressed output
+ size_t zsent; // the compressed bytes we have sent to the client
+ size_t zhave; // the compressed bytes that we have received from zlib
+ unsigned int zinitialized:1;
+#endif /* NETDATA_WITH_ZLIB */
+
+};
+
+typedef enum web_client_acl {
+ WEB_CLIENT_ACL_NONE = 0,
+ WEB_CLIENT_ACL_NOCHECK = 0,
+ WEB_CLIENT_ACL_DASHBOARD = 1 << 0,
+ WEB_CLIENT_ACL_REGISTRY = 1 << 1,
+ WEB_CLIENT_ACL_BADGE = 1 << 2
+} WEB_CLIENT_ACL;
+
+#define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD)
+#define web_client_can_access_registry(w) ((w)->acl & WEB_CLIENT_ACL_REGISTRY)
+#define web_client_can_access_badges(w) ((w)->acl & WEB_CLIENT_ACL_BADGE)
+
+#define web_client_can_access_stream(w) \
+ (!web_allow_streaming_from || simple_pattern_matches(web_allow_streaming_from, (w)->client_ip))
+
+#define web_client_can_access_netdataconf(w) \
+ (!web_allow_netdataconf_from || simple_pattern_matches(web_allow_netdataconf_from, (w)->client_ip))
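+
+// Example (sketch of the intended use; web_client_permission_denied() is
+// declared further down in this header): request handlers guard their
+// endpoints with these checks, e.g.
+//
+//     if(unlikely(!web_client_can_access_dashboard(w)))
+//         return web_client_permission_denied(w);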
+
+struct web_client {
+ unsigned long long id;
+
+ WEB_CLIENT_FLAGS flags; // status flags for the client
+ WEB_CLIENT_MODE mode; // the operational mode of the client
+ WEB_CLIENT_ACL acl; // the access list of the client
+
+ size_t header_parse_tries;
+ size_t header_parse_last_size;
+
+ int tcp_cork; // 1 = we have a cork on the socket
+
+ int ifd;
+ int ofd;
+
+ char client_ip[NI_MAXHOST+1];
+ char client_port[NI_MAXSERV+1];
+
+ char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode the URL in this buffer
+ char last_url[NETDATA_WEB_REQUEST_URL_SIZE+1]; // we keep a copy of the decoded URL here
+
+ struct timeval tv_in, tv_ready;
+
+ char cookie1[NETDATA_WEB_REQUEST_COOKIE_SIZE+1];
+ char cookie2[NETDATA_WEB_REQUEST_COOKIE_SIZE+1];
+ char origin[NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE+1];
+ char *user_agent;
+
+ struct response response;
+
+ size_t stats_received_bytes;
+ size_t stats_sent_bytes;
+
+ // cache of web_client allocations
+ struct web_client *prev; // maintain a linked list of web clients
+ struct web_client *next; // for the web servers that need it
+
+ // MULTI-THREADED WEB SERVER MEMBERS
+ netdata_thread_t thread; // the thread servicing this client
+ volatile int running; // 1 when the thread runs, 0 otherwise
+
+ // STATIC-THREADED WEB SERVER MEMBERS
+ size_t pollinfo_slot; // POLLINFO slot of the web client
+ size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read
+};
+
+extern uid_t web_files_uid(void);
+extern uid_t web_files_gid(void);
+
+extern int web_client_permission_denied(struct web_client *w);
+
+extern ssize_t web_client_send(struct web_client *w);
+extern ssize_t web_client_receive(struct web_client *w);
+extern ssize_t web_client_read_file(struct web_client *w);
+
+extern void web_client_process_request(struct web_client *w);
+extern void web_client_request_done(struct web_client *w);
+
+extern void buffer_data_options2string(BUFFER *wb, uint32_t options);
+
+extern int mysendfile(struct web_client *w, char *filename);
+
+#include "daemon/common.h"
+
+#endif /* NETDATA_WEB_CLIENT_H */
diff --git a/web/server/web_client_cache.c b/web/server/web_client_cache.c
new file mode 100644
index 000000000..ab470560e
--- /dev/null
+++ b/web/server/web_client_cache.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_client_cache.h"
+
+// ----------------------------------------------------------------------------
+// allocate and free web_clients
+
+static void web_client_zero(struct web_client *w) {
+ // zero everything about it - but keep the buffers
+
+ // remember the pointers to the buffers
+ BUFFER *b1 = w->response.data;
+ BUFFER *b2 = w->response.header;
+ BUFFER *b3 = w->response.header_output;
+
+ // empty the buffers
+ buffer_flush(b1);
+ buffer_flush(b2);
+ buffer_flush(b3);
+
+ freez(w->user_agent);
+
+ // zero everything
+ memset(w, 0, sizeof(struct web_client));
+
+ // restore the pointers of the buffers
+ w->response.data = b1;
+ w->response.header = b2;
+ w->response.header_output = b3;
+}
+
+static void web_client_free(struct web_client *w) {
+ buffer_free(w->response.header_output);
+ buffer_free(w->response.header);
+ buffer_free(w->response.data);
+ freez(w->user_agent);
+ freez(w);
+}
+
+static struct web_client *web_client_alloc(void) {
+ struct web_client *w = callocz(1, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ return w;
+}
+
+// ----------------------------------------------------------------------------
+// web clients caching
+
+// When clients connect and disconnect, avoid allocating and releasing memory.
+// Instead, when new clients get connected, reuse any memory previously allocated
+// for serving web clients that are now disconnected.
+
+// The size of the cache is adaptive: it keeps cached (available) structures
+// for up to 2x the number of currently connected clients.
+
+// Caches per web server mode:
+// SINGLE-THREADED : 1 cache is maintained
+// MULTI-THREADED  : 1 cache is maintained
+// STATIC-THREADED : 1 cache for each thread of the web server
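+
+// Example (illustrative): every serving thread pairs the two calls below
+// around the lifetime of a connection, so released structures stay in that
+// thread's cache for the next client:
+//
+//     struct web_client *w = web_client_get_from_cache_or_allocate();
+//     // ... receive the request, process it, send the response ...
+//     web_client_release(w);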
+
+__thread struct clients_cache web_clients_cache = {
+ .pid = 0,
+ .used = NULL,
+ .used_count = 0,
+ .avail = NULL,
+ .avail_count = 0,
+ .allocated = 0,
+ .reused = 0
+};
+
+inline void web_client_cache_verify(int force) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ static __thread size_t count = 0;
+ count++;
+
+ if(unlikely(force || count > 1000)) {
+ count = 0;
+
+ struct web_client *w;
+ size_t used = 0, avail = 0;
+ for(w = web_clients_cache.used; w ; w = w->next) used++;
+ for(w = web_clients_cache.avail; w ; w = w->next) avail++;
+
+ info("web_client_cache has %zu (%zu) used and %zu (%zu) available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , used, web_clients_cache.used_count
+ , avail, web_clients_cache.avail_count
+ , web_clients_cache.allocated
+ , web_clients_cache.reused
+ , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
+ );
+ }
+#else
+ if(unlikely(force)) {
+ info("web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , web_clients_cache.used_count
+ , web_clients_cache.avail_count
+ , web_clients_cache.allocated
+ , web_clients_cache.reused
+ , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
+ );
+ }
+#endif
+}
+
+// destroy the cache and free all the memory it uses
+void web_client_cache_destroy(void) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+
+ web_client_cache_verify(1);
+#endif
+
+ netdata_thread_disable_cancelability();
+
+ struct web_client *w, *t;
+
+ w = web_clients_cache.used;
+ while(w) {
+ t = w;
+ w = w->next;
+ web_client_free(t);
+ }
+ web_clients_cache.used = NULL;
+ web_clients_cache.used_count = 0;
+
+ w = web_clients_cache.avail;
+ while(w) {
+ t = w;
+ w = w->next;
+ web_client_free(t);
+ }
+ web_clients_cache.avail = NULL;
+ web_clients_cache.avail_count = 0;
+
+ netdata_thread_enable_cancelability();
+}
+
+struct web_client *web_client_get_from_cache_or_allocate(void) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid == 0))
+ web_clients_cache.pid = gettid();
+
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+#endif
+
+ netdata_thread_disable_cancelability();
+
+ struct web_client *w = web_clients_cache.avail;
+
+ if(w) {
+ // get it from avail
+ if (w == web_clients_cache.avail) web_clients_cache.avail = w->next;
+ if(w->prev) w->prev->next = w->next;
+ if(w->next) w->next->prev = w->prev;
+ web_clients_cache.avail_count--;
+ web_client_zero(w);
+ web_clients_cache.reused++;
+ }
+ else {
+ // allocate it
+ w = web_client_alloc();
+ web_clients_cache.allocated++;
+ }
+
+ // link it to used web clients
+ if (web_clients_cache.used) web_clients_cache.used->prev = w;
+ w->next = web_clients_cache.used;
+ w->prev = NULL;
+ web_clients_cache.used = w;
+ web_clients_cache.used_count++;
+
+ // initialize it
+ w->id = web_client_connected();
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+
+ netdata_thread_enable_cancelability();
+
+ return w;
+}
+
+void web_client_release(struct web_client *w) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+
+ if(unlikely(w->running))
+        error("%llu: releasing web client from %s port %s, but it is still running.", w->id, w->client_ip, w->client_port);
+#endif
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Closing web client from %s port %s.", w->id, w->client_ip, w->client_port);
+
+ web_server_log_connection(w, "DISCONNECTED");
+ web_client_request_done(w);
+ web_client_disconnected();
+
+ netdata_thread_disable_cancelability();
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != -1) close(w->ifd);
+ if (w->ofd != -1 && w->ofd != w->ifd) close(w->ofd);
+ w->ifd = w->ofd = -1;
+ }
+
+ // unlink it from the used
+ if (w == web_clients_cache.used) web_clients_cache.used = w->next;
+ if(w->prev) w->prev->next = w->next;
+ if(w->next) w->next->prev = w->prev;
+ web_clients_cache.used_count--;
+
+ if(web_clients_cache.avail_count >= 2 * web_clients_cache.used_count) {
+ // we have too many of them - free it
+ web_client_free(w);
+ }
+ else {
+ // link it to the avail
+ if (web_clients_cache.avail) web_clients_cache.avail->prev = w;
+ w->next = web_clients_cache.avail;
+ w->prev = NULL;
+ web_clients_cache.avail = w;
+ web_clients_cache.avail_count++;
+ }
+
+ netdata_thread_enable_cancelability();
+}
+
diff --git a/web/server/web_client_cache.h b/web/server/web_client_cache.h
new file mode 100644
index 000000000..f63888000
--- /dev/null
+++ b/web/server/web_client_cache.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_CACHE_H
+#define NETDATA_WEB_CLIENT_CACHE_H
+
+#include "libnetdata/libnetdata.h"
+#include "web_client.h"
+
+struct clients_cache {
+ pid_t pid;
+
+ struct web_client *used; // the structures of the currently connected clients
+    size_t used_count;                  // the number of currently connected clients
+
+ struct web_client *avail; // the cached structures, available for future clients
+ size_t avail_count; // the number of cached structures
+
+ size_t reused; // the number of re-uses
+ size_t allocated; // the number of allocations
+};
+
+extern __thread struct clients_cache web_clients_cache;
+
+extern void web_client_release(struct web_client *w);
+extern struct web_client *web_client_get_from_cache_or_allocate(void);
+extern void web_client_cache_destroy(void);
+extern void web_client_cache_verify(int force);
+
+#include "web_server.h"
+
+#endif //NETDATA_WEB_CLIENT_CACHE_H
diff --git a/web/server/web_server.c b/web/server/web_server.c
new file mode 100644
index 000000000..5a68b125e
--- /dev/null
+++ b/web/server/web_server.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_server.h"
+
+// this file includes 3 web servers:
+//
+// 1. single-threaded, based on select()
+// 2. multi-threaded, based on poll(); it spawns threads to handle the requests, each based on select()
+// 3. static-threaded, based on poll(), using a fixed number of threads (configured in netdata.conf)
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+// --------------------------------------------------------------------------------------
+
+WEB_SERVER_MODE web_server_mode_id(const char *mode) {
+ if(!strcmp(mode, "none"))
+ return WEB_SERVER_MODE_NONE;
+ else if(!strcmp(mode, "single") || !strcmp(mode, "single-threaded"))
+ return WEB_SERVER_MODE_SINGLE_THREADED;
+ else if(!strcmp(mode, "static") || !strcmp(mode, "static-threaded"))
+ return WEB_SERVER_MODE_STATIC_THREADED;
+ else // if(!strcmp(mode, "multi") || !strcmp(mode, "multi-threaded"))
+ return WEB_SERVER_MODE_MULTI_THREADED;
+}
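+
+// Example (sketch, assuming the caller reads the mode string from the [web]
+// section of netdata.conf with libnetdata's config_get() helper):
+//
+//     web_server_mode = web_server_mode_id(
+//             config_get(CONFIG_SECTION_WEB, "mode", web_server_mode_name(web_server_mode)));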
+
+const char *web_server_mode_name(WEB_SERVER_MODE id) {
+ switch(id) {
+ case WEB_SERVER_MODE_NONE:
+ return "none";
+
+ case WEB_SERVER_MODE_SINGLE_THREADED:
+ return "single-threaded";
+
+ case WEB_SERVER_MODE_STATIC_THREADED:
+ return "static-threaded";
+
+ default:
+ case WEB_SERVER_MODE_MULTI_THREADED:
+ return "multi-threaded";
+ }
+}
+
+// --------------------------------------------------------------------------------------
+// API sockets
+
+LISTEN_SOCKETS api_sockets = {
+ .config = &netdata_config,
+ .config_section = CONFIG_SECTION_WEB,
+ .default_bind_to = "*",
+ .default_port = API_LISTEN_PORT,
+ .backlog = API_LISTEN_BACKLOG
+};
+
+int api_listen_sockets_setup(void) {
+ int socks = listen_sockets_setup(&api_sockets);
+
+ if(!socks)
+ fatal("LISTENER: Cannot listen on any API socket. Exiting...");
+
+ return socks;
+}
+
+
+// --------------------------------------------------------------------------------------
+// access lists
+
+SIMPLE_PATTERN *web_allow_connections_from = NULL;
+SIMPLE_PATTERN *web_allow_streaming_from = NULL;
+SIMPLE_PATTERN *web_allow_netdataconf_from = NULL;
+
+// WEB_CLIENT_ACL
+SIMPLE_PATTERN *web_allow_dashboard_from = NULL;
+SIMPLE_PATTERN *web_allow_registry_from = NULL;
+SIMPLE_PATTERN *web_allow_badges_from = NULL;
+
+void web_client_update_acl_matches(struct web_client *w) {
+ w->acl = WEB_CLIENT_ACL_NONE;
+
+ if(!web_allow_dashboard_from || simple_pattern_matches(web_allow_dashboard_from, w->client_ip))
+ w->acl |= WEB_CLIENT_ACL_DASHBOARD;
+
+ if(!web_allow_registry_from || simple_pattern_matches(web_allow_registry_from, w->client_ip))
+ w->acl |= WEB_CLIENT_ACL_REGISTRY;
+
+ if(!web_allow_badges_from || simple_pattern_matches(web_allow_badges_from, w->client_ip))
+ w->acl |= WEB_CLIENT_ACL_BADGE;
+}
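+
+// Example (sketch, assuming the patterns are built at startup from netdata.conf
+// with libnetdata's simple_pattern_create(), e.g. for "allow dashboard from"):
+//
+//     web_allow_dashboard_from = simple_pattern_create(
+//             config_get(CONFIG_SECTION_WEB, "allow dashboard from", "*"),
+//             NULL, SIMPLE_PATTERN_EXACT);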
+
+
+// --------------------------------------------------------------------------------------
+
+void web_server_log_connection(struct web_client *w, const char *msg) {
+ log_access("%llu: %d '[%s]:%s' '%s'", w->id, gettid(), w->client_ip, w->client_port, msg);
+}
+
+// --------------------------------------------------------------------------------------
+
+void web_client_initialize_connection(struct web_client *w) {
+ int flag = 1;
+
+ if(unlikely(web_client_check_tcp(w) && setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0))
+ debug(D_WEB_CLIENT, "%llu: failed to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd);
+
+ flag = 1;
+ if(unlikely(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0))
+ debug(D_WEB_CLIENT, "%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd);
+
+ web_client_update_acl_matches(w);
+
+ w->origin[0] = '*'; w->origin[1] = '\0';
+ w->cookie1[0] = '\0'; w->cookie2[0] = '\0';
+ freez(w->user_agent); w->user_agent = NULL;
+
+ web_client_enable_wait_receive(w);
+
+ web_server_log_connection(w, "CONNECTED");
+
+ web_client_cache_verify(0);
+}
+
+struct web_client *web_client_create_on_listenfd(int listener) {
+ struct web_client *w;
+
+ w = web_client_get_from_cache_or_allocate();
+ w->ifd = w->ofd = accept_socket(listener, SOCK_NONBLOCK, w->client_ip, sizeof(w->client_ip), w->client_port, sizeof(w->client_port), web_allow_connections_from);
+
+ if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
+ if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
+
+ if (w->ifd == -1) {
+ if(errno == EPERM)
+ web_server_log_connection(w, "ACCESS DENIED");
+ else {
+ web_server_log_connection(w, "CONNECTION FAILED");
+ error("%llu: Failed to accept new incoming connection.", w->id);
+ }
+
+ web_client_release(w);
+ return NULL;
+ }
+
+ web_client_initialize_connection(w);
+ return(w);
+}
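+
+// Example (illustrative): a listener loop needs a single call per incoming
+// connection; a NULL return means the connection was rejected or failed and
+// has already been logged and released:
+//
+//     struct web_client *w = web_client_create_on_listenfd(listen_fd);
+//     if(likely(w)) {
+//         // hand w over to the server's event loop
+//     }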
+
diff --git a/web/server/web_server.h b/web/server/web_server.h
new file mode 100644
index 000000000..7777c8978
--- /dev/null
+++ b/web/server/web_server.h
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_H
+#define NETDATA_WEB_SERVER_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "web_client.h"
+
+#ifndef API_LISTEN_PORT
+#define API_LISTEN_PORT 19999
+#endif
+
+#ifndef API_LISTEN_BACKLOG
+#define API_LISTEN_BACKLOG 4096
+#endif
+
+typedef enum web_server_mode {
+ WEB_SERVER_MODE_SINGLE_THREADED,
+ WEB_SERVER_MODE_STATIC_THREADED,
+ WEB_SERVER_MODE_MULTI_THREADED,
+ WEB_SERVER_MODE_NONE
+} WEB_SERVER_MODE;
+
+extern SIMPLE_PATTERN *web_allow_connections_from;
+extern SIMPLE_PATTERN *web_allow_dashboard_from;
+extern SIMPLE_PATTERN *web_allow_registry_from;
+extern SIMPLE_PATTERN *web_allow_badges_from;
+extern SIMPLE_PATTERN *web_allow_streaming_from;
+extern SIMPLE_PATTERN *web_allow_netdataconf_from;
+
+extern WEB_SERVER_MODE web_server_mode;
+
+extern WEB_SERVER_MODE web_server_mode_id(const char *mode);
+extern const char *web_server_mode_name(WEB_SERVER_MODE id);
+
+extern int api_listen_sockets_setup(void);
+
+#define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60
+#define DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS 60
+extern int web_client_timeout;
+extern int web_client_first_request_timeout;
+extern long web_client_streaming_rate_t;
+
+#ifdef WEB_SERVER_INTERNALS
+extern LISTEN_SOCKETS api_sockets;
+extern void web_client_update_acl_matches(struct web_client *w);
+extern void web_server_log_connection(struct web_client *w, const char *msg);
+extern void web_client_initialize_connection(struct web_client *w);
+extern struct web_client *web_client_create_on_listenfd(int listener);
+
+#include "web_client_cache.h"
+#endif // WEB_SERVER_INTERNALS
+
+#include "single/single-threaded.h"
+#include "multi/multi-threaded.h"
+#include "static/static-threaded.h"
+
+#include "daemon/common.h"
+
+#endif /* NETDATA_WEB_SERVER_H */