author     Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:19:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:20:17 +0000
commit     a64a253794ac64cb40befee54db53bde17dd0d49 (patch)
tree       c1024acc5f6e508814b944d99f112259bb28b1be /collectors/plugins.d
parent     New upstream version 1.10.0+dfsg (diff)
download   netdata-upstream/1.11.0+dfsg.tar.xz, netdata-upstream/1.11.0+dfsg.zip

New upstream version 1.11.0+dfsg (tag: upstream/1.11.0+dfsg)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  collectors/plugins.d/Makefile.am                                   11
-rw-r--r--  collectors/plugins.d/Makefile.in                                  647
-rw-r--r--  collectors/plugins.d/README.md                                    472
-rw-r--r--  collectors/plugins.d/plugins_d.c (renamed from src/plugins_d.c)     4
-rw-r--r--  collectors/plugins.d/plugins_d.h (renamed from src/plugins_d.h)    16
5 files changed, 1149 insertions(+), 1 deletion(-)
diff --git a/collectors/plugins.d/Makefile.am b/collectors/plugins.d/Makefile.am
new file mode 100644
index 000000000..59250a997
--- /dev/null
+++ b/collectors/plugins.d/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/plugins.d/Makefile.in b/collectors/plugins.d/Makefile.in
new file mode 100644
index 000000000..b2c112811
--- /dev/null
+++ b/collectors/plugins.d/Makefile.in
@@ -0,0 +1,647 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/plugins.d
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/plugins.d/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/plugins.d/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
new file mode 100644
index 000000000..d3aa5b5b0
--- /dev/null
+++ b/collectors/plugins.d/README.md
@@ -0,0 +1,472 @@
+# Netdata External Plugins
+
+`plugins.d` is the netdata internal plugin that collects metrics
+from external processes, thus allowing netdata to use **external plugins**.
+
+## Provided External Plugins
+
+plugin|language|O/S|description
+:---:|:---:|:---:|:---
+[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
+[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
+[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
+[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.
+[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.
+[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
+
+Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom-made modules to be included. Writing modules for these plugins is easier than accessing the native netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
+Each of these modular plugins has its own method for defining modules. Please check the examples and their documentation.
+
+## Motivation
+
+This plugin allows netdata to use **external plugins** for data collection:
+
+1. external data collection plugins may be written in any computer language.
+
+2. external data collection plugins may use O/S capabilities or `setuid` to
+ run with escalated privileges (compared to the netdata daemon).
+ The communication between the external plugin and netdata is unidirectional
+ (from the plugin to netdata), so that netdata cannot manipulate an external
+ plugin running with escalated privileges.
+
+## Operation
+
+Each of the external plugins is expected to run forever.
+Netdata will start it when netdata starts and stop it when netdata exits.
+
+If the external plugin exits or crashes, netdata will log an error.
+If the external plugin exits or crashes without pushing metrics to netdata, netdata will not start it again.
+- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time.
+- Plugins may also be disabled by netdata if they output things that netdata does not understand.
+
+The `stdout` of external plugins is connected to netdata to receive metrics,
+with the API defined below.
+
+The `stderr` of external plugins is connected to netdata `error.log`.
+
+Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another may have 5 seconds and a third may have 10 seconds.
+
+## Configuration
+
+Netdata supplies the environment variables `NETDATA_USER_CONFIG_DIR` (for user-supplied configuration files) and `NETDATA_STOCK_CONFIG_DIR` (for netdata-supplied configuration files) to identify the directories where configuration files are stored. It is up to the plugin to read the configuration it needs.
+
+The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where netdata runs, with a boolean setting to enable or disable each of them.
+
+Example:
+
+```
+[plugins]
+ # enable running new plugins = yes
+ # check for new plugins every = 60
+
+ # charts.d = yes
+ # fping = yes
+ # node.d = yes
+ # python.d = yes
+```
+
+The setting `enable running new plugins` changes the default behavior for all external plugins.
+So if set to `no`, only the plugins that are explicitly set to `yes` will be run.
+
+The setting `check for new plugins every` controls how often the directory `/usr/libexec/netdata/plugins.d`
+is rescanned for new plugins, so new plugins can be added at any time.
+
+For each of the external plugins enabled, another `netdata.conf` section
+is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
+This section allows controlling the update frequency of the plugin and providing
+additional command line arguments to it.
+
+For example, for `apps.plugin` the following section is available:
+
+```
+[plugin:apps]
+ # update every = 1
+ # command options =
+```
+
+- `update every` controls the granularity of the external plugin.
+- `command options` allows giving additional command line options to the plugin.
+
+
+Netdata provides the environment variable `NETDATA_UPDATE_EVERY` to external plugins, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
+
+Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default this is also 1).
+
+Other than the above, the plugin configuration is up to the plugin.
+
+Keep in mind that the user may use the netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin.
+
+### Autoconfiguration
+
+Plugins should attempt to autoconfigure themselves when possible.
+
+For example, if your plugin wants to monitor `squid`, you can search for it on ports `3128` and `8080`. If either succeeds, you can proceed. If both fail, you can output an error (on `stderr`) saying that you cannot find `squid` running and giving instructions about the plugin configuration. Then you can stop (exit with a non-zero value), so that netdata will not attempt to start the plugin again.
+
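+As an illustration only, a minimal autodetection check written in Python might look like the sketch below (the ports come from the `squid` example above; the plugin name, message text and everything else here is hypothetical):
+
+```python
+#!/usr/bin/env python3
+# hypothetical sketch: look for a local squid before collecting anything
+import socket
+import sys
+
+def squid_is_listening():
+    for port in (3128, 8080):
+        try:
+            with socket.create_connection(("127.0.0.1", port), timeout=1):
+                return True
+        except OSError:
+            continue
+    return False
+
+if not squid_is_listening():
+    sys.stderr.write("squid.plugin: cannot find squid on ports 3128 or 8080; "
+                     "please check the plugin configuration\n")
+    print("DISABLE")   # tell netdata not to start this plugin again
+    sys.exit(1)
+```
+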
+## External Plugins API
+
+Any program that can print a few values to its standard output can become a netdata external plugin.
+
+Netdata parses 7 kinds of lines, those starting with:
+
+- `CHART` - create or update a chart
+- `DIMENSION` - add or update a dimension to the chart just created
+- `BEGIN` - initialize data collection for a chart
+- `SET` - set the value of a dimension for the initialized chart
+- `END` - complete data collection for the initialized chart
+- `FLUSH` - ignore the last collected values
+- `DISABLE` - disable this plugin
+
+A single program can produce any number of charts with any number of dimensions each.
+
+Charts can be added at any time (not just at the beginning).
+
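+As a quick, hypothetical illustration (the chart, dimension and value below are made up), a plugin that reports a single number once per second could print:
+
+```
+CHART example.random '' "A random number" "a number" random random line 90000 1
+DIMENSION random '' absolute 1 1
+
+BEGIN example.random
+SET random = 8
+END
+```
+
+The `CHART` and `DIMENSION` lines are normally printed once, at startup; the `BEGIN` / `SET` / `END` block is repeated on every data collection iteration. All fields are documented in the sections that follow.
+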
+### command line parameters
+
+The plugin **MUST** accept just **one** parameter: **the number of seconds it is
+expected to update the values for its charts**. The value passed by netdata
+to the plugin is controlled via its configuration file (so there is no need
+for the plugin to handle this configuration option).
+
+The external plugin can overwrite the update frequency. For example, the server may
+request per second updates, but the plugin may ignore it and update its charts
+every 5 seconds.
+
+### environment variables
+
+There are a few environment variables that are set by `netdata` and are
+available for the plugin to use.
+
+variable|description
+:------:|:----------
+`NETDATA_USER_CONFIG_DIR`|The directory where all netdata related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`).
+`NETDATA_STOCK_CONFIG_DIR`|The directory where all netdata related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`).
+`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored.
+`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved.
+`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
+`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata.
+`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.
+`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex starting with `0x`), that enables certain netdata debugging features. Check **[[Tracing Options]]** for more information.
+`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no point in a plugin updating its values more frequently than this number of seconds.
+
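+For example, a plugin written in Python could read its command line parameter and these environment variables at startup as sketched below (the fallback values are illustrative only, for running the plugin by hand):
+
+```python
+import os
+import sys
+
+# the single command line parameter: the requested update frequency in seconds
+update_every = int(sys.argv[1]) if len(sys.argv) > 1 else 1
+
+# directories supplied by netdata (fallbacks are only for manual runs)
+user_config_dir  = os.environ.get("NETDATA_USER_CONFIG_DIR", "/etc/netdata")
+stock_config_dir = os.environ.get("NETDATA_STOCK_CONFIG_DIR", "/usr/lib/netdata/conf.d")
+cache_dir        = os.environ.get("NETDATA_CACHE_DIR", "/tmp")
+host_prefix      = os.environ.get("NETDATA_HOST_PREFIX", "")
+
+# never update more frequently than netdata's internal clock
+update_every = max(update_every, int(os.environ.get("NETDATA_UPDATE_EVERY", "1")))
+```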
+
+### The output of the plugin
+
+The plugin should write its instructions for netdata to its standard output (`stdout`). Since this goes through a pipe, please make sure you flush `stdout` after every iteration.
+
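+In Python, for example, this means printing with `flush=True` or calling `sys.stdout.flush()` at the end of every iteration (the chart and dimension names here are hypothetical):
+
+```python
+import sys
+
+print("BEGIN example.random")
+print("SET random = 8")
+print("END")
+sys.stdout.flush()   # netdata reads from a pipe; unflushed output may never arrive in time
+```
+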
+#### DISABLE
+
+`DISABLE` will disable this plugin. This will prevent netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
+
+#### CHART
+
+`CHART` defines a new chart.
+
+the template is:
+
+> CHART type.id name title units [family [context [charttype [priority [update_every [options [plugin [module]]]]]]]]
+
+ where:
+ - `type.id`
+
+ uniquely identifies the chart,
+ this is what will be needed to add values to the chart
+
+ the `type` part controls the menu the charts will appear in
+
+ - `name`
+
+ is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of `type.id` is changed. When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and `type.name`. You can set the name to `''`, `null`, or `(null)` to disable it.
+
+ - `title`
+
+ the text above the chart
+
+ - `units`
+
+ the label of the vertical axis of the chart,
+ all dimensions added to a chart should have the same units
+ of measurement
+
+ - `family`
+
+ is used to group charts together
+ (for example all eth0 charts should say: eth0),
+ if empty or missing, the `id` part of `type.id` will be used
+
+ this controls the sub-menu on the dashboard
+
+ - `context`
+
+ the context gives the template of the chart. For example, if multiple charts present the same information for different families, they should have the same `context`
+
+ this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also for applying alarms to it
+
+ - `charttype`
+
+ one of `line`, `area` or `stacked`,
+ if empty or missing, `line` will be used
+
+ - `priority`
+
+ is the relative priority of the charts as rendered on the web page,
+ lower numbers make the charts appear before the ones with higher numbers,
+ if empty or missing, `1000` will be used
+
+ - `update_every`
+
+ overwrite the update frequency set by the server,
+ if empty or missing, the user configured value will be used
+
+ - `options`
+
+ a space-separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or otherwise visualize a less important chart appropriately), `store_first` to make netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but not offer it on dashboards (the chart will still be sent to backends). `CHART` options were added in netdata v1.7 and the `hidden` option was added in 1.10.
+
+ - `plugin` and `module`
+
+ both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, netdata will automatically set it to the filename of the plugin that generated the chart. `module` has no default.
+
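+For example (a hypothetical chart placed in the `random` menu section, drawn as a line and refreshed every second):
+
+```
+CHART example.random '' "A random number" "a number" random random line 90000 1
+```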
+
+#### DIMENSION
+
+`DIMENSION` defines a new dimension for the chart.
+
+the template is:
+
+> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]]
+
+ where:
+
+ - `id`
+
+ the `id` of this dimension (it is a text value, not numeric),
+ this will be needed later to add values to the dimension
+
+ We suggest avoiding `.` in dimension ids. Backends expect metrics to be `.`-separated, and people will get confused if a dimension id contains a dot.
+
+ - `name`
+
+ the name of the dimension as it will appear at the legend of the chart,
+ if empty or missing the `id` will be used
+
+ - `algorithm`
+
+ one of:
+
+ * `absolute`
+
+ the value is drawn as-is (interpolated to the second boundary),
+ if `algorithm` is empty, invalid or missing, `absolute` is used
+
+ * `incremental`
+
+ the value increases over time,
+ the difference from the last value is presented in the chart,
+ the server interpolates the value and calculates a per second figure
+
+ * `percentage-of-absolute-row`
+
+ the % of this value compared to the total of all dimensions
+
+ * `percentage-of-incremental-row`
+
+ the % of this value compared to the incremental total of
+ all dimensions
+
+ - `multiplier`
+
+ an integer value to multiply the collected value,
+ if empty or missing, `1` is used
+
+ - `divisor`
+
+ an integer value to divide the collected value,
+ if empty or missing, `1` is used
+
+ - `hidden`
+
+ giving the keyword `hidden` will make this dimension hidden,
+ it will take part in the calculations but will not be presented in the chart
+
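+For example (hypothetical dimensions for a hypothetical chart):
+
+```
+DIMENSION random '' absolute 1 1
+DIMENSION bytes_in received incremental 8 1000
+DIMENSION internal_counter '' absolute 1 1 hidden
+```
+
+The second line defines a dimension collected as a byte counter but presented as a per-second rate, multiplied by 8 and divided by 1000 (i.e. shown as kilobits per second); the third one takes part in calculations but is not shown on the chart.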
+
+#### VARIABLE
+
+> VARIABLE [SCOPE] name = value
+
+`VARIABLE` defines a variable that can be used in alarms. This is used for setting constants (like the maximum number of connections a server may accept).
+
+Variables support 2 scopes:
+
+- `GLOBAL` or `HOST` to define the variable at the host level.
+- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. netdata monitors 2 MySQL servers, and you need to set the `max_connections` each server accepts). Chart-local variables are ideal for building alarm templates.
+
+The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
+
+These variables can be set and updated at any point.
+
+Variable names should use alphanumeric characters, the `.` and the `_`.
+
+The `value` is floating point (netdata uses `long double`).
+
+Variables are transferred to upstream netdata servers (streaming and database replication).
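+
+For example (hypothetical values):
+
+```
+VARIABLE GLOBAL max_connections = 100
+VARIABLE CHART max_connections = 150
+```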
+
+## Data collection
+
+Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines.
+
+> BEGIN type.id [microseconds]
+
+ - `type.id`
+
+ is the unique identification of the chart (as given in `CHART`)
+
+ - `microseconds`
+
+ is the number of microseconds since the last update of the chart. It is optional.
+
+ Under heavy system load, the system may have some latency transferring
+ data from the plugins to netdata via the pipe. This number improves
+ accuracy significantly, since the plugin is able to calculate the
+ duration between its iterations better than netdata.
+
+ The first time the plugin is started, no microseconds should be given
+ to netdata.
+
+> SET id = value
+
+ - `id`
+
+ is the unique identification of the dimension (of the chart that was just begun)
+
+ - `value`
+
+ is the collected value; only integer values are collected. If you want to push fractional values, multiply the value by 100 or 1000 and set the `DIMENSION` divisor to 100 or 1000 accordingly.
+
+> END
+
+ END does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit.
+
+More `SET` lines may appear, to update all the dimensions of the chart,
+all of them within one `BEGIN` -> `END` block.
+
+All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
+same chart.
+
+If more charts need to be updated, each chart should have its own
+`BEGIN` -> `SET` -> `END` block.
+
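+For example, a hypothetical plugin updating two charts in a single iteration (one second, i.e. 1000000 microseconds, after its previous run) would print:
+
+```
+BEGIN example.random1 1000000
+SET random1 = 10
+SET random2 = 20
+END
+
+BEGIN example.random2 1000000
+SET random3 = 30
+END
+```
+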
+If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
+it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore
+all the values collected since the last `BEGIN` command.
+
+If a plugin does not behave properly (outputs invalid lines, or does not
+follow these guidelines), it will be disabled by netdata.
+
+### collected values
+
+netdata will collect any **signed** value in the 64-bit range:
+`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
+
+If a value is not collected, leave it empty, like this:
+
+`SET id = `
+
+or do not output the line at all.
+
+## Modular Plugins
+
+1. **python**, use `python.d.plugin`, there are many examples in the [python.d directory](../python.d.plugin)
+
+ python is ideal for netdata plugins. It is a simple yet powerful way to collect data, and it has a very small memory footprint, although it is not the most CPU-efficient way to do it.
+
+2. **node.js**, use `node.d.plugin`, there are a few examples in the [node.d directory](../node.d.plugin)
+
+ node.js is the fastest scripting language for collecting data. If your plugin needs to do a lot of work, compute values, etc, node.js is probably the best choice before moving to compiled code. Keep in mind though that node.js is not memory efficient; it will probably need more RAM compared to python.
+
+3. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d directory](../charts.d.plugin)
+
+ BASH is the simplest scripting language for collecting values. It is the least efficient, though, in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might consume a lot of system resources.
+
+4. **C**
+
+ Of course, C is the most efficient way of collecting data. This is why netdata itself is written in C.
+
+---
+
+## Writing Plugins Properly
+
+There are a few rules for writing plugins properly:
+
+1. Respect system resources
+
+ Pay special attention to efficiency:
+
+ - Initialize everything once, at the beginning. Initialization is not an expensive operation. Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
+ - Do the absolutely minimum while iterating to collect values repeatedly.
+ - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
+ - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
+ - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
+
+2. The best way to iterate at a constant pace is this pseudo code:
+
+```js
+ var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
+
+ readConfiguration();
+
+ if(!verifyWeCanCollectValues()) {
+ print "DISABLE";
+ exit(1);
+ }
+
+ createCharts(); /* print CHART and DIMENSION statements */
+
+ var loops = 0;
+ var last_run = 0;
+ var next_run = 0;
+ var dt_since_last_run = 0;
+ var now = 0;
+
+ FOREVER {
+ /* find the current time in milliseconds */
+ now = currentTimeStampInMilliseconds();
+
+ /*
+ * find the time of the next loop
+ * this makes sure we are always aligned
+ * with the netdata daemon
+ */
+ next_run = now - (now % update_every) + update_every;
+
+ /*
+ * wait until it is time
+ * it is important to do it in a loop
+ * since many wait functions can be interrupted
+ */
+ while( now < next_run ) {
+ sleepMilliseconds(next_run - now);
+ now = currentTimeStampInMilliseconds();
+ }
+
+ /* calculate the time passed since the last run */
+ if ( loops > 0 )
+ dt_since_last_run = (now - last_run) * 1000; /* in microseconds */
+
+ /* prepare for the next loop */
+ last_run = now;
+ loops++;
+
+ /* do your magic here to collect values */
+ collectValues();
+
+ /* send the collected data to netdata */
+ printValues(dt_since_last_run); /* print BEGIN, SET, END statements */
+ }
+```
+
+ Using the above procedure, your plugin will be synchronized to start data collection on steps of `update_every`. There will be no need to keep track of latencies in data collection.
+
+ Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed.
+
+3. If you are not sure your plugin is free of memory leaks, exit every hour. Netdata will restart your process.
+
+4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
diff --git a/src/plugins_d.c b/collectors/plugins.d/plugins_d.c
index 5693dda06..465ecd796 100644
--- a/src/plugins_d.c
+++ b/collectors/plugins.d/plugins_d.c
@@ -1,4 +1,6 @@
-#include "common.h"
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugins_d.h"
char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { NULL };
char *netdata_configured_plugins_dir_base;
diff --git a/src/plugins_d.h b/collectors/plugins.d/plugins_d.h
index 692d7cae1..adccf3f0f 100644
--- a/src/plugins_d.h
+++ b/collectors/plugins.d/plugins_d.h
@@ -1,6 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
#ifndef NETDATA_PLUGINS_D_H
#define NETDATA_PLUGINS_D_H 1
+#include "../../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_PLUGINSD \
+ { \
+ .name = "PLUGINSD", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = pluginsd_main \
+ },
+
+
#define PLUGINSD_FILE_SUFFIX ".plugin"
#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX)
#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)