Diffstat (limited to 'python.d')
-rw-r--r--                 python.d/Makefile.am                  16
-rw-r--r--                 python.d/Makefile.in                 182
-rw-r--r--                 python.d/README.md                   378
-rw-r--r--                 python.d/bind_rndc.chart.py           48
-rw-r--r--                 python.d/cpufreq.chart.py              1
-rw-r--r--                 python.d/elasticsearch.chart.py      401
-rw-r--r--                 python.d/fail2ban.chart.py            97
-rw-r--r--                 python.d/freeradius.chart.py          18
-rw-r--r--                 python.d/gunicorn_log.chart.py        72
-rw-r--r--                 python.d/isc_dhcpd.chart.py           37
-rw-r--r--                 python.d/mongodb.chart.py            672
-rw-r--r--                 python.d/mysql.chart.py              546
-rw-r--r--                 python.d/nginx_log.chart.py           82
-rw-r--r--                 python.d/nsd.chart.py                 93
-rw-r--r-- [-rwxr-xr-x]    python.d/phpfpm.chart.py             183
-rw-r--r--                 python.d/postgres.chart.py           526
-rw-r--r-- [-rwxr-xr-x]    python.d/python_modules/__init__.py    0
-rw-r--r--                 python.d/python_modules/base.py      372
-rw-r--r--                 python.d/smartd_log.chart.py         221
-rw-r--r--                 python.d/tomcat.chart.py              91
-rw-r--r--                 python.d/varnish.chart.py             18
-rw-r--r--                 python.d/web_log.chart.py            653
22 files changed, 3334 insertions, 1373 deletions
diff --git a/python.d/Makefile.am b/python.d/Makefile.am
index 883f06c4c..bfe28ff28 100644
--- a/python.d/Makefile.am
+++ b/python.d/Makefile.am
@@ -8,6 +8,11 @@ include $(top_srcdir)/build/subst.inc
SUFFIXES = .in
dist_python_SCRIPTS = \
+ python-modules-installer.sh \
+ $(NULL)
+
+dist_python_DATA = \
+ README.md \
apache.chart.py \
apache_cache.chart.py \
bind_rndc.chart.py \
@@ -19,16 +24,16 @@ dist_python_SCRIPTS = \
exim.chart.py \
fail2ban.chart.py \
freeradius.chart.py \
- gunicorn_log.chart.py \
haproxy.chart.py \
hddtemp.chart.py \
ipfs.chart.py \
isc_dhcpd.chart.py \
mdstat.chart.py \
memcached.chart.py \
+ mongodb.chart.py \
mysql.chart.py \
nginx.chart.py \
- nginx_log.chart.py \
+ nsd.chart.py \
ovpn_status_log.chart.py \
phpfpm.chart.py \
postfix.chart.py \
@@ -37,13 +42,10 @@ dist_python_SCRIPTS = \
retroshare.chart.py \
sensors.chart.py \
squid.chart.py \
+ smartd_log.chart.py \
tomcat.chart.py \
varnish.chart.py \
- python-modules-installer.sh \
- $(NULL)
-
-dist_python_DATA = \
- README.md \
+ web_log.chart.py \
$(NULL)
pythonmodulesdir=$(pythondir)/python_modules
diff --git a/python.d/Makefile.in b/python.d/Makefile.in
index 018e8b3eb..9b7846682 100644
--- a/python.d/Makefile.in
+++ b/python.d/Makefile.in
@@ -1,8 +1,9 @@
-# Makefile.in generated by automake 1.15 from Makefile.am.
+# Makefile.in generated by automake 1.11.3 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994-2014 Free Software Foundation, Inc.
-
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -16,61 +17,6 @@
VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -89,10 +35,14 @@ PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
+DIST_COMMON = $(dist_python_DATA) $(dist_python_SCRIPTS) \
+ $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
+ $(dist_pythonyaml3_DATA) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
subdir = python.d
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
- $(top_srcdir)/m4/ax_c__generic.m4 \
+ $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
$(top_srcdir)/m4/ax_c_mallinfo.m4 \
$(top_srcdir)/m4/ax_c_mallopt.m4 \
$(top_srcdir)/m4/ax_check_compile_flag.m4 \
@@ -101,10 +51,6 @@ am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_python_SCRIPTS) \
- $(dist_python_DATA) $(dist_pythonmodules_DATA) \
- $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA) \
- $(am__DIST_COMMON)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
@@ -140,33 +86,13 @@ am__installdirs = "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(pythondir)" \
"$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" \
"$(DESTDIR)$(pythonyaml3dir)"
SCRIPTS = $(dist_python_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
SOURCES =
DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
DATA = $(dist_python_DATA) $(dist_pythonmodules_DATA) \
$(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -190,7 +116,11 @@ INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
LIBMNL_LIBS = @LIBMNL_LIBS@
LIBOBJS = @LIBOBJS@
@@ -204,6 +134,10 @@ MKDIR_P = @MKDIR_P@
NFACCT_CFLAGS = @NFACCT_CFLAGS@
NFACCT_LIBS = @NFACCT_LIBS@
OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
@@ -307,6 +241,11 @@ CLEANFILES = \
SUFFIXES = .in
dist_python_SCRIPTS = \
+ python-modules-installer.sh \
+ $(NULL)
+
+dist_python_DATA = \
+ README.md \
apache.chart.py \
apache_cache.chart.py \
bind_rndc.chart.py \
@@ -318,16 +257,16 @@ dist_python_SCRIPTS = \
exim.chart.py \
fail2ban.chart.py \
freeradius.chart.py \
- gunicorn_log.chart.py \
haproxy.chart.py \
hddtemp.chart.py \
ipfs.chart.py \
isc_dhcpd.chart.py \
mdstat.chart.py \
memcached.chart.py \
+ mongodb.chart.py \
mysql.chart.py \
nginx.chart.py \
- nginx_log.chart.py \
+ nsd.chart.py \
ovpn_status_log.chart.py \
phpfpm.chart.py \
postfix.chart.py \
@@ -336,13 +275,10 @@ dist_python_SCRIPTS = \
retroshare.chart.py \
sensors.chart.py \
squid.chart.py \
+ smartd_log.chart.py \
tomcat.chart.py \
varnish.chart.py \
- python-modules-installer.sh \
- $(NULL)
-
-dist_python_DATA = \
- README.md \
+ web_log.chart.py \
$(NULL)
pythonmodulesdir = $(pythondir)/python_modules
@@ -411,6 +347,7 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu python.d/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --gnu python.d/Makefile
+.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
@@ -419,7 +356,7 @@ Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
-$(top_srcdir)/build/subst.inc $(am__empty):
+$(top_srcdir)/build/subst.inc:
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
@@ -431,11 +368,8 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
@$(NORMAL_INSTALL)
+ test -z "$(pythondir)" || $(MKDIR_P) "$(DESTDIR)$(pythondir)"
@list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
@@ -466,11 +400,8 @@ uninstall-dist_pythonSCRIPTS:
dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
install-dist_pythonDATA: $(dist_python_DATA)
@$(NORMAL_INSTALL)
+ test -z "$(pythondir)" || $(MKDIR_P) "$(DESTDIR)$(pythondir)"
@list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -487,11 +418,8 @@ uninstall-dist_pythonDATA:
dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
@$(NORMAL_INSTALL)
+ test -z "$(pythonmodulesdir)" || $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)"
@list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
- fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -508,11 +436,8 @@ uninstall-dist_pythonmodulesDATA:
dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
@$(NORMAL_INSTALL)
+ test -z "$(pythonyaml2dir)" || $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)"
@list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
- fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -529,11 +454,8 @@ uninstall-dist_pythonyaml2DATA:
dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
@$(NORMAL_INSTALL)
+ test -z "$(pythonyaml3dir)" || $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)"
@list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
- fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -548,11 +470,11 @@ uninstall-dist_pythonyaml3DATA:
@list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
+tags: TAGS
+TAGS:
-cscope cscopelist:
+ctags: CTAGS
+CTAGS:
distdir: $(DISTFILES)
@@ -696,24 +618,22 @@ uninstall-am: uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_pythonDATA \
- install-dist_pythonSCRIPTS install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
- uninstall uninstall-am uninstall-dist_pythonDATA \
- uninstall-dist_pythonSCRIPTS uninstall-dist_pythonmodulesDATA \
+.PHONY: all all-am check check-am clean clean-generic distclean \
+ distclean-generic distdir dvi dvi-am html html-am info info-am \
+ install install-am install-data install-data-am \
+ install-dist_pythonDATA install-dist_pythonSCRIPTS \
+ install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
+ install-dist_pythonyaml3DATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am uninstall uninstall-am \
+ uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_pythonmodulesDATA \
uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA
-.PRECIOUS: Makefile
-
.in:
if sed \
-e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
diff --git a/python.d/README.md b/python.d/README.md
index 75f5614a7..7df6e3e86 100644
--- a/python.d/README.md
+++ b/python.d/README.md
@@ -704,6 +704,149 @@ If no configuration is given, module will attempt to connect to memcached instan
---
+# mongodb
+
+Module monitors mongodb performance and health metrics
+
+**Requirements:**
+ * `python-pymongo` package.
+
+You need to install it manually.
+
+
+Number of charts depends on mongodb version, storage engine and other features (replication):
+
+1. **Read requests**:
+ * query
+ * getmore (operation the cursor executes to get additional data from query)
+
+2. **Write requests**:
+ * insert
+ * delete
+ * update
+
+3. **Active clients**:
+ * readers (number of clients with read operations in progress or queued)
+ * writers (number of clients with write operations in progress or queued)
+
+4. **Journal transactions**:
+ * commits (count of transactions that have been written to the journal)
+
+5. **Data written to the journal**:
+ * volume (volume of data)
+
+6. **Background flush** (MMAPv1):
+ * average ms (average time taken by flushes to execute)
+ * last ms (time taken by the last flush)
+
+7. **Read tickets** (WiredTiger):
+ * in use (number of read tickets in use)
+ * available (number of available read tickets remaining)
+
+8. **Write tickets** (WiredTiger):
+ * in use (number of write tickets in use)
+ * available (number of available write tickets remaining)
+
+9. **Cursors**:
+ * opened (number of cursors currently opened by MongoDB for clients)
+ * timedOut (number of cursors that have timed out)
+ * noTimeout (number of open cursors with timeout disabled)
+
+10. **Connections**:
+ * connected (number of clients currently connected to the database server)
+ * unused (number of unused connections available for new clients)
+
+11. **Memory usage metrics**:
+ * virtual
+ * resident (amount of memory used by the database process)
+ * mapped
+ * non mapped
+
+12. **Page faults**:
+ * page faults (number of times MongoDB had to read data from disk)
+
+13. **Cache metrics** (WiredTiger):
+ * percentage of bytes currently in the cache (amount of space taken by cached data)
+ * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+14. **Pages evicted from cache** (WiredTiger):
+ * modified
+ * unmodified
+
+15. **Queued requests**:
+ * readers (number of read requests currently queued)
+ * writers (number of write requests currently queued)
+
+16. **Errors**:
+ * msg (number of message assertions raised)
+ * warning (number of warning assertions raised)
+ * regular (number of regular assertions raised)
+ * user (number of assertions corresponding to errors generated by users)
+
+17. **Storage metrics** (one chart for every database)
+ * dataSize (size of all documents + padding in the database)
+ * indexSize (size of all indexes in the database)
+ * storageSize (size of all extents in the database)
+
+18. **Documents in the database** (one chart for all databases)
+ * documents (number of objects in the database among all the collections)
+
+19. **tcmalloc metrics**
+ * central cache free
+ * current total thread cache
+ * pageheap free
+ * pageheap unmapped
+ * thread cache free
+ * transfer cache free
+ * heap size
+
+20. **Commands total/failed rate**
+ * count
+ * createIndex
+ * delete
+ * eval
+ * findAndModify
+ * insert
+
+21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
+ * Global lock
+ * Database lock
+ * Collection lock
+ * Metadata lock
+ * oplog lock
+
+22. **Replica set members state**
+ * state
+
+23. **Oplog window**
+ * window (interval of time between the oldest and the latest entries in the oplog)
+
+24. **Replication lag**
+ * member (time when last entry from the oplog was applied for every member)
+
+25. **Replication set member heartbeat latency**
+ * member (time when last heartbeat was received from replica set member)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 27017
+ user : 'netdata'
+ pass : 'netdata'
+
+```
+
+If no configuration is given, module will attempt to connect to the mongodb daemon at `127.0.0.1:27017`
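A minimal pymongo sketch, assuming the metrics come from MongoDB's `serverStatus` admin command (the connection parameters mirror the sample configuration above; the command choice is an assumption, since `mongodb.chart.py` itself is not shown here):

```python
# Illustrative sketch only: pull a few of the counters listed above via pymongo.
# Reading 'serverStatus' is an assumed data source, not taken from this patch.
from pymongo import MongoClient

client = MongoClient(host='127.0.0.1', port=27017, serverSelectionTimeoutMS=5000)
status = client.admin.command('serverStatus')

print({
    'query': status['opcounters']['query'],        # read requests
    'insert': status['opcounters']['insert'],      # write requests
    'readers': status['globalLock']['activeClients']['readers'],
    'writers': status['globalLock']['activeClients']['writers'],
})
```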
+
+---
+
+
# mysql
Module monitors one or more mysql servers
@@ -841,30 +984,58 @@ Without configuration, module attempts to connect to `http://localhost/stub_stat
---
-# nginx_log
+# nsd
-Module monitors nginx access log and produces only one chart:
+Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
-1. **nginx status codes** in requests/s
- * 2xx
- * 3xx
- * 4xx
- * 5xx
+**Requirements:**
+ * Version of `nsd` must be 4.0+
+ * Netdata must have permissions to run `nsd-control stats_noreset`
-### configuration
+It produces:
-Sample for two vhosts:
+1. **Queries**
+ * queries
-```yaml
-site_A:
- path: '/var/log/nginx/access-A.log'
+2. **Zones**
+ * master
+ * slave
-site_B:
- name: 'local'
- path: '/var/log/nginx/access-B.log'
-```
+3. **Protocol**
+ * udp
+ * udp6
+ * tcp
+ * tcp6
+
+4. **Query Type**
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * HINFO
+ * MX
+ * NAPTR
+ * TXT
+ * AAAA
+ * SRV
+ * ANY
+
+5. **Transfer**
+ * NOTIFY
+ * AXFR
+
+6. **Return Code**
+ * NOERROR
+ * FORMERR
+ * SERVFAIL
+ * NXDOMAIN
+ * NOTIMP
+ * REFUSED
+ * YXDOMAIN
-When no configuration file is found, module tries to parse `/var/log/nginx/access.log` file.
+
+Configuration is not needed.
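A minimal sketch of what wrapping that command involves, assuming the usual one-counter-per-line `key=value` output of `nsd-control stats_noreset` (the counter names printed at the end are illustrative):

```python
# Illustrative sketch: run the same command the module wraps and turn its
# 'key=value' lines into a dict; the counter names below are assumptions.
from subprocess import check_output

raw = check_output(['nsd-control', 'stats_noreset']).decode()
stats = dict(line.split('=', 1) for line in raw.splitlines() if '=' in line)

print(stats.get('num.udp'), stats.get('num.tcp'))  # per-protocol query counters
```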
---
@@ -958,6 +1129,75 @@ Configuration is not needed.
---
+# postgres
+
+Module monitors one or more postgres servers.
+
+**Requirements:**
+
+ * `python-psycopg2` package. You have to install it manually.
+
+Following charts are drawn:
+
+1. **Database size** MB
+ * size
+
+2. **Current Backend Processes** processes
+ * active
+
+3. **Write-Ahead Logging Statistics** files/s
+ * total
+ * ready
+ * done
+
+4. **Checkpoints** writes/s
+ * scheduled
+ * requested
+
+5. **Current connections to db** count
+ * connections
+
+6. **Tuples returned from db** tuples/s
+ * sequential
+ * bitmap
+
+7. **Tuple reads from db** reads/s
+ * disk
+ * cache
+
+8. **Transactions on db** transactions/s
+ * committed
+ * rolled back
+
+9. **Tuples written to db** writes/s
+ * inserted
+ * updated
+ * deleted
+ * conflicts
+
+10. **Locks on db** count per type
+ * locks
+
+### configuration
+
+```yaml
+socket:
+ name : 'socket'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'tcp'
+ user : 'postgres'
+ database : 'postgres'
+ host : 'localhost'
+ port : 5432
+```
+
+When no configuration file is found, module tries to connect via TCP/IP to `localhost:5432`.
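For context, a minimal psycopg2 sketch matching the `tcp` job above; the actual SQL issued by `postgres.chart.py` is not part of this diff, so the `pg_stat_database` query here is only an assumption:

```python
# Illustrative sketch: connect the way the 'tcp' job is configured and read
# transaction counters; the pg_stat_database query is an assumption.
import psycopg2

conn = psycopg2.connect(host='localhost', port=5432, user='postgres', dbname='postgres')
with conn.cursor() as cur:
    cur.execute('SELECT sum(xact_commit), sum(xact_rollback) FROM pg_stat_database;')
    committed, rolled_back = cur.fetchone()
conn.close()

print(committed, rolled_back)  # feeds the 'Transactions on db' style of chart
```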
+
+---
+
# redis
Get INFO data from redis instance.
@@ -1052,6 +1292,45 @@ Without any configuration module will try to autodetect where squid presents its
---
+# smartd_log
+
+Module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
+
+It produces the following charts (you can add additional attributes in the module configuration file):
+
+1. **Read Error Rate** attribute 1
+
+2. **Start/Stop Count** attribute 4
+
+3. **Reallocated Sectors Count** attribute 5
+
+4. **Seek Error Rate** attribute 7
+
+5. **Power-On Hours Count** attribute 9
+
+6. **Power Cycle Count** attribute 12
+
+7. **Load/Unload Cycles** attribute 193
+
+8. **Temperature** attribute 194
+
+9. **Current Pending Sectors** attribute 197
+
+10. **Off-Line Uncorrectable** attribute 198
+
+11. **Write Error Rate** attribute 200
+
+### configuration
+
+```yaml
+local:
+ log_path : '/var/log/smartd/'
+```
+
+If no configuration is given, module will attempt to read log files in the `/var/log/smartd/` directory.
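A minimal sketch of the file discovery this configuration implies; only the `log_path` handling mirrors the sample above, and the column layout of smartd's attribute logs is deliberately left as an assumption:

```python
# Illustrative sketch: locate the per-disk CSV files smartd writes under
# log_path and print the newest line of each; column parsing is omitted
# because the exact attribute-log layout is an assumption here.
from glob import glob
import os

log_path = '/var/log/smartd/'
for csv_file in glob(os.path.join(log_path, '*.csv')):
    with open(csv_file) as f:
        lines = f.read().splitlines()
    if lines:
        print(os.path.basename(csv_file), '->', lines[-1])
```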
+
+---
+
# tomcat
Present tomcat containers memory utilization.
@@ -1151,3 +1430,68 @@ It produces:
No configuration is needed.
---
+
+# web_log
+
+Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
+
+It produces the following charts:
+
+1. **Response by type** requests/s
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Response by code family** requests/s
+ * 1xx (informational)
+ * 2xx (successful)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 5xx (internal server errors)
+ * other (non-standard responses)
+ * unmatched (the lines in the log file that are not matched)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code individually)
+
+4. **Bandwidth** KB/s
+ * received (bandwidth of requests)
+ * sent (bandwidth of responses)
+
+5. **Timings** ms (request processing time)
+ * min (fastest request processing time)
+ * max (slowest request processing time)
+ * average (average request processing time)
+
+6. **Requests per URL** requests/s (configured by user)
+
+7. **Http Methods** requests/s (requests per http method)
+
+8. **Http Versions** requests/s (requests per http version)
+
+9. **IP protocols** requests/s (requests per ip protocol version)
+
+10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
+
+11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata)
+
+
+### configuration
+
+```yaml
+nginx_log:
+ name : 'nginx_log'
+ path : '/var/log/nginx/access.log'
+
+apache_log:
+ name : 'apache_log'
+ path : '/var/log/apache/other_vhosts_access.log'
+ categories:
+ cacti : 'cacti.*'
+ observium : 'observium'
+```
+
+Module has preconfigured jobs for nginx, apache and gunicorn on various distros.
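To illustrate what tailing an access log means here, a minimal sketch for one combined-format line; the regular expression is illustrative and not the one shipped in `web_log.chart.py`:

```python
# Illustrative parser for a single 'combined'-style access-log line; the
# module's real regular expressions live in web_log.chart.py.
import re

LINE = '203.0.113.7 - - [01/Jan/2017:00:00:01 +0000] "GET /index.html HTTP/1.1" 200 1532'
PATTERN = re.compile(r'(?P<address>[\da-f.:]+) .* "(?P<method>[A-Z]+) (?P<url>\S+) '
                     r'HTTP/(?P<version>[\d.]+)" (?P<code>\d{3}) (?P<bytes>\d+)')

match = PATTERN.search(LINE)
if match:
    hit = match.groupdict()
    family = hit['code'][0] + 'xx'   # response code family, e.g. '2xx'
    print(hit['address'], hit['method'], family, hit['bytes'])
```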
+
+---
diff --git a/python.d/bind_rndc.chart.py b/python.d/bind_rndc.chart.py
index e11c751f5..a4d753703 100644
--- a/python.d/bind_rndc.chart.py
+++ b/python.d/bind_rndc.chart.py
@@ -4,7 +4,7 @@
from base import SimpleService
from re import compile, findall
-from os.path import getsize, isfile, split
+from os.path import getsize, split
from os import access as is_accessible, R_OK
from subprocess import Popen
@@ -12,7 +12,6 @@ priority = 60000
retries = 60
update_every = 30
-DIRECTORIES = ['/bin/', '/usr/bin/', '/sbin/', '/usr/sbin/']
NMS = ['requests', 'responses', 'success', 'auth_answer', 'nonauth_answer', 'nxrrset', 'failure',
'nxdomain', 'recursion', 'duplicate', 'rejections']
QUERIES = ['RESERVED0', 'A', 'NS', 'CNAME', 'SOA', 'PTR', 'MX', 'TXT', 'X25', 'AAAA', 'SRV', 'NAPTR',
@@ -29,16 +28,12 @@ class Service(SimpleService):
# 'Cache DB RRsets', 'Socket I/O Statistics']
self.options = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
self.regex_options = [r'(%s(?= \+\+)) \+\+([^\+]+)' % option for option in self.options]
- try:
- self.rndc = [''.join([directory, 'rndc']) for directory in DIRECTORIES
- if isfile(''.join([directory, 'rndc']))][0]
- except IndexError:
- self.rndc = False
+ self.rndc = self.find_binary('rndc')
def check(self):
# We cant start without 'rndc' command
if not self.rndc:
- self.error('Command "rndc" not found')
+ self.error('Can\'t locate \'rndc\' binary or binary is not executable by netdata')
return False
# We cant start if stats file is not exist or not readable by netdata user
@@ -55,15 +50,15 @@ class Service(SimpleService):
if not run_rndc.returncode:
# 'rndc' was found, stats file is exist and readable and we can run 'rndc stats'. Lets go!
self.create_charts()
-
+
# BIND APPEND dump on every run 'rndc stats'
# that is why stats file size can be VERY large if update_interval too small
dump_size_24hr = round(86400 / self.update_every * (int(size_after) - int(size_before)) / 1048576, 3)
-
+
# If update_every too small we should WARN user
if self.update_every < 30:
self.info('Update_every %s is NOT recommended for use. Increase the value to > 30' % self.update_every)
-
+
self.info('With current update_interval it will be + %s MB every 24hr. '
'Don\'t forget to create logrotate conf file for %s' % (dump_size_24hr, self.named_stats_path))
@@ -82,12 +77,11 @@ class Service(SimpleService):
named.stats file size
)
"""
-
try:
current_size = getsize(self.named_stats_path)
except OSError:
return None, None
-
+
run_rndc = Popen([self.rndc, 'stats'], shell=False)
run_rndc.wait()
@@ -115,23 +109,23 @@ class Service(SimpleService):
return None
rndc_stats = dict()
-
+
# Result: dict.
# topic = Cache DB RRsets; body = A 178303 NS 86790 ... ; desc = A; value = 178303
# {'Cache DB RRsets': [('A', 178303), ('NS', 286790), ...],
# {Incoming Queries': [('RESERVED0', 8), ('A', 4557317680), ...],
# ......
for regex in self.regex_options:
- rndc_stats.update({topic: [(desc, int(value)) for value, desc in self.regex_values.findall(body)]
- for topic, body in findall(regex, raw_data)})
-
+ rndc_stats.update(dict([(topic, [(desc, int(value)) for value, desc in self.regex_values.findall(body)])
+ for topic, body in findall(regex, raw_data)]))
+
nms = dict(rndc_stats.get('Name Server Statistics', []))
- inc_queries = {'i' + k: 0 for k in QUERIES}
- inc_queries.update({'i' + k: v for k, v in rndc_stats.get('Incoming Queries', [])})
- out_queries = {'o' + k: 0 for k in QUERIES}
- out_queries.update({'o' + k: v for k, v in rndc_stats.get('Outgoing Queries', [])})
-
+ inc_queries = dict([('i' + k, 0) for k in QUERIES])
+ inc_queries.update(dict([('i' + k, v) for k, v in rndc_stats.get('Incoming Queries', [])]))
+ out_queries = dict([('o' + k, 0) for k in QUERIES])
+ out_queries.update(dict([('o' + k, v) for k, v in rndc_stats.get('Outgoing Queries', [])]))
+
to_netdata = dict()
to_netdata['requests'] = sum([v for k, v in nms.items() if 'request' in k and 'received' in k])
to_netdata['responses'] = sum([v for k, v in nms.items() if 'responses' in k and 'sent' in k])
@@ -145,7 +139,7 @@ class Service(SimpleService):
to_netdata['duplicate'] = nms.get('duplicate queries received', 0)
to_netdata['rejections'] = nms.get('recursive queries rejected', 0)
to_netdata['stats_size'] = size
-
+
to_netdata.update(inc_queries)
to_netdata.update(out_queries)
return to_netdata
@@ -154,20 +148,20 @@ class Service(SimpleService):
self.order = ['stats_size', 'bind_stats', 'incoming_q', 'outgoing_q']
self.definitions = {
'bind_stats': {
- 'options': [None, 'Name Server Statistics', 'stats', 'Name Server Statistics', 'bind_rndc.stats', 'line'],
+ 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics', 'bind_rndc.stats', 'line'],
'lines': [
]},
'incoming_q': {
- 'options': [None, 'Incoming queries', 'queries','Incoming queries', 'bind_rndc.incq', 'line'],
+ 'options': [None, 'Incoming queries', 'queries','incoming queries', 'bind_rndc.incq', 'line'],
'lines': [
]},
'outgoing_q': {
- 'options': [None, 'Outgoing queries', 'queries','Outgoing queries', 'bind_rndc.outq', 'line'],
+ 'options': [None, 'Outgoing queries', 'queries','outgoing queries', 'bind_rndc.outq', 'line'],
'lines': [
]},
'stats_size': {
'options': [None, '%s file size' % split(self.named_stats_path)[1].capitalize(), 'megabytes',
- '%s size' % split(self.named_stats_path)[1].capitalize(), 'bind_rndc.size', 'line'],
+ '%s size' % split(self.named_stats_path)[1], 'bind_rndc.size', 'line'],
'lines': [
["stats_size", None, "absolute", 1, 1048576]
]}
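The comprehension changes in this file swap `{key: value for ...}` syntax for the `dict([(key, value) for ...])` constructor form; both build the same mapping, but the constructor form also runs on Python 2.6, which predates dict comprehensions and is presumably the motivation. A small equivalence check:

```python
# Both expressions build the same dict; only the second also works on Python 2.6.
QUERIES = ['A', 'NS', 'CNAME']

comprehension = {'i' + k: 0 for k in QUERIES}         # needs Python >= 2.7
constructor = dict([('i' + k, 0) for k in QUERIES])   # Python 2.6 compatible

assert comprehension == constructor == {'iA': 0, 'iNS': 0, 'iCNAME': 0}
```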
diff --git a/python.d/cpufreq.chart.py b/python.d/cpufreq.chart.py
index c761aae88..e28bdea8f 100644
--- a/python.d/cpufreq.chart.py
+++ b/python.d/cpufreq.chart.py
@@ -62,6 +62,7 @@ class Service(SimpleService):
return data
else:
self.alert("accurate method failed, falling back")
+ self.accurate_exists = False
for name, paths in self.assignment.items():
diff --git a/python.d/elasticsearch.chart.py b/python.d/elasticsearch.chart.py
index ff841f17c..430227f69 100644
--- a/python.d/elasticsearch.chart.py
+++ b/python.d/elasticsearch.chart.py
@@ -3,13 +3,14 @@
# Author: l2isbad
from base import UrlService
-from requests import get
-from socket import gethostbyname
+from socket import gethostbyname, gaierror
try:
from queue import Queue
except ImportError:
from Queue import Queue
from threading import Thread
+from collections import namedtuple
+from json import loads
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -17,45 +18,125 @@ update_every = 5
priority = 60000
retries = 60
+METHODS = namedtuple('METHODS', ['get_data_function', 'url'])
+
+NODE_STATS = [
+ ('indices.search.fetch_current', None, None),
+ ('indices.search.fetch_total', None, None),
+ ('indices.search.query_current', None, None),
+ ('indices.search.query_total', None, None),
+ ('indices.search.query_time_in_millis', None, None),
+ ('indices.search.fetch_time_in_millis', None, None),
+ ('indices.indexing.index_total', 'indexing_index_total', None),
+ ('indices.indexing.index_current', 'indexing_index_current', None),
+ ('indices.indexing.index_time_in_millis', 'indexing_index_time_in_millis', None),
+ ('indices.refresh.total', 'refresh_total', None),
+ ('indices.refresh.total_time_in_millis', 'refresh_total_time_in_millis', None),
+ ('indices.flush.total', 'flush_total', None),
+ ('indices.flush.total_time_in_millis', 'flush_total_time_in_millis', None),
+ ('jvm.gc.collectors.young.collection_count', 'young_collection_count', None),
+ ('jvm.gc.collectors.old.collection_count', 'old_collection_count', None),
+ ('jvm.gc.collectors.young.collection_time_in_millis', 'young_collection_time_in_millis', None),
+ ('jvm.gc.collectors.old.collection_time_in_millis', 'old_collection_time_in_millis', None),
+ ('jvm.mem.heap_used_percent', 'jvm_heap_percent', None),
+ ('jvm.mem.heap_committed_in_bytes', 'jvm_heap_commit', None),
+ ('thread_pool.bulk.queue', 'bulk_queue', None),
+ ('thread_pool.bulk.rejected', 'bulk_rejected', None),
+ ('thread_pool.index.queue', 'index_queue', None),
+ ('thread_pool.index.rejected', 'index_rejected', None),
+ ('thread_pool.search.queue', 'search_queue', None),
+ ('thread_pool.search.rejected', 'search_rejected', None),
+ ('thread_pool.merge.queue', 'merge_queue', None),
+ ('thread_pool.merge.rejected', 'merge_rejected', None),
+ ('indices.fielddata.memory_size_in_bytes', 'index_fdata_memory', None),
+ ('indices.fielddata.evictions', None, None),
+ ('breakers.fielddata.tripped', None, None),
+ ('http.current_open', 'http_current_open', None),
+ ('transport.rx_size_in_bytes', 'transport_rx_size_in_bytes', None),
+ ('transport.tx_size_in_bytes', 'transport_tx_size_in_bytes', None),
+ ('process.max_file_descriptors', None, None),
+ ('process.open_file_descriptors', None, None)
+]
+
+CLUSTER_STATS = [
+ ('nodes.count.data_only', 'count_data_only', None),
+ ('nodes.count.master_data', 'count_master_data', None),
+ ('nodes.count.total', 'count_total', None),
+ ('nodes.count.master_only', 'count_master_only', None),
+ ('nodes.count.client', 'count_client', None),
+ ('indices.docs.count', 'docs_count', None),
+ ('indices.query_cache.hit_count', 'query_cache_hit_count', None),
+ ('indices.query_cache.miss_count', 'query_cache_miss_count', None),
+ ('indices.store.size_in_bytes', 'store_size_in_bytes', None),
+ ('indices.count', 'indices_count', None),
+ ('indices.shards.total', 'shards_total', None)
+]
+
+HEALTH_STATS = [
+ ('number_of_nodes', 'health_number_of_nodes', None),
+ ('number_of_data_nodes', 'health_number_of_data_nodes', None),
+ ('number_of_pending_tasks', 'health_number_of_pending_tasks', None),
+ ('number_of_in_flight_fetch', 'health_number_of_in_flight_fetch', None),
+ ('active_shards', 'health_active_shards', None),
+ ('relocating_shards', 'health_relocating_shards', None),
+ ('unassigned_shards', 'health_unassigned_shards', None),
+ ('delayed_unassigned_shards', 'health_delayed_unassigned_shards', None),
+ ('initializing_shards', 'health_initializing_shards', None),
+ ('active_shards_percent_as_number', 'health_active_shards_percent_as_number', None)
+]
+
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['search_perf_total', 'search_perf_time', 'search_latency', 'index_perf_total', 'index_perf_time',
- 'index_latency', 'jvm_mem_heap', 'jvm_gc_count', 'jvm_gc_time', 'host_metrics_file_descriptors',
- 'host_metrics_http', 'host_metrics_transport', 'thread_pool_qr', 'fdata_cache', 'fdata_ev_tr',
- 'cluster_health_status', 'cluster_health_nodes', 'cluster_health_shards', 'cluster_stats_nodes',
- 'cluster_stats_query_cache', 'cluster_stats_docs', 'cluster_stats_store', 'cluster_stats_indices_shards']
+ORDER = ['search_perf_total', 'search_perf_current', 'search_perf_time', 'search_latency', 'index_perf_total',
+ 'index_perf_current', 'index_perf_time', 'index_latency', 'jvm_mem_heap', 'jvm_gc_count',
+ 'jvm_gc_time', 'host_metrics_file_descriptors', 'host_metrics_http', 'host_metrics_transport',
+ 'thread_pool_qr_q', 'thread_pool_qr_r', 'fdata_cache', 'fdata_ev_tr', 'cluster_health_status',
+ 'cluster_health_nodes', 'cluster_health_shards', 'cluster_stats_nodes', 'cluster_stats_query_cache',
+ 'cluster_stats_docs', 'cluster_stats_store', 'cluster_stats_indices_shards']
CHARTS = {
'search_perf_total': {
- 'options': [None, 'Number of queries, fetches', 'queries', 'Search performance', 'es.search_query', 'stacked'],
+ 'options': [None, 'Total number of queries, fetches', 'number of', 'search performance',
+ 'es.search_query_total', 'stacked'],
+ 'lines': [
+ ['query_total', 'queries', 'incremental'],
+ ['fetch_total', 'fetches', 'incremental']
+ ]},
+ 'search_perf_current': {
+ 'options': [None, 'Number of queries, fetches in progress', 'number of', 'search performance',
+ 'es.search_query_current', 'stacked'],
'lines': [
- ['query_total', 'search_total', 'incremental'],
- ['fetch_total', 'fetch_total', 'incremental'],
- ['query_current', 'search_current', 'absolute'],
- ['fetch_current', 'fetch_current', 'absolute']
+ ['query_current', 'queries', 'absolute'],
+ ['fetch_current', 'fetches', 'absolute']
]},
'search_perf_time': {
- 'options': [None, 'Time spent on queries, fetches', 'seconds', 'Search performance', 'es.search_time', 'stacked'],
+ 'options': [None, 'Time spent on queries, fetches', 'seconds', 'search performance',
+ 'es.search_time', 'stacked'],
'lines': [
['query_time_in_millis', 'query', 'incremental', 1, 1000],
['fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
]},
'search_latency': {
- 'options': [None, 'Query and fetch latency', 'ms', 'Search performance', 'es.search_latency', 'stacked'],
+ 'options': [None, 'Query and fetch latency', 'ms', 'search performance', 'es.search_latency', 'stacked'],
'lines': [
['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000]
]},
'index_perf_total': {
- 'options': [None, 'Number of documents indexed, index refreshes, flushes', 'documents/indexes',
- 'Indexing performance', 'es.index_doc', 'stacked'],
+ 'options': [None, 'Total number of documents indexed, index refreshes, index flushes to disk', 'number of',
+ 'indexing performance', 'es.index_performance_total', 'stacked'],
'lines': [
['indexing_index_total', 'indexed', 'incremental'],
['refresh_total', 'refreshes', 'incremental'],
- ['flush_total', 'flushes', 'incremental'],
- ['indexing_index_current', 'indexed_current', 'absolute'],
+ ['flush_total', 'flushes', 'incremental']
+ ]},
+ 'index_perf_current': {
+ 'options': [None, 'Number of documents currently being indexed', 'currently indexed',
+ 'indexing performance', 'es.index_performance_current', 'stacked'],
+ 'lines': [
+ ['indexing_index_current', 'documents', 'absolute']
]},
'index_perf_time': {
- 'options': [None, 'Time spent on indexing, refreshing, flushing', 'seconds', 'Indexing performance',
+ 'options': [None, 'Time spent on indexing, refreshing, flushing', 'seconds', 'indexing performance',
'es.search_time', 'stacked'],
'lines': [
['indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
@@ -63,67 +144,72 @@ CHARTS = {
['flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
]},
'index_latency': {
- 'options': [None, 'Indexing and flushing latency', 'ms', 'Indexing performance',
+ 'options': [None, 'Indexing and flushing latency', 'ms', 'indexing performance',
'es.index_latency', 'stacked'],
'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000],
['flushing_latency', 'flushing', 'absolute', 1, 1000]
]},
'jvm_mem_heap': {
- 'options': [None, 'JVM heap currently in use/committed', 'percent/MB', 'Memory usage and gc',
+ 'options': [None, 'JVM heap currently in use/committed', 'percent/MB', 'memory usage and gc',
'es.jvm_heap', 'area'],
'lines': [
['jvm_heap_percent', 'inuse', 'absolute'],
['jvm_heap_commit', 'commit', 'absolute', -1, 1048576]
]},
'jvm_gc_count': {
- 'options': [None, 'Count of garbage collections', 'counts', 'Memory usage and gc', 'es.gc_count', 'stacked'],
+ 'options': [None, 'Count of garbage collections', 'counts', 'memory usage and gc', 'es.gc_count', 'stacked'],
'lines': [
['young_collection_count', 'young', 'incremental'],
['old_collection_count', 'old', 'incremental']
]},
'jvm_gc_time': {
- 'options': [None, 'Time spent on garbage collections', 'ms', 'Memory usage and gc', 'es.gc_time', 'stacked'],
+ 'options': [None, 'Time spent on garbage collections', 'ms', 'memory usage and gc', 'es.gc_time', 'stacked'],
'lines': [
['young_collection_time_in_millis', 'young', 'incremental'],
['old_collection_time_in_millis', 'old', 'incremental']
]},
- 'thread_pool_qr': {
- 'options': [None, 'Number of queued/rejected threads in thread pool', 'threads', 'Queues and rejections',
- 'es.qr', 'stacked'],
+ 'thread_pool_qr_q': {
+ 'options': [None, 'Number of queued threads in thread pool', 'queued threads', 'queues and rejections',
+ 'es.thread_pool_queued', 'stacked'],
'lines': [
- ['bulk_queue', 'bulk_queue', 'absolute'],
- ['index_queue', 'index_queue', 'absolute'],
- ['search_queue', 'search_queue', 'absolute'],
- ['merge_queue', 'merge_queue', 'absolute'],
- ['bulk_rejected', 'bulk_rej', 'absolute'],
- ['index_rejected', 'index_rej', 'absolute'],
- ['search_rejected', 'search_rej', 'absolute'],
- ['merge_rejected', 'merge_rej', 'absolute']
+ ['bulk_queue', 'bulk', 'absolute'],
+ ['index_queue', 'index', 'absolute'],
+ ['search_queue', 'search', 'absolute'],
+ ['merge_queue', 'merge', 'absolute']
+ ]},
+ 'thread_pool_qr_r': {
+ 'options': [None, 'Number of rejected threads in thread pool', 'rejected threads', 'queues and rejections',
+ 'es.thread_pool_rejected', 'stacked'],
+ 'lines': [
+ ['bulk_rejected', 'bulk', 'absolute'],
+ ['index_rejected', 'index', 'absolute'],
+ ['search_rejected', 'search', 'absolute'],
+ ['merge_rejected', 'merge', 'absolute']
]},
'fdata_cache': {
- 'options': [None, 'Fielddata cache size', 'MB', 'Fielddata cache', 'es.fdata_cache', 'line'],
+ 'options': [None, 'Fielddata cache size', 'MB', 'fielddata cache', 'es.fdata_cache', 'line'],
'lines': [
- ['index_fdata_mem', 'mem_size', 'absolute', 1, 1048576]
+ ['index_fdata_memory', 'cache', 'absolute', 1, 1048576]
]},
'fdata_ev_tr': {
'options': [None, 'Fielddata evictions and circuit breaker tripped count', 'number of events',
- 'Fielddata cache', 'es.fdata_ev_tr', 'line'],
+ 'fielddata cache', 'es.evictions_tripped', 'line'],
'lines': [
- ['index_fdata_evic', 'evictions', 'incremental'],
- ['breakers_fdata_trip', 'tripped', 'incremental']
+ ['evictions', None, 'incremental'],
+ ['tripped', None, 'incremental']
]},
'cluster_health_nodes': {
- 'options': [None, 'Nodes and tasks statistics', 'units', 'Cluster health API',
- 'es.cluster_health', 'stacked'],
+ 'options': [None, 'Nodes and tasks statistics', 'units', 'cluster health API',
+ 'es.cluster_health_nodes', 'stacked'],
'lines': [
['health_number_of_nodes', 'nodes', 'absolute'],
['health_number_of_data_nodes', 'data_nodes', 'absolute'],
['health_number_of_pending_tasks', 'pending_tasks', 'absolute'],
- ['health_number_of_in_flight_fetch', 'inflight_fetch', 'absolute']
+ ['health_number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
]},
'cluster_health_status': {
- 'options': [None, 'Cluster status', 'status', 'Cluster health API',
+ 'options': [None, 'Cluster status', 'status', 'cluster health API',
'es.cluster_health_status', 'area'],
'lines': [
['status_green', 'green', 'absolute'],
@@ -134,8 +220,8 @@ CHARTS = {
['status_yellow', 'yellow', 'absolute']
]},
'cluster_health_shards': {
- 'options': [None, 'Shards statistics', 'shards', 'Cluster health API',
- 'es.cluster_health_sharts', 'stacked'],
+ 'options': [None, 'Shards statistics', 'shards', 'cluster health API',
+ 'es.cluster_health_shards', 'stacked'],
'lines': [
['health_active_shards', 'active_shards', 'absolute'],
['health_relocating_shards', 'relocating_shards', 'absolute'],
@@ -145,8 +231,8 @@ CHARTS = {
['health_active_shards_percent_as_number', 'active_percent', 'absolute']
]},
'cluster_stats_nodes': {
- 'options': [None, 'Nodes statistics', 'nodes', 'Cluster stats API',
- 'es.cluster_stats_nodes', 'stacked'],
+ 'options': [None, 'Nodes statistics', 'nodes', 'cluster stats API',
+ 'es.cluster_nodes', 'stacked'],
'lines': [
['count_data_only', 'data_only', 'absolute'],
['count_master_data', 'master_data', 'absolute'],
@@ -155,47 +241,47 @@ CHARTS = {
['count_client', 'client', 'absolute']
]},
'cluster_stats_query_cache': {
- 'options': [None, 'Query cache statistics', 'queries', 'Cluster stats API',
- 'es.cluster_stats_query_cache', 'stacked'],
+ 'options': [None, 'Query cache statistics', 'queries', 'cluster stats API',
+ 'es.cluster_query_cache', 'stacked'],
'lines': [
['query_cache_hit_count', 'hit', 'incremental'],
['query_cache_miss_count', 'miss', 'incremental']
]},
'cluster_stats_docs': {
- 'options': [None, 'Docs statistics', 'count', 'Cluster stats API',
- 'es.cluster_stats_docs', 'line'],
+ 'options': [None, 'Docs statistics', 'count', 'cluster stats API',
+ 'es.cluster_docs', 'line'],
'lines': [
['docs_count', 'docs', 'absolute']
]},
'cluster_stats_store': {
- 'options': [None, 'Store statistics', 'MB', 'Cluster stats API',
- 'es.cluster_stats_store', 'line'],
+ 'options': [None, 'Store statistics', 'MB', 'cluster stats API',
+ 'es.cluster_store', 'line'],
'lines': [
['store_size_in_bytes', 'size', 'absolute', 1, 1048567]
]},
'cluster_stats_indices_shards': {
- 'options': [None, 'Indices and shards statistics', 'count', 'Cluster stats API',
- 'es.cluster_stats_ind_sha', 'stacked'],
+ 'options': [None, 'Indices and shards statistics', 'count', 'cluster stats API',
+ 'es.cluster_indices_shards', 'stacked'],
'lines': [
['indices_count', 'indices', 'absolute'],
['shards_total', 'shards', 'absolute']
]},
'host_metrics_transport': {
- 'options': [None, 'Cluster communication transport metrics', 'kbit/s', 'Host metrics',
- 'es.host_metrics_transport', 'area'],
+ 'options': [None, 'Cluster communication transport metrics', 'kbit/s', 'host metrics',
+ 'es.host_transport', 'area'],
'lines': [
['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
]},
'host_metrics_file_descriptors': {
- 'options': [None, 'Available file descriptors in percent', 'percent', 'Host metrics',
- 'es.host_metrics_descriptors', 'area'],
+ 'options': [None, 'Available file descriptors in percent', 'percent', 'host metrics',
+ 'es.host_descriptors', 'area'],
'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10]
]},
'host_metrics_http': {
- 'options': [None, 'Opened HTTP connections', 'connections', 'Host metrics',
- 'es.host_metrics_http', 'line'],
+ 'options': [None, 'Opened HTTP connections', 'connections', 'host metrics',
+ 'es.host_http_connections', 'line'],
'lines': [
['http_current_open', 'opened', 'absolute', 1, 1]
]}
@@ -208,78 +294,72 @@ class Service(UrlService):
self.order = ORDER
self.definitions = CHARTS
self.host = self.configuration.get('host')
- self.port = self.configuration.get('port')
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
+ self.port = self.configuration.get('port', 9200)
+ self.scheme = self.configuration.get('scheme', 'http')
self.latency = dict()
+ self.methods = list()
def check(self):
# We can't start if <host> AND <port> not specified
- if not all([self.host, self.port]):
+ if not all([self.host, self.port, isinstance(self.host, str), isinstance(self.port, (str, int))]):
+ self.error('Host is not defined in the module configuration file')
return False
# It as a bad idea to use hostname.
- # Hostname -> ipaddress
+ # Hostname -> ip address
try:
self.host = gethostbyname(self.host)
- except Exception as e:
- self.error(str(e))
+ except gaierror as error:
+ self.error(str(error))
return False
- # HTTP Auth? NOT TESTED
- self.auth = self.user and self.password
-
+ scheme = 'http' if self.scheme else 'https'
+ # Add handlers (auth, self signed cert accept)
+ self.url = '%s://%s:%s' % (scheme, self.host, self.port)
+ self._UrlService__add_openers()
# Create URL for every Elasticsearch API
- url_node_stats = 'http://%s:%s/_nodes/_local/stats' % (self.host, self.port)
- url_cluster_health = 'http://%s:%s/_cluster/health' % (self.host, self.port)
- url_cluster_stats = 'http://%s:%s/_cluster/stats' % (self.host, self.port)
+ url_node_stats = '%s://%s:%s/_nodes/_local/stats' % (scheme, self.host, self.port)
+ url_cluster_health = '%s://%s:%s/_cluster/health' % (scheme, self.host, self.port)
+ url_cluster_stats = '%s://%s:%s/_cluster/stats' % (scheme, self.host, self.port)
# Create list of enabled API calls
user_choice = [bool(self.configuration.get('node_stats', True)),
bool(self.configuration.get('cluster_health', True)),
bool(self.configuration.get('cluster_stats', True))]
-
- avail_methods = [(self._get_node_stats, url_node_stats),
- (self._get_cluster_health, url_cluster_health),
- (self._get_cluster_stats, url_cluster_stats)]
+
+ avail_methods = [METHODS(get_data_function=self._get_node_stats_, url=url_node_stats),
+ METHODS(get_data_function=self._get_cluster_health_, url=url_cluster_health),
+ METHODS(get_data_function=self._get_cluster_stats_, url=url_cluster_stats)]
# Remove disabled API calls from 'avail methods'
- self.methods = [avail_methods[_] for _ in range(len(avail_methods)) if user_choice[_]]
+ self.methods = [avail_methods[e[0]] for e in enumerate(avail_methods) if user_choice[e[0]]]
# Run _get_data for ALL active API calls.
- api_result = {}
+ api_check_result = dict()
+ data_from_check = dict()
for method in self.methods:
- api_result[method[1]] = (bool(self._get_raw_data(method[1])))
+ try:
+ api_check_result[method.url] = method.get_data_function(None, method.url)
+ data_from_check.update(api_check_result[method.url] or dict())
+ except KeyError as error:
+ self.error('Failed to parse %s. Error: %s' % (method.url, str(error)))
+ return False
# We can start ONLY if all active API calls returned NOT None
- if not all(api_result.values()):
+ if not all(api_check_result.values()):
self.error('Plugin could not get data from all APIs')
- self.error('%s' % api_result)
return False
else:
- self.info('%s' % api_result)
- self.info('Plugin was started successfully')
-
+ self._data_from_check = data_from_check
return True
- def _get_raw_data(self, url):
- try:
- if not self.auth:
- raw_data = get(url)
- else:
- raw_data = get(url, auth=(self.user, self.password))
- except Exception:
- return None
-
- return raw_data
-
def _get_data(self):
threads = list()
queue = Queue()
result = dict()
for method in self.methods:
- th = Thread(target=method[0], args=(queue, method[1]))
+ th = Thread(target=method.get_data_function, args=(queue, method.url))
th.start()
threads.append(th)
@@ -289,102 +369,79 @@ class Service(UrlService):
return result or None
- def _get_cluster_health(self, queue, url):
+ def _get_cluster_health_(self, queue, url):
"""
Format data received from http request
:return: dict
"""
- data = self._get_raw_data(url)
+ raw_data = self._get_raw_data(url)
- if not data:
- queue.put({})
+ if not raw_data:
+ return queue.put(dict()) if queue else None
else:
- data = data.json()
+ data = loads(raw_data)
+
+ to_netdata = fetch_data_(raw_data=data, metrics_list=HEALTH_STATS)
- to_netdata = dict()
- to_netdata.update(update_key('health', data))
to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
- to_netdata[''.join(['status_', to_netdata.get('health_status', '')])] = 1
+ current_status = 'status_' + data['status']
+ to_netdata[current_status] = 1
- queue.put(to_netdata)
+ return queue.put(to_netdata) if queue else to_netdata
- def _get_cluster_stats(self, queue, url):
+ def _get_cluster_stats_(self, queue, url):
"""
Format data received from http request
:return: dict
"""
- data = self._get_raw_data(url)
+ raw_data = self._get_raw_data(url)
- if not data:
- queue.put({})
+ if not raw_data:
+ return queue.put(dict()) if queue else None
else:
- data = data.json()
+ data = loads(raw_data)
- to_netdata = dict()
- to_netdata.update(update_key('count', data['nodes']['count']))
- to_netdata.update(update_key('query_cache', data['indices']['query_cache']))
- to_netdata.update(update_key('docs', data['indices']['docs']))
- to_netdata.update(update_key('store', data['indices']['store']))
- to_netdata['indices_count'] = data['indices']['count']
- to_netdata['shards_total'] = data['indices']['shards']['total']
+ to_netdata = fetch_data_(raw_data=data, metrics_list=CLUSTER_STATS)
- queue.put(to_netdata)
+ return queue.put(to_netdata) if queue else to_netdata
- def _get_node_stats(self, queue, url):
+ def _get_node_stats_(self, queue, url):
"""
Format data received from http request
:return: dict
"""
- data = self._get_raw_data(url)
+ raw_data = self._get_raw_data(url)
- if not data:
- queue.put({})
+ if not raw_data:
+ return queue.put(dict()) if queue else None
else:
- data = data.json()
+ data = loads(raw_data)
+
node = list(data['nodes'].keys())[0]
- to_netdata = dict()
- # Search performance metrics
- to_netdata.update(data['nodes'][node]['indices']['search'])
- to_netdata['query_latency'] = self.find_avg(to_netdata['query_total'],
- to_netdata['query_time_in_millis'], 'query_latency')
- to_netdata['fetch_latency'] = self.find_avg(to_netdata['fetch_total'],
- to_netdata['fetch_time_in_millis'], 'fetch_latency')
-
- # Indexing performance metrics
- for key in ['indexing', 'refresh', 'flush']:
- to_netdata.update(update_key(key, data['nodes'][node]['indices'].get(key, {})))
- to_netdata['indexing_latency'] = self.find_avg(to_netdata['indexing_index_total'],
- to_netdata['indexing_index_time_in_millis'], 'index_latency')
- to_netdata['flushing_latency'] = self.find_avg(to_netdata['flush_total'],
- to_netdata['flush_total_time_in_millis'], 'flush_latency')
- # Memory usage and garbage collection
- to_netdata.update(update_key('young', data['nodes'][node]['jvm']['gc']['collectors']['young']))
- to_netdata.update(update_key('old', data['nodes'][node]['jvm']['gc']['collectors']['old']))
- to_netdata['jvm_heap_percent'] = data['nodes'][node]['jvm']['mem']['heap_used_percent']
- to_netdata['jvm_heap_commit'] = data['nodes'][node]['jvm']['mem']['heap_committed_in_bytes']
-
- # Thread pool queues and rejections
- for key in ['bulk', 'index', 'search', 'merge']:
- to_netdata.update(update_key(key, data['nodes'][node]['thread_pool'].get(key, {})))
-
- # Fielddata cache
- to_netdata['index_fdata_mem'] = data['nodes'][node]['indices']['fielddata']['memory_size_in_bytes']
- to_netdata['index_fdata_evic'] = data['nodes'][node]['indices']['fielddata']['evictions']
- to_netdata['breakers_fdata_trip'] = data['nodes'][node]['breakers']['fielddata']['tripped']
-
- # Host metrics
- to_netdata.update(update_key('http', data['nodes'][node]['http']))
- to_netdata.update(update_key('transport', data['nodes'][node]['transport']))
- to_netdata['file_descriptors_used'] = round(float(data['nodes'][node]['process']['open_file_descriptors'])
- / data['nodes'][node]['process']['max_file_descriptors'] * 1000)
-
- queue.put(to_netdata)
-
- def find_avg(self, value1, value2, key):
+ to_netdata = fetch_data_(raw_data=data['nodes'][node], metrics_list=NODE_STATS)
+
+ # Search performance latency
+ to_netdata['query_latency'] = self.find_avg_(to_netdata['query_total'],
+ to_netdata['query_time_in_millis'], 'query_latency')
+ to_netdata['fetch_latency'] = self.find_avg_(to_netdata['fetch_total'],
+ to_netdata['fetch_time_in_millis'], 'fetch_latency')
+
+ # Indexing performance latency
+ to_netdata['indexing_latency'] = self.find_avg_(to_netdata['indexing_index_total'],
+ to_netdata['indexing_index_time_in_millis'], 'index_latency')
+ to_netdata['flushing_latency'] = self.find_avg_(to_netdata['flush_total'],
+ to_netdata['flush_total_time_in_millis'], 'flush_latency')
+
+ to_netdata['file_descriptors_used'] = round(float(to_netdata['open_file_descriptors'])
+ / to_netdata['max_file_descriptors'] * 1000)
+
+ return queue.put(to_netdata) if queue else to_netdata
+
+ def find_avg_(self, value1, value2, key):
if key not in self.latency:
self.latency.update({key: [value1, value2]})
return 0
@@ -398,5 +455,17 @@ class Service(UrlService):
return 0
-def update_key(string, dictionary):
- return {'_'.join([string, k]): v for k, v in dictionary.items()}
+def fetch_data_(raw_data, metrics_list):
+ to_netdata = dict()
+ for metric, new_name, function in metrics_list:
+ value = raw_data
+ for key in metric.split('.'):
+ try:
+ value = value[key]
+ except KeyError:
+ break
+ if not isinstance(value, dict) and key:
+ to_netdata[new_name or key] = value if not function else function(value)
+
+ return to_netdata
+
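The new `fetch_data_` helper above replaces the per-section `update_key` calls: each dotted path from NODE_STATS, CLUSTER_STATS or HEALTH_STATS is walked into the parsed JSON and the leaf value is stored under the optional new name. A self-contained illustration (the function body is copied from the patch; the sample document and metric list are invented):

```python
# Standalone illustration of fetch_data_; the function is as introduced above,
# the nested 'sample' document and metric list are made up for the example.
def fetch_data_(raw_data, metrics_list):
    to_netdata = dict()
    for metric, new_name, function in metrics_list:
        value = raw_data
        for key in metric.split('.'):
            try:
                value = value[key]
            except KeyError:
                break
        if not isinstance(value, dict) and key:
            to_netdata[new_name or key] = value if not function else function(value)
    return to_netdata

sample = {
    'indices': {'search': {'query_total': 12, 'fetch_total': 3}},
    'jvm': {'mem': {'heap_used_percent': 41}},
}
metrics = [
    ('indices.search.query_total', None, None),               # keep the last key
    ('jvm.mem.heap_used_percent', 'jvm_heap_percent', None),  # rename the leaf
]

print(fetch_data_(sample, metrics))  # {'query_total': 12, 'jvm_heap_percent': 41}
```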
diff --git a/python.d/fail2ban.chart.py b/python.d/fail2ban.chart.py
index 2d80282c6..c7d24e8c1 100644
--- a/python.d/fail2ban.chart.py
+++ b/python.d/fail2ban.chart.py
@@ -4,16 +4,18 @@
from base import LogService
from re import compile
+
try:
from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
from os import access as is_accessible, R_OK
+from os.path import isdir
+from glob import glob
priority = 60000
retries = 60
-regex = compile(r'([A-Za-z-]+\]) enabled = ([a-z]+)')
-
+REGEX = compile(r'\[([A-Za-z-_]+)][^\[\]]*?(?<!# )enabled = true')
ORDER = ['jails_group']
@@ -23,22 +25,17 @@ class Service(LogService):
self.order = ORDER
self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
- self.default_jails = ['ssh']
+ self.conf_dir = self.configuration.get('conf_dir', '')
try:
self.exclude = self.configuration['exclude'].split()
except (KeyError, AttributeError):
self.exclude = []
-
def _get_data(self):
"""
Parse new log lines
:return: dict
"""
-
- # If _get_raw_data returns empty list (no new lines in log file) we will send to Netdata this
- self.data = {jail: 0 for jail in self.jails_list}
-
try:
raw = self._get_raw_data()
if raw is None:
@@ -50,42 +47,86 @@ class Service(LogService):
# Fail2ban logs looks like
# 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
- self.data = dict(
+ data = dict(
zip(
self.jails_list,
[len(list(filterfalse(lambda line: (jail + '] Ban') not in line, raw))) for jail in self.jails_list]
))
+ for jail in data:
+ self.data[jail] += data[jail]
+
return self.data
def check(self):
-
+
# Check "log_path" is accessible.
# If NOT STOP plugin
if not is_accessible(self.log_path, R_OK):
- self.error('Cannot access file %s' % (self.log_path))
+ self.error('Cannot access file %s' % self.log_path)
return False
+ jails_list = list()
+
+ if self.conf_dir:
+ dir_jails, error = parse_conf_dir(self.conf_dir)
+ jails_list.extend(dir_jails)
+ if not dir_jails:
+ self.error(error)
+
+ if self.conf_path:
+ path_jails, error = parse_conf_path(self.conf_path)
+ jails_list.extend(path_jails)
+ if not path_jails:
+ self.error(error)
- # Check "conf_path" is accessible.
- # If "conf_path" is accesible try to parse it to find enabled jails
- if is_accessible(self.conf_path, R_OK):
- with open(self.conf_path, 'rt') as jails_conf:
- jails_list = regex.findall(' '.join(jails_conf.read().split()))
- self.jails_list = [jail[:-1] for jail, status in jails_list if status == 'true']
- else:
- self.jails_list = []
- self.error('Cannot access jail.local file %s.' % (self.conf_path))
-
# If for some reason parse failed we still can START with default jails_list.
- self.jails_list = [jail for jail in self.jails_list if jail not in self.exclude]\
- if self.jails_list else self.default_jails
+ self.jails_list = list(set(jails_list) - set(self.exclude)) or ['ssh']
+ self.data = dict([(jail, 0) for jail in self.jails_list])
self.create_dimensions()
- self.info('Plugin succefully started. Jails: %s' % (self.jails_list))
+ self.info('Plugin successfully started. Jails: %s' % self.jails_list)
return True
def create_dimensions(self):
- self.definitions = {'jails_group':
- {'options':
- [None, "Jails ban statistics", "bans/s", 'Jails', 'jail.ban', 'line'], 'lines': []}}
+ self.definitions = {
+ 'jails_group': {'options': [None, "Jails ban statistics", "bans/s", 'jails', 'jail.ban', 'line'],
+ 'lines': []}}
for jail in self.jails_list:
- self.definitions['jails_group']['lines'].append([jail, jail, 'absolute'])
+ self.definitions['jails_group']['lines'].append([jail, jail, 'incremental'])
+
+
+def parse_conf_dir(conf_dir):
+ if not isdir(conf_dir):
+ return list(), '%s is not a directory' % conf_dir
+
+ jail_local = list(filter(lambda local: is_accessible(local, R_OK), glob(conf_dir + '/*.local')))
+ jail_conf = list(filter(lambda conf: is_accessible(conf, R_OK), glob(conf_dir + '/*.conf')))
+
+ if not (jail_local or jail_conf):
+ return list(), '%s is empty or not readable' % conf_dir
+
+    # According to "man jail.conf", files may be both *.local AND *.conf
+    # *.conf files are parsed first; changes in *.local override the configuration in *.conf
+ if jail_conf:
+ jail_local.extend([conf for conf in jail_conf if conf[:-5] not in [local[:-6] for local in jail_local]])
+ jails_list = list()
+ for conf in jail_local:
+ with open(conf, 'rt') as f:
+ raw_data = f.read()
+
+ data = ' '.join(raw_data.split())
+ jails_list.extend(REGEX.findall(data))
+ jails_list = list(set(jails_list))
+
+ return jails_list, 'can\'t locate any jails in %s. Default jail is [\'ssh\']' % conf_dir
+
+
+def parse_conf_path(conf_path):
+ if not is_accessible(conf_path, R_OK):
+ return list(), '%s is not readable' % conf_path
+
+ with open(conf_path, 'rt') as jails_conf:
+ raw_data = jails_conf.read()
+
+ data = raw_data.split()
+ jails_list = REGEX.findall(' '.join(data))
+ return jails_list, 'can\'t locate any jails in %s. Default jail is [\'ssh\']' % conf_path
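The rewritten jail detection collapses each configuration file to a single space-separated string and lets REGEX pick up every section that contains an un-commented enabled = true. A small sketch of that behaviour on a made-up jail.local fragment (the configuration content below is purely illustrative):

# Sketch of how the jail-detection REGEX behaves; the config text is made up.
from re import compile

REGEX = compile(r'\[([A-Za-z-_]+)][^\[\]]*?(?<!# )enabled = true')

raw_data = """
[ssh]
enabled = true
[nginx-http-auth]
enabled = false
[postfix]
# enabled = true
"""
data = ' '.join(raw_data.split())   # same normalisation the module applies
print(REGEX.findall(data))          # ['ssh'] - disabled and commented-out jails are skipped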
diff --git a/python.d/freeradius.chart.py b/python.d/freeradius.chart.py
index 2ac280f0e..f3de15735 100644
--- a/python.d/freeradius.chart.py
+++ b/python.d/freeradius.chart.py
@@ -3,7 +3,6 @@
# Author: l2isbad
from base import SimpleService
-from os.path import isfile
from re import findall
from subprocess import Popen, PIPE
@@ -11,7 +10,6 @@ from subprocess import Popen, PIPE
priority = 60000
retries = 60
update_every = 15
-directories = ['/bin/', '/usr/bin/', '/sbin/', '/usr/sbin/']
# charts order (can be overridden if you want less charts, or different order)
ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
@@ -62,27 +60,23 @@ class Service(SimpleService):
self.acct = self.configuration.get('acct', False)
self.proxy_auth = self.configuration.get('proxy_auth', False)
self.proxy_acct = self.configuration.get('proxy_acct', False)
- try:
- self.echo = [''.join([directory, 'echo']) for directory in directories if isfile(''.join([directory, 'echo']))][0]
- self.radclient = [''.join([directory, 'radclient']) for directory in directories if isfile(''.join([directory, 'radclient']))][0]
- except IndexError:
- self.echo = []
- self.radclient = []
+ self.echo = self.find_binary('echo')
+ self.radclient = self.find_binary('radclient')
self.sub_echo = [self.echo, 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept']
self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', ':'.join([self.host, self.port]), 'status', self.secret]
def check(self):
if not all([self.echo, self.radclient]):
- self.error('Command radclient not found')
+ self.error('Can\'t locate \'radclient\' binary or binary is not executable by netdata')
return False
if self._get_raw_data():
chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
- self.definitions = {k:v for k, v in CHARTS.items() if k in self.order}
+ self.definitions = dict([chart for chart in CHARTS.items() if chart[0] in self.order])
            self.info('Plugin was started successfully')
return True
else:
- self.error('Request returned no data. Is server alive? Used options: host {}, port {}, secret {}'.format(self.host, self.port, self.secret))
+ self.error('Request returned no data. Is server alive? Used options: host {0}, port {1}, secret {2}'.format(self.host, self.port, self.secret))
return False
@@ -92,7 +86,7 @@ class Service(SimpleService):
:return: dict
"""
result = self._get_raw_data()
- return {k.lower():int(v) for k, v in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)}
+ return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
def _get_raw_data(self):
"""
diff --git a/python.d/gunicorn_log.chart.py b/python.d/gunicorn_log.chart.py
deleted file mode 100644
index 945963635..000000000
--- a/python.d/gunicorn_log.chart.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: nginx log netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# Modified for Gunicorn by: Jeff Willette (deltaskelta)
-
-from base import LogService
-import re
-
-priority = 60000
-retries = 60
-# update_every = 3
-
-ORDER = ['codes']
-CHARTS = {
- 'codes': {
- 'options': [None, 'gunicorn status codes', 'requests/s', 'requests', 'gunicorn_log.codes', 'stacked'],
- 'lines': [
- ["2xx", None, "incremental"],
- ["3xx", None, "incremental"],
- ["4xx", None, "incremental"],
- ["5xx", None, "incremental"]
- ]}
-}
-
-
-class Service(LogService):
- def __init__(self, configuration=None, name=None):
- LogService.__init__(self, configuration=configuration, name=name)
- if len(self.log_path) == 0:
- self.log_path = "/var/log/gunicorn/access.log"
- self.order = ORDER
- self.definitions = CHARTS
- pattern = r'" ([0-9]{3}) '
- #pattern = r'(?:" )([0-9][0-9][0-9]) ?'
- self.regex = re.compile(pattern)
-
- def _get_data(self):
- """
- Parse new log lines
- :return: dict
- """
- data = {'2xx': 0,
- '3xx': 0,
- '4xx': 0,
- '5xx': 0}
- try:
- raw = self._get_raw_data()
- if raw is None:
- return None
- elif not raw:
- return data
- except (ValueError, AttributeError):
- return None
-
- regex = self.regex
- for line in raw:
- code = regex.search(line)
- try:
- beginning = code.group(1)[0]
- except AttributeError:
- continue
-
- if beginning == '2':
- data["2xx"] += 1
- elif beginning == '3':
- data["3xx"] += 1
- elif beginning == '4':
- data["4xx"] += 1
- elif beginning == '5':
- data["5xx"] += 1
-
- return data
diff --git a/python.d/isc_dhcpd.chart.py b/python.d/isc_dhcpd.chart.py
index 54fbfb1f5..bb9ba5cbc 100644
--- a/python.d/isc_dhcpd.chart.py
+++ b/python.d/isc_dhcpd.chart.py
@@ -48,31 +48,31 @@ class Service(SimpleService):
except (ValueError, IndexError, AttributeError, SyntaxError) as e:
            self.error('Pools configuration is incorrect', str(e))
return False
-
+
# Creating static charts
self.order = ['parse_time', 'leases_size', 'utilization', 'total']
self.definitions = {'utilization':
{'options':
- [None, 'Pools utilization', 'used %', 'Utilization', 'isc_dhcpd.util', 'line'],
+ [None, 'Pools utilization', 'used %', 'utilization', 'isc_dhcpd.util', 'line'],
'lines': []},
'total':
{'options':
- [None, 'Total all pools', 'leases', 'Utilization', 'isc_dhcpd.total', 'line'],
+ [None, 'Total all pools', 'leases', 'utilization', 'isc_dhcpd.total', 'line'],
'lines': [['total', 'leases', 'absolute']]},
'parse_time':
{'options':
- [None, 'Parse time', 'ms', 'Parse stats', 'isc_dhcpd.parse', 'line'],
+ [None, 'Parse time', 'ms', 'parse stats', 'isc_dhcpd.parse', 'line'],
'lines': [['ptime', 'time', 'absolute']]},
'leases_size':
{'options':
- [None, 'dhcpd.leases file size', 'kilobytes', 'Parse stats', 'isc_dhcpd.lsize', 'line'],
+ [None, 'dhcpd.leases file size', 'kilobytes', 'parse stats', 'isc_dhcpd.lsize', 'line'],
'lines': [['lsize', 'size', 'absolute']]}}
# Creating dynamic charts
for pool in self.pools:
self.definitions['utilization']['lines'].append([''.join(['ut_', pool]), pool, 'absolute'])
self.order.append(''.join(['leases_', pool]))
self.definitions[''.join(['leases_', pool])] = \
- {'options': [None, 'Active leases', 'leases', 'Pools', 'isc_dhcpd.lease', 'area'],
+ {'options': [None, 'Active leases', 'leases', 'pools', 'isc_dhcpd.lease', 'area'],
'lines': [[''.join(['le_', pool]), pool, 'absolute']]}
        self.info('Plugin was started successfully')
@@ -93,13 +93,10 @@ class Service(SimpleService):
part2 = filterfalse(find_ends, dhcp_leases)
raw_result = dict(zip(part1, part2))
time_end = time()
-
file_parse_time = round((time_end - time_start) * 1000)
-
except Exception as e:
self.error("Failed to parse leases file:", str(e))
return None
-
else:
result = (raw_result, file_parse_time)
return result
@@ -109,32 +106,31 @@ class Service(SimpleService):
:return: dict
"""
raw_leases = self._get_raw_data()
-
if not raw_leases:
return None
# Result: {ipaddress: end lease time, ...}
- all_leases = {k[6:len(k)-3]:v[7:len(v)-2] for k, v in raw_leases[0].items()}
+ all_leases = dict([(k[6:len(k)-3], v[7:len(v)-2]) for k, v in raw_leases[0].items()])
# Result: [active binding, active binding....]. (Expire time (ends date;) - current time > 0)
active_leases = [k for k, v in all_leases.items() if is_binding_active(all_leases[k])]
# Result: {pool: number of active bindings in pool, ...}
- pools_count = {pool: len([lease for lease in active_leases if is_address_in(lease, pool)])
- for pool in self.pools}
+ pools_count = dict([(pool, len([lease for lease in active_leases if is_address_in(lease, pool)]))
+ for pool in self.pools])
# Result: {pool: number of host ip addresses in pool, ...}
- pools_max = {pool: (2 ** (32 - int(pool.split('/')[1])) - 2)
- for pool in self.pools}
+ pools_max = dict([(pool, (2 ** (32 - int(pool.split('/')[1])) - 2))
+ for pool in self.pools])
# Result: {pool: % utilization, ....} (percent)
- pools_util = {pool:int(round(float(pools_count[pool]) / pools_max[pool] * 100, 0))
- for pool in self.pools}
+ pools_util = dict([(pool, int(round(float(pools_count[pool]) / pools_max[pool] * 100, 0)))
+ for pool in self.pools])
        # Building dicts to send to netdata
- final_count = {''.join(['le_', k]): v for k, v in pools_count.items()}
- final_util = {''.join(['ut_', k]): v for k, v in pools_util.items()}
-
+ final_count = dict([(''.join(['le_', k]), v) for k, v in pools_count.items()])
+ final_util = dict([(''.join(['ut_', k]), v) for k, v in pools_util.items()])
+
to_netdata = {'total': len(active_leases)}
to_netdata.update({'lsize': int(stat(self.leases_path)[6] / 1024)})
to_netdata.update({'ptime': int(raw_leases[1])})
@@ -159,6 +155,7 @@ def find_lease(value):
def find_ends(value):
return value[2:6] != 'ends'
+
def return_utf(s):
# python2 returns "<type 'str'>" for simple strings
# python3 returns "<class 'str'>" for unicode strings
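For reference, the utilization values sent above follow directly from the pool prefix length: a pool with prefix /N has 2 ** (32 - N) - 2 usable host addresses, and utilization is the number of active leases divided by that maximum. A worked example with made-up lease counts:

# Worked example of the pool-utilization arithmetic; the lease counts are made up.
pools = {'192.168.1.0/24': 63, '10.0.0.0/22': 511}   # pool -> active leases

for pool, active in pools.items():
    prefix = int(pool.split('/')[1])
    max_hosts = 2 ** (32 - prefix) - 2                # usable host addresses in the pool
    utilization = int(round(float(active) / max_hosts * 100, 0))
    print(pool, max_hosts, utilization)               # 254 -> 25, 1022 -> 50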
diff --git a/python.d/mongodb.chart.py b/python.d/mongodb.chart.py
new file mode 100644
index 000000000..c01bd293c
--- /dev/null
+++ b/python.d/mongodb.chart.py
@@ -0,0 +1,672 @@
+# -*- coding: utf-8 -*-
+# Description: mongodb netdata python.d module
+# Author: l2isbad
+
+from base import SimpleService
+from copy import deepcopy
+from datetime import datetime
+from sys import exc_info
+
+try:
+ from pymongo import MongoClient, ASCENDING, DESCENDING
+ from pymongo.errors import PyMongoError
+ PYMONGO = True
+except ImportError:
+ PYMONGO = False
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+REPLSET_STATES = [
+ ('1', 'primary'),
+ ('8', 'down'),
+ ('2', 'secondary'),
+ ('3', 'recovering'),
+ ('5', 'startup2'),
+ ('4', 'fatal'),
+ ('7', 'arbiter'),
+ ('6', 'unknown'),
+ ('9', 'rollback'),
+ ('10', 'removed'),
+ ('0', 'startup')]
+
+
+def multiply_by_100(value):
+ return value * 100
+
+DEFAULT_METRICS = [
+ ('opcounters.delete', None, None),
+ ('opcounters.update', None, None),
+ ('opcounters.insert', None, None),
+ ('opcounters.query', None, None),
+ ('opcounters.getmore', None, None),
+ ('globalLock.activeClients.readers', 'activeClients_readers', None),
+ ('globalLock.activeClients.writers', 'activeClients_writers', None),
+ ('connections.available', 'connections_available', None),
+ ('connections.current', 'connections_current', None),
+ ('mem.mapped', None, None),
+ ('mem.resident', None, None),
+ ('mem.virtual', None, None),
+ ('globalLock.currentQueue.readers', 'currentQueue_readers', None),
+ ('globalLock.currentQueue.writers', 'currentQueue_writers', None),
+ ('asserts.msg', None, None),
+ ('asserts.regular', None, None),
+ ('asserts.user', None, None),
+ ('asserts.warning', None, None),
+ ('extra_info.page_faults', None, None),
+ ('metrics.record.moves', None, None),
+ ('backgroundFlushing.average_ms', None, multiply_by_100),
+ ('backgroundFlushing.last_ms', None, multiply_by_100),
+ ('backgroundFlushing.flushes', None, multiply_by_100),
+ ('metrics.cursor.timedOut', None, None),
+ ('metrics.cursor.open.total', 'cursor_total', None),
+ ('metrics.cursor.open.noTimeout', None, None),
+ ('cursors.timedOut', None, None),
+ ('cursors.totalOpen', 'cursor_total', None)
+]
+
+DUR = [
+ ('dur.commits', None, None),
+ ('dur.journaledMB', None, multiply_by_100)
+]
+
+WIREDTIGER = [
+ ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None),
+ ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None),
+ ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None),
+ ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None),
+ ('wiredTiger.cache.bytes currently in the cache', None, None),
+ ('wiredTiger.cache.tracked dirty bytes in the cache', None, None),
+ ('wiredTiger.cache.maximum bytes configured', None, None),
+ ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None),
+ ('wiredTiger.cache.modified pages evicted', 'modified', None)
+]
+
+TCMALLOC = [
+ ('tcmalloc.generic.current_allocated_bytes', None, None),
+ ('tcmalloc.generic.heap_size', None, None),
+ ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None),
+ ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None),
+ ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None)
+]
+
+COMMANDS = [
+ ('metrics.commands.count.total', 'count_total', None),
+ ('metrics.commands.createIndexes.total', 'createIndexes_total', None),
+ ('metrics.commands.delete.total', 'delete_total', None),
+ ('metrics.commands.eval.total', 'eval_total', None),
+ ('metrics.commands.findAndModify.total', 'findAndModify_total', None),
+ ('metrics.commands.insert.total', 'insert_total', None),
+ ('metrics.commands.delete.total', 'delete_total', None),
+ ('metrics.commands.count.failed', 'count_failed', None),
+ ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None),
+ ('metrics.commands.delete.failed', 'delete_failed', None),
+ ('metrics.commands.eval.failed', 'eval_failed', None),
+ ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None),
+ ('metrics.commands.insert.failed', 'insert_failed', None),
+ ('metrics.commands.delete.failed', 'delete_failed', None)
+]
+
+LOCKS = [
+ ('locks.Collection.acquireCount.R', 'Collection_R', None),
+ ('locks.Collection.acquireCount.r', 'Collection_r', None),
+ ('locks.Collection.acquireCount.W', 'Collection_W', None),
+ ('locks.Collection.acquireCount.w', 'Collection_w', None),
+ ('locks.Database.acquireCount.R', 'Database_R', None),
+ ('locks.Database.acquireCount.r', 'Database_r', None),
+ ('locks.Database.acquireCount.W', 'Database_W', None),
+ ('locks.Database.acquireCount.w', 'Database_w', None),
+ ('locks.Global.acquireCount.R', 'Global_R', None),
+ ('locks.Global.acquireCount.r', 'Global_r', None),
+ ('locks.Global.acquireCount.W', 'Global_W', None),
+ ('locks.Global.acquireCount.w', 'Global_w', None),
+ ('locks.Metadata.acquireCount.R', 'Metadata_R', None),
+ ('locks.Metadata.acquireCount.w', 'Metadata_w', None),
+ ('locks.oplog.acquireCount.r', 'oplog_r', None),
+ ('locks.oplog.acquireCount.w', 'oplog_w', None)
+]
+
+DBSTATS = [
+ 'dataSize',
+ 'indexSize',
+ 'storageSize',
+ 'objects'
+]
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['read_operations', 'write_operations', 'active_clients', 'journaling_transactions',
+ 'journaling_volume', 'background_flush_average', 'background_flush_last', 'background_flush_rate',
+ 'wiredtiger_read', 'wiredtiger_write', 'cursors', 'connections', 'memory', 'page_faults',
+ 'queued_requests', 'record_moves', 'wiredtiger_cache', 'wiredtiger_pages_evicted', 'asserts',
+ 'locks_collection', 'locks_database', 'locks_global', 'locks_metadata', 'locks_oplog',
+ 'dbstats_objects', 'tcmalloc_generic', 'tcmalloc_metrics', 'command_total_rate', 'command_failed_rate']
+
+CHARTS = {
+ 'read_operations': {
+ 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics',
+ 'mongodb.read_operations', 'line'],
+ 'lines': [
+ ['query', None, 'incremental'],
+ ['getmore', None, 'incremental']
+ ]},
+ 'write_operations': {
+ 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
+ 'mongodb.write_operations', 'line'],
+ 'lines': [
+ ['insert', None, 'incremental'],
+ ['update', None, 'incremental'],
+ ['delete', None, 'incremental']
+ ]},
+ 'active_clients': {
+ 'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
+ 'throughput metrics', 'mongodb.active_clients', 'line'],
+ 'lines': [
+ ['activeClients_readers', 'readers', 'absolute'],
+ ['activeClients_writers', 'writers', 'absolute']
+ ]},
+ 'journaling_transactions': {
+ 'options': [None, 'Transactions that have been written to the journal', 'commits',
+ 'database performance', 'mongodb.journaling_transactions', 'line'],
+ 'lines': [
+ ['commits', None, 'absolute']
+ ]},
+ 'journaling_volume': {
+ 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
+ 'mongodb.journaling_volume', 'line'],
+ 'lines': [
+ ['journaledMB', 'volume', 'absolute', 1, 100]
+ ]},
+ 'background_flush_average': {
+ 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
+ 'mongodb.background_flush_average', 'line'],
+ 'lines': [
+ ['average_ms', 'time', 'absolute', 1, 100]
+ ]},
+ 'background_flush_last': {
+ 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
+ 'mongodb.background_flush_last', 'line'],
+ 'lines': [
+ ['last_ms', 'time', 'absolute', 1, 100]
+ ]},
+ 'background_flush_rate': {
+ 'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
+ 'lines': [
+ ['flushes', 'flushes', 'incremental', 1, 1]
+ ]},
+ 'wiredtiger_read': {
+ 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
+ 'mongodb.wiredtiger_read', 'stacked'],
+ 'lines': [
+ ['wiredTigerRead_available', 'available', 'absolute', 1, 1],
+ ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
+ ]},
+ 'wiredtiger_write': {
+ 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
+ 'mongodb.wiredtiger_write', 'stacked'],
+ 'lines': [
+ ['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
+ ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
+ ]},
+ 'cursors': {
+        'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors',
+ 'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
+ 'lines': [
+            ['cursor_total', 'opened', 'absolute', 1, 1],
+ ['noTimeout', None, 'absolute', 1, 1],
+ ['timedOut', None, 'incremental', 1, 1]
+ ]},
+ 'connections': {
+ 'options': [None, 'Currently connected clients and unused connections', 'connections',
+ 'resource utilization', 'mongodb.connections', 'stacked'],
+ 'lines': [
+ ['connections_available', 'unused', 'absolute', 1, 1],
+ ['connections_current', 'connected', 'absolute', 1, 1]
+ ]},
+ 'memory': {
+ 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
+ 'lines': [
+ ['virtual', None, 'absolute', 1, 1],
+ ['resident', None, 'absolute', 1, 1],
+ ['nonmapped', None, 'absolute', 1, 1],
+ ['mapped', None, 'absolute', 1, 1]
+ ]},
+ 'page_faults': {
+        'options': [None, 'Number of times MongoDB had to fetch data from disk', 'requests/s',
+ 'resource utilization', 'mongodb.page_faults', 'line'],
+ 'lines': [
+ ['page_faults', None, 'incremental', 1, 1]
+ ]},
+ 'queued_requests': {
+        'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation',
+ 'mongodb.queued_requests', 'line'],
+ 'lines': [
+ ['currentQueue_readers', 'readers', 'absolute', 1, 1],
+ ['currentQueue_writers', 'writers', 'absolute', 1, 1]
+ ]},
+ 'record_moves': {
+ 'options': [None, 'Number of times documents had to be moved on-disk', 'number',
+ 'resource saturation', 'mongodb.record_moves', 'line'],
+ 'lines': [
+ ['moves', None, 'incremental', 1, 1]
+ ]},
+ 'asserts': {
+        'options': [None, 'Number of message, warning, regular and user assertions raised',
+                    'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
+ 'lines': [
+ ['msg', None, 'incremental', 1, 1],
+ ['warning', None, 'incremental', 1, 1],
+ ['regular', None, 'incremental', 1, 1],
+ ['user', None, 'incremental', 1, 1]
+ ]},
+ 'wiredtiger_cache': {
+        'options': [None, 'The percentage of the wiredTiger cache in use and the percentage holding dirty bytes',
+ 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
+ 'lines': [
+ ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
+ ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
+ ]},
+ 'wiredtiger_pages_evicted': {
+ 'options': [None, 'Pages evicted from the cache',
+ 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
+ 'lines': [
+ ['unmodified', None, 'absolute', 1, 1],
+ ['modified', None, 'absolute', 1, 1]
+ ]},
+ 'dbstats_objects': {
+        'options': [None, 'Number of documents in the database across all collections', 'documents',
+ 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
+ 'lines': [
+ ]},
+ 'tcmalloc_generic': {
+ 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
+ 'lines': [
+ ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
+ ['heap_size', 'heap_size', 'absolute', 1, 1048576]
+ ]},
+ 'tcmalloc_metrics': {
+ 'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
+ 'lines': [
+ ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
+ ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
+ ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024],
+ ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
+ ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
+ ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
+ ]},
+ 'command_total_rate': {
+ 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
+ 'lines': [
+ ['count_total', 'count', 'incremental', 1, 1],
+ ['createIndexes_total', 'createIndexes', 'incremental', 1, 1],
+ ['delete_total', 'delete', 'incremental', 1, 1],
+ ['eval_total', 'eval', 'incremental', 1, 1],
+ ['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
+ ['insert_total', 'insert', 'incremental', 1, 1],
+ ['update_total', 'update', 'incremental', 1, 1]
+ ]},
+ 'command_failed_rate': {
+ 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
+ 'lines': [
+ ['count_failed', 'count', 'incremental', 1, 1],
+ ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1],
+ ['delete_failed', 'delete', 'incremental', 1, 1],
+ ['eval_failed', 'eval', 'incremental', 1, 1],
+ ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
+ ['insert_failed', 'insert', 'incremental', 1, 1],
+ ['update_failed', 'update', 'incremental', 1, 1]
+ ]},
+ 'locks_collection': {
+ 'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
+ 'lines': [
+ ['Collection_R', 'shared', 'incremental'],
+ ['Collection_W', 'exclusive', 'incremental'],
+ ['Collection_r', 'intent_shared', 'incremental'],
+ ['Collection_w', 'intent_exclusive', 'incremental']
+ ]},
+ 'locks_database': {
+ 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
+ 'lines': [
+ ['Database_R', 'shared', 'incremental'],
+ ['Database_W', 'exclusive', 'incremental'],
+ ['Database_r', 'intent_shared', 'incremental'],
+ ['Database_w', 'intent_exclusive', 'incremental']
+ ]},
+ 'locks_global': {
+ 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
+ 'lines': [
+ ['Global_R', 'shared', 'incremental'],
+ ['Global_W', 'exclusive', 'incremental'],
+ ['Global_r', 'intent_shared', 'incremental'],
+ ['Global_w', 'intent_exclusive', 'incremental']
+ ]},
+ 'locks_metadata': {
+ 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
+ 'lines': [
+ ['Metadata_R', 'shared', 'incremental'],
+ ['Metadata_w', 'intent_exclusive', 'incremental']
+ ]},
+ 'locks_oplog': {
+ 'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
+ 'lines': [
+            ['oplog_r', 'intent_shared', 'incremental'],
+            ['oplog_w', 'intent_exclusive', 'incremental']
+ ]}
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER[:]
+ self.definitions = deepcopy(CHARTS)
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 27017)
+ self.timeout = self.configuration.get('timeout', 100)
+ self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
+ self.connection = None
+ self.do_replica = None
+ self.databases = list()
+
+ def check(self):
+ if not PYMONGO:
+ self.error('Pymongo module is needed to use mongodb.chart.py')
+ return False
+ self.connection, server_status, error = self._create_connection()
+ if error:
+ self.error(error)
+ return False
+
+ self.build_metrics_to_collect_(server_status)
+
+ try:
+ self._get_data()
+ except (LookupError, SyntaxError, AttributeError):
+ self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
+ return False
+ else:
+ self.create_charts_(server_status)
+ return True
+
+ def build_metrics_to_collect_(self, server_status):
+
+ self.do_replica = 'repl' in server_status
+ if 'dur' in server_status:
+ self.metrics_to_collect.extend(DUR)
+ if 'tcmalloc' in server_status:
+ self.metrics_to_collect.extend(TCMALLOC)
+ if 'commands' in server_status['metrics']:
+ self.metrics_to_collect.extend(COMMANDS)
+ if 'wiredTiger' in server_status:
+ self.metrics_to_collect.extend(WIREDTIGER)
+ if 'Collection' in server_status['locks']:
+ self.metrics_to_collect.extend(LOCKS)
+
+ def create_charts_(self, server_status):
+
+ if 'dur' not in server_status:
+ self.order.remove('journaling_transactions')
+ self.order.remove('journaling_volume')
+
+ if 'backgroundFlushing' not in server_status:
+ self.order.remove('background_flush_average')
+ self.order.remove('background_flush_last')
+ self.order.remove('background_flush_rate')
+
+ if 'wiredTiger' not in server_status:
+ self.order.remove('wiredtiger_write')
+ self.order.remove('wiredtiger_read')
+ self.order.remove('wiredtiger_cache')
+
+ if 'tcmalloc' not in server_status:
+ self.order.remove('tcmalloc_generic')
+ self.order.remove('tcmalloc_metrics')
+
+ if 'commands' not in server_status['metrics']:
+ self.order.remove('command_total_rate')
+ self.order.remove('command_failed_rate')
+
+ if 'Collection' not in server_status['locks']:
+ self.order.remove('locks_collection')
+ self.order.remove('locks_database')
+ self.order.remove('locks_global')
+ self.order.remove('locks_metadata')
+
+ if 'oplog' not in server_status['locks']:
+ self.order.remove('locks_oplog')
+
+ for dbase in self.databases:
+ self.order.append('_'.join([dbase, 'dbstats']))
+ self.definitions['_'.join([dbase, 'dbstats'])] = {
+ 'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB',
+ 'storage size metrics', 'mongodb.dbstats', 'line'],
+ 'lines': [
+ ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024],
+ ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024],
+ ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024]
+ ]}
+ self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute'])
+
+ if self.do_replica:
+ def create_lines(hosts, string):
+ lines = list()
+ for host in hosts:
+ dim_id = '_'.join([host, string])
+ lines.append([dim_id, host, 'absolute', 1, 1000])
+ return lines
+
+ def create_state_lines(states):
+ lines = list()
+ for state, description in states:
+ dim_id = '_'.join([host, 'state', state])
+ lines.append([dim_id, description, 'absolute', 1, 1])
+ return lines
+
+ all_hosts = server_status['repl']['hosts']
+ this_host = server_status['repl']['me']
+ other_hosts = [host for host in all_hosts if host != this_host]
+
+ if 'local' in self.databases:
+ self.order.append('oplog_window')
+ self.definitions['oplog_window'] = {
+ 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog',
+ 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'],
+ 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]}
+ # Create "heartbeat delay" chart
+ self.order.append('heartbeat_delay')
+ self.definitions['heartbeat_delay'] = {
+                'options': [None, 'Time since the last heartbeat was received'
+                            ' from the replica set member (lastHeartbeatRecv)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
+ 'lines': create_lines(other_hosts, 'heartbeat_lag')}
+ # Create "optimedate delay" chart
+ self.order.append('optimedate_delay')
+ self.definitions['optimedate_delay'] = {
+                'options': [None, 'Time since the last entry from the oplog was applied (optimeDate)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'],
+ 'lines': create_lines(all_hosts, 'optimedate')}
+ # Create "replica set members state" chart
+ for host in all_hosts:
+ chart_name = '_'.join([host, 'state'])
+ self.order.append(chart_name)
+ self.definitions[chart_name] = {
+ 'options': [None, 'Replica set member (%s) current state' % host, 'state',
+ 'replication and oplog', 'mongodb.replication_state', 'line'],
+ 'lines': create_state_lines(REPLSET_STATES)}
+
+ def _get_raw_data(self):
+ raw_data = dict()
+
+ raw_data.update(self.get_serverstatus_() or dict())
+ raw_data.update(self.get_dbstats_() or dict())
+ raw_data.update(self.get_replsetgetstatus_() or dict())
+ raw_data.update(self.get_getreplicationinfo_() or dict())
+
+ return raw_data or None
+
+ def get_serverstatus_(self):
+ raw_data = dict()
+ try:
+ raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
+ except PyMongoError:
+ return None
+ else:
+ return raw_data
+
+ def get_dbstats_(self):
+ if not self.databases:
+ return None
+
+ raw_data = dict()
+ raw_data['dbStats'] = dict()
+ try:
+ for dbase in self.databases:
+ raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
+ except PyMongoError:
+ return None
+ else:
+ return raw_data
+
+ def get_replsetgetstatus_(self):
+ if not self.do_replica:
+ return None
+
+ raw_data = dict()
+ try:
+ raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
+ except PyMongoError:
+ return None
+ else:
+ return raw_data
+
+ def get_getreplicationinfo_(self):
+ if not (self.do_replica and 'local' in self.databases):
+ return None
+
+ raw_data = dict()
+ raw_data['getReplicationInfo'] = dict()
+ try:
+ raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
+ "$natural", ASCENDING).limit(1)[0]
+ raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
+ "$natural", DESCENDING).limit(1)[0]
+ except PyMongoError:
+ return None
+ else:
+ return raw_data
+
+ def _get_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+ serverStatus = raw_data['serverStatus']
+ dbStats = raw_data.get('dbStats')
+ replSetGetStatus = raw_data.get('replSetGetStatus')
+ getReplicationInfo = raw_data.get('getReplicationInfo')
+ utc_now = datetime.utcnow()
+
+ # serverStatus
+ for metric, new_name, function in self.metrics_to_collect:
+ value = serverStatus
+ for key in metric.split('.'):
+ try:
+ value = value[key]
+ except KeyError:
+ break
+
+ if not isinstance(value, dict) and key:
+ to_netdata[new_name or key] = value if not function else function(value)
+
+ to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal',
+ to_netdata['mapped'])
+ if to_netdata.get('maximum bytes configured'):
+ maximum = to_netdata['maximum bytes configured']
+ to_netdata['wiredTiger_percent_clean'] = int(to_netdata['bytes currently in the cache']
+ * 100 / maximum * 1000)
+ to_netdata['wiredTiger_percent_dirty'] = int(to_netdata['tracked dirty bytes in the cache']
+ * 100 / maximum * 1000)
+
+ # dbStats
+ if dbStats:
+ for dbase in dbStats:
+ for metric in DBSTATS:
+ key = '_'.join([dbase, metric])
+ to_netdata[key] = dbStats[dbase][metric]
+
+ # replSetGetStatus
+ if replSetGetStatus:
+ other_hosts = list()
+ members = replSetGetStatus['members']
+ unix_epoch = datetime(1970, 1, 1, 0, 0)
+
+ for member in members:
+ if not member.get('self'):
+ other_hosts.append(member)
+ # Replica set time diff between current time and time when last entry from the oplog was applied
+ if member['optimeDate'] != unix_epoch:
+ member_optimedate = member['name'] + '_optimedate'
+ to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'],
+ multiplier=1000))})
+ # Replica set members state
+ member_state = member['name'] + '_state'
+ for elem in REPLSET_STATES:
+ state = elem[0]
+ to_netdata.update({'_'.join([member_state, state]): 0})
+ to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']})
+ # Heartbeat lag calculation
+ for other in other_hosts:
+ if other['lastHeartbeatRecv'] != unix_epoch:
+ node = other['name'] + '_heartbeat_lag'
+ to_netdata[node] = int(delta_calculation(delta=utc_now - other['lastHeartbeatRecv'],
+ multiplier=1000))
+
+ if getReplicationInfo:
+ first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime()
+ last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime()
+ to_netdata['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000))
+
+ return to_netdata
+
+ def _create_connection(self):
+ conn_vars = {'host': self.host, 'port': self.port}
+ if hasattr(MongoClient, 'server_selection_timeout'):
+ conn_vars.update({'serverselectiontimeoutms': self.timeout})
+ try:
+ connection = MongoClient(**conn_vars)
+ if self.user and self.password:
+ connection.admin.authenticate(name=self.user, password=self.password)
+ # elif self.user:
+ # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
+ server_status = connection.admin.command('serverStatus')
+ except PyMongoError as error:
+ return None, None, str(error)
+ else:
+ try:
+ self.databases = connection.database_names()
+ except PyMongoError as error:
+ self.info('Can\'t collect databases: %s' % str(error))
+ return connection, server_status, None
+
+
+def delta_calculation(delta, multiplier=1):
+ if hasattr(delta, 'total_seconds'):
+ return delta.total_seconds() * multiplier
+ else:
+ return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
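The delta_calculation helper at the bottom of the new module is what turns the replica-set timestamps (heartbeat lag, optimeDate delay, oplog window) into the millisecond values the charts divide by 1000. A quick, self-contained check of both branches; the timedelta value is arbitrary.

# Self-contained check of delta_calculation (copied from the module above);
# the fallback branch reproduces timedelta.total_seconds() for Python < 2.7.
from datetime import timedelta

def delta_calculation(delta, multiplier=1):
    if hasattr(delta, 'total_seconds'):
        return delta.total_seconds() * multiplier
    return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier

delta = timedelta(days=1, seconds=3, microseconds=500000)
print(delta_calculation(delta))                   # 86403.5 seconds
print(delta_calculation(delta, multiplier=1000))  # 86403500.0 - milliseconds, as the charts expect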
diff --git a/python.d/mysql.chart.py b/python.d/mysql.chart.py
index 0e3a03299..abf6bf715 100644
--- a/python.d/mysql.chart.py
+++ b/python.d/mysql.chart.py
@@ -2,45 +2,132 @@
# Description: MySQL netdata python.d module
# Author: Pawel Krupa (paulfantom)
-from base import SimpleService
-import msg
-
-# import 3rd party library to handle MySQL communication
-try:
- import MySQLdb
-
- # https://github.com/PyMySQL/mysqlclient-python
- msg.info("using MySQLdb")
-except ImportError:
- try:
- import pymysql as MySQLdb
-
- # https://github.com/PyMySQL/PyMySQL
- msg.info("using pymysql")
- except ImportError:
- msg.error("MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin")
- raise ImportError
+from base import MySQLService
# default module values (can be overridden per job in `config`)
# update_every = 3
priority = 90000
retries = 60
-# default configuration (overridden by python.d.plugin)
-# config = {
-# 'local': {
-# 'user': 'root',
-# 'pass': '',
-# 'socket': '/var/run/mysqld/mysqld.sock',
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority
-# }
-#}
-
# query executed on MySQL server
-QUERY = "SHOW GLOBAL STATUS;"
-QUERY_SLAVE = "SHOW SLAVE STATUS;"
+QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
+QUERY_SLAVE = 'SHOW SLAVE STATUS;'
+
+GLOBAL_STATS = [
+ 'Bytes_received',
+ 'Bytes_sent',
+ 'Queries',
+ 'Questions',
+ 'Slow_queries',
+ 'Handler_commit',
+ 'Handler_delete',
+ 'Handler_prepare',
+ 'Handler_read_first',
+ 'Handler_read_key',
+ 'Handler_read_next',
+ 'Handler_read_prev',
+ 'Handler_read_rnd',
+ 'Handler_read_rnd_next',
+ 'Handler_rollback',
+ 'Handler_savepoint',
+ 'Handler_savepoint_rollback',
+ 'Handler_update',
+ 'Handler_write',
+ 'Table_locks_immediate',
+ 'Table_locks_waited',
+ 'Select_full_join',
+ 'Select_full_range_join',
+ 'Select_range',
+ 'Select_range_check',
+ 'Select_scan',
+ 'Sort_merge_passes',
+ 'Sort_range',
+ 'Sort_scan',
+ 'Created_tmp_disk_tables',
+ 'Created_tmp_files',
+ 'Created_tmp_tables',
+ 'Connections',
+ 'Aborted_connects',
+ 'Binlog_cache_disk_use',
+ 'Binlog_cache_use',
+ 'Threads_connected',
+ 'Threads_created',
+ 'Threads_cached',
+ 'Threads_running',
+ 'Thread_cache_misses',
+ 'Innodb_data_read',
+ 'Innodb_data_written',
+ 'Innodb_data_reads',
+ 'Innodb_data_writes',
+ 'Innodb_data_fsyncs',
+ 'Innodb_data_pending_reads',
+ 'Innodb_data_pending_writes',
+ 'Innodb_data_pending_fsyncs',
+ 'Innodb_log_waits',
+ 'Innodb_log_write_requests',
+ 'Innodb_log_writes',
+ 'Innodb_os_log_fsyncs',
+ 'Innodb_os_log_pending_fsyncs',
+ 'Innodb_os_log_pending_writes',
+ 'Innodb_os_log_written',
+ 'Innodb_row_lock_current_waits',
+ 'Innodb_rows_inserted',
+ 'Innodb_rows_read',
+ 'Innodb_rows_updated',
+ 'Innodb_rows_deleted',
+ 'Innodb_buffer_pool_pages_data',
+ 'Innodb_buffer_pool_pages_dirty',
+ 'Innodb_buffer_pool_pages_free',
+ 'Innodb_buffer_pool_pages_flushed',
+ 'Innodb_buffer_pool_pages_misc',
+ 'Innodb_buffer_pool_pages_total',
+ 'Innodb_buffer_pool_bytes_data',
+ 'Innodb_buffer_pool_bytes_dirty',
+ 'Innodb_buffer_pool_read_ahead',
+ 'Innodb_buffer_pool_read_ahead_evicted',
+ 'Innodb_buffer_pool_read_ahead_rnd',
+ 'Innodb_buffer_pool_read_requests',
+ 'Innodb_buffer_pool_write_requests',
+ 'Innodb_buffer_pool_reads',
+ 'Innodb_buffer_pool_wait_free',
+ 'Qcache_hits',
+ 'Qcache_lowmem_prunes',
+ 'Qcache_inserts',
+ 'Qcache_not_cached',
+ 'Qcache_queries_in_cache',
+ 'Qcache_free_memory',
+ 'Qcache_free_blocks',
+ 'Qcache_total_blocks',
+ 'Key_blocks_unused',
+ 'Key_blocks_used',
+ 'Key_blocks_not_flushed',
+ 'Key_read_requests',
+ 'Key_write_requests',
+ 'Key_reads',
+ 'Key_writes',
+ 'Open_files',
+ 'Opened_files',
+ 'Binlog_stmt_cache_disk_use',
+ 'Binlog_stmt_cache_use',
+ 'Connection_errors_accept',
+ 'Connection_errors_internal',
+ 'Connection_errors_max_connections',
+ 'Connection_errors_peer_address',
+ 'Connection_errors_select',
+ 'Connection_errors_tcpwrap']
+
+def slave_seconds(value):
+ return value if value else -1
+
+def slave_running(value):
+ return 1 if value == 'Yes' else -1
+
+
+SLAVE_STATS = [
+ ('Seconds_Behind_Master', slave_seconds),
+ ('Slave_SQL_Running', slave_running),
+ ('Slave_IO_Running', slave_running)
+]
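SHOW SLAVE STATUS reports Slave_SQL_Running and Slave_IO_Running as 'Yes'/'No' strings and leaves Seconds_Behind_Master NULL when replication is not running; the two helpers above map those values onto numbers the slave charts can plot. A short sketch with an illustrative row:

# Sketch of how the SLAVE_STATS helpers normalise SHOW SLAVE STATUS values;
# the sample row is illustrative.
def slave_seconds(value):
    return value if value else -1

def slave_running(value):
    return 1 if value == 'Yes' else -1

row = {'Seconds_Behind_Master': None, 'Slave_SQL_Running': 'Yes', 'Slave_IO_Running': 'No'}
print(slave_seconds(row['Seconds_Behind_Master']))   # -1 when the value is NULL/empty
print(slave_running(row['Slave_SQL_Running']))       # 1
print(slave_running(row['Slave_IO_Running']))        # -1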
ORDER = ['net',
'queries',
@@ -62,431 +149,290 @@ CHARTS = {
'net': {
'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
'lines': [
- ["Bytes_received", "in", "incremental", 8, 1024],
- ["Bytes_sent", "out", "incremental", -8, 1024]
+ ['Bytes_received', 'in', 'incremental', 8, 1024],
+ ['Bytes_sent', 'out', 'incremental', -8, 1024]
]},
'queries': {
'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
'lines': [
- ["Queries", "queries", "incremental"],
- ["Questions", "questions", "incremental"],
- ["Slow_queries", "slow_queries", "incremental"]
+ ['Queries', 'queries', 'incremental'],
+ ['Questions', 'questions', 'incremental'],
+ ['Slow_queries', 'slow_queries', 'incremental']
]},
'handlers': {
'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
'lines': [
- ["Handler_commit", "commit", "incremental"],
- ["Handler_delete", "delete", "incremental"],
- ["Handler_prepare", "prepare", "incremental"],
- ["Handler_read_first", "read_first", "incremental"],
- ["Handler_read_key", "read_key", "incremental"],
- ["Handler_read_next", "read_next", "incremental"],
- ["Handler_read_prev", "read_prev", "incremental"],
- ["Handler_read_rnd", "read_rnd", "incremental"],
- ["Handler_read_rnd_next", "read_rnd_next", "incremental"],
- ["Handler_rollback", "rollback", "incremental"],
- ["Handler_savepoint", "savepoint", "incremental"],
- ["Handler_savepoint_rollback", "savepoint_rollback", "incremental"],
- ["Handler_update", "update", "incremental"],
- ["Handler_write", "write", "incremental"]
+ ['Handler_commit', 'commit', 'incremental'],
+ ['Handler_delete', 'delete', 'incremental'],
+ ['Handler_prepare', 'prepare', 'incremental'],
+ ['Handler_read_first', 'read_first', 'incremental'],
+ ['Handler_read_key', 'read_key', 'incremental'],
+ ['Handler_read_next', 'read_next', 'incremental'],
+ ['Handler_read_prev', 'read_prev', 'incremental'],
+ ['Handler_read_rnd', 'read_rnd', 'incremental'],
+ ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'],
+ ['Handler_rollback', 'rollback', 'incremental'],
+ ['Handler_savepoint', 'savepoint', 'incremental'],
+ ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
+ ['Handler_update', 'update', 'incremental'],
+ ['Handler_write', 'write', 'incremental']
]},
'table_locks': {
'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
'lines': [
- ["Table_locks_immediate", "immediate", "incremental"],
- ["Table_locks_waited", "waited", "incremental", -1, 1]
+ ['Table_locks_immediate', 'immediate', 'incremental'],
+ ['Table_locks_waited', 'waited', 'incremental', -1, 1]
]},
'join_issues': {
'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
'lines': [
- ["Select_full_join", "full_join", "incremental"],
- ["Select_full_range_join", "full_range_join", "incremental"],
- ["Select_range", "range", "incremental"],
- ["Select_range_check", "range_check", "incremental"],
- ["Select_scan", "scan", "incremental"]
+ ['Select_full_join', 'full_join', 'incremental'],
+ ['Select_full_range_join', 'full_range_join', 'incremental'],
+ ['Select_range', 'range', 'incremental'],
+ ['Select_range_check', 'range_check', 'incremental'],
+ ['Select_scan', 'scan', 'incremental']
]},
'sort_issues': {
'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
'lines': [
- ["Sort_merge_passes", "merge_passes", "incremental"],
- ["Sort_range", "range", "incremental"],
- ["Sort_scan", "scan", "incremental"]
+ ['Sort_merge_passes', 'merge_passes', 'incremental'],
+ ['Sort_range', 'range', 'incremental'],
+ ['Sort_scan', 'scan', 'incremental']
]},
'tmp': {
'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
'lines': [
- ["Created_tmp_disk_tables", "disk_tables", "incremental"],
- ["Created_tmp_files", "files", "incremental"],
- ["Created_tmp_tables", "tables", "incremental"]
+ ['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
+ ['Created_tmp_files', 'files', 'incremental'],
+ ['Created_tmp_tables', 'tables', 'incremental']
]},
'connections': {
'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
'lines': [
- ["Connections", "all", "incremental"],
- ["Aborted_connects", "aborted", "incremental"]
+ ['Connections', 'all', 'incremental'],
+ ['Aborted_connects', 'aborted', 'incremental']
]},
'binlog_cache': {
'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
'lines': [
- ["Binlog_cache_disk_use", "disk", "incremental"],
- ["Binlog_cache_use", "all", "incremental"]
+ ['Binlog_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_cache_use', 'all', 'incremental']
]},
'threads': {
'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'],
'lines': [
- ["Threads_connected", "connected", "absolute"],
- ["Threads_created", "created", "incremental"],
- ["Threads_cached", "cached", "absolute", -1, 1],
- ["Threads_running", "running", "absolute"],
+ ['Threads_connected', 'connected', 'absolute'],
+ ['Threads_created', 'created', 'incremental'],
+ ['Threads_cached', 'cached', 'absolute', -1, 1],
+ ['Threads_running', 'running', 'absolute'],
]},
'thread_cache_misses': {
'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
'lines': [
- ["Thread_cache_misses", "misses", "absolute", 1, 100]
+ ['Thread_cache_misses', 'misses', 'absolute', 1, 100]
]},
'innodb_io': {
'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
'lines': [
- ["Innodb_data_read", "read", "incremental", 1, 1024],
- ["Innodb_data_written", "write", "incremental", -1, 1024]
+ ['Innodb_data_read', 'read', 'incremental', 1, 1024],
+ ['Innodb_data_written', 'write', 'incremental', -1, 1024]
]},
'innodb_io_ops': {
'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
'lines': [
- ["Innodb_data_reads", "reads", "incremental"],
- ["Innodb_data_writes", "writes", "incremental", -1, 1],
- ["Innodb_data_fsyncs", "fsyncs", "incremental"]
+ ['Innodb_data_reads', 'reads', 'incremental'],
+ ['Innodb_data_writes', 'writes', 'incremental', -1, 1],
+ ['Innodb_data_fsyncs', 'fsyncs', 'incremental']
]},
'innodb_io_pending_ops': {
'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb', 'mysql.innodb_io_pending_ops', 'line'],
'lines': [
- ["Innodb_data_pending_reads", "reads", "absolute"],
- ["Innodb_data_pending_writes", "writes", "absolute", -1, 1],
- ["Innodb_data_pending_fsyncs", "fsyncs", "absolute"]
+ ['Innodb_data_pending_reads', 'reads', 'absolute'],
+ ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
+ ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
]},
'innodb_log': {
'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
'lines': [
- ["Innodb_log_waits", "waits", "incremental"],
- ["Innodb_log_write_requests", "write_requests", "incremental", -1, 1],
- ["Innodb_log_writes", "writes", "incremental", -1, 1],
+ ['Innodb_log_waits', 'waits', 'incremental'],
+ ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
+ ['Innodb_log_writes', 'writes', 'incremental', -1, 1],
]},
'innodb_os_log': {
'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
'lines': [
- ["Innodb_os_log_fsyncs", "fsyncs", "incremental"],
- ["Innodb_os_log_pending_fsyncs", "pending_fsyncs", "absolute"],
- ["Innodb_os_log_pending_writes", "pending_writes", "absolute", -1, 1],
+ ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
+ ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'],
+ ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1],
]},
'innodb_os_log_io': {
'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
'lines': [
- ["Innodb_os_log_written", "write", "incremental", -1, 1024],
+ ['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
]},
'innodb_cur_row_lock': {
'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb', 'mysql.innodb_cur_row_lock', 'area'],
'lines': [
- ["Innodb_row_lock_current_waits", "current_waits", "absolute"]
+ ['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
]},
'innodb_rows': {
'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
'lines': [
- ["Innodb_rows_inserted", "read", "incremental"],
- ["Innodb_rows_read", "deleted", "incremental", -1, 1],
- ["Innodb_rows_updated", "inserted", "incremental", 1, 1],
- ["Innodb_rows_deleted", "updated", "incremental", -1, 1],
+ ['Innodb_rows_inserted', 'read', 'incremental'],
+ ['Innodb_rows_read', 'deleted', 'incremental', -1, 1],
+ ['Innodb_rows_updated', 'inserted', 'incremental', 1, 1],
+ ['Innodb_rows_deleted', 'updated', 'incremental', -1, 1],
]},
'innodb_buffer_pool_pages': {
'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb', 'mysql.innodb_buffer_pool_pages', 'line'],
'lines': [
- ["Innodb_buffer_pool_pages_data", "data", "absolute"],
- ["Innodb_buffer_pool_pages_dirty", "dirty", "absolute", -1, 1],
- ["Innodb_buffer_pool_pages_free", "free", "absolute"],
- ["Innodb_buffer_pool_pages_flushed", "flushed", "incremental", -1, 1],
- ["Innodb_buffer_pool_pages_misc", "misc", "absolute", -1, 1],
- ["Innodb_buffer_pool_pages_total", "total", "absolute"]
+ ['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
+ ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
+ ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1],
+ ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_total', 'total', 'absolute']
]},
'innodb_buffer_pool_bytes': {
'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
'lines': [
- ["Innodb_buffer_pool_bytes_data", "data", "absolute", 1, 1024 * 1024],
- ["Innodb_buffer_pool_bytes_dirty", "dirty", "absolute", -1, 1024 * 1024]
+ ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
+ ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
]},
'innodb_buffer_pool_read_ahead': {
'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb', 'mysql.innodb_buffer_pool_read_ahead', 'area'],
'lines': [
- ["Innodb_buffer_pool_read_ahead", "all", "incremental"],
- ["Innodb_buffer_pool_read_ahead_evicted", "evicted", "incremental", -1, 1],
- ["Innodb_buffer_pool_read_ahead_rnd", "random", "incremental"]
+ ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
+ ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
+ ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
]},
'innodb_buffer_pool_reqs': {
'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb', 'mysql.innodb_buffer_pool_reqs', 'area'],
'lines': [
- ["Innodb_buffer_pool_read_requests", "reads", "incremental"],
- ["Innodb_buffer_pool_write_requests", "writes", "incremental", -1, 1]
+ ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
+ ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
]},
'innodb_buffer_pool_ops': {
'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb', 'mysql.innodb_buffer_pool_ops', 'area'],
'lines': [
- ["Innodb_buffer_pool_reads", "disk reads", "incremental"],
- ["Innodb_buffer_pool_wait_free", "wait free", "incremental", -1, 1]
+ ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
+ ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
]},
'qcache_ops': {
'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
'lines': [
- ["Qcache_hits", "hits", "incremental"],
- ["Qcache_lowmem_prunes", "lowmem prunes", "incremental", -1, 1],
- ["Qcache_inserts", "inserts", "incremental"],
- ["Qcache_not_cached", "not cached", "incremental", -1, 1]
+ ['Qcache_hits', 'hits', 'incremental'],
+ ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
+ ['Qcache_inserts', 'inserts', 'incremental'],
+ ['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
]},
'qcache': {
'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
'lines': [
- ["Qcache_queries_in_cache", "queries", "absolute"]
+ ['Qcache_queries_in_cache', 'queries', 'absolute']
]},
'qcache_freemem': {
'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
'lines': [
- ["Qcache_free_memory", "free", "absolute", 1, 1024 * 1024]
+ ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
]},
'qcache_memblocks': {
'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
'lines': [
- ["Qcache_free_blocks", "free", "absolute"],
- ["Qcache_total_blocks", "total", "absolute"]
+ ['Qcache_free_blocks', 'free', 'absolute'],
+ ['Qcache_total_blocks', 'total', 'absolute']
]},
'key_blocks': {
'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
'lines': [
- ["Key_blocks_unused", "unused", "absolute"],
- ["Key_blocks_used", "used", "absolute", -1, 1],
- ["Key_blocks_not_flushed", "not flushed", "absolute"]
+ ['Key_blocks_unused', 'unused', 'absolute'],
+ ['Key_blocks_used', 'used', 'absolute', -1, 1],
+ ['Key_blocks_not_flushed', 'not flushed', 'absolute']
]},
'key_requests': {
'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
'lines': [
- ["Key_read_requests", "reads", "incremental"],
- ["Key_write_requests", "writes", "incremental", -1, 1]
+ ['Key_read_requests', 'reads', 'incremental'],
+ ['Key_write_requests', 'writes', 'incremental', -1, 1]
]},
'key_disk_ops': {
'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s', 'myisam', 'mysql.key_disk_ops', 'area'],
'lines': [
- ["Key_reads", "reads", "incremental"],
- ["Key_writes", "writes", "incremental", -1, 1]
+ ['Key_reads', 'reads', 'incremental'],
+ ['Key_writes', 'writes', 'incremental', -1, 1]
]},
'files': {
'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'],
'lines': [
- ["Open_files", "files", "absolute"]
+ ['Open_files', 'files', 'absolute']
]},
'files_rate': {
'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
'lines': [
- ["Opened_files", "files", "incremental"]
+ ['Opened_files', 'files', 'incremental']
]},
'binlog_stmt_cache': {
'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog', 'mysql.binlog_stmt_cache', 'line'],
'lines': [
- ["Binlog_stmt_cache_disk_use", "disk", "incremental"],
- ["Binlog_stmt_cache_use", "all", "incremental"]
+ ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_stmt_cache_use', 'all', 'incremental']
]},
'connection_errors': {
'options': [None, 'mysql Connection Errors', 'connections/s', 'connections', 'mysql.connection_errors', 'line'],
'lines': [
- ["Connection_errors_accept", "accept", "incremental"],
- ["Connection_errors_internal", "internal", "incremental"],
- ["Connection_errors_max_connections", "max", "incremental"],
- ["Connection_errors_peer_address", "peer_addr", "incremental"],
- ["Connection_errors_select", "select", "incremental"],
- ["Connection_errors_tcpwrap", "tcpwrap", "incremental"]
+ ['Connection_errors_accept', 'accept', 'incremental'],
+ ['Connection_errors_internal', 'internal', 'incremental'],
+ ['Connection_errors_max_connections', 'max', 'incremental'],
+ ['Connection_errors_peer_address', 'peer_addr', 'incremental'],
+ ['Connection_errors_select', 'select', 'incremental'],
+ ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
]},
'slave_behind': {
'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
'lines': [
- ["slave_behind", "seconds", "absolute"]
+ ['Seconds_Behind_Master', 'seconds', 'absolute']
]},
'slave_status': {
'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
'lines': [
- ["slave_sql", "sql_running", "absolute"],
- ["slave_io", "io_running", "absolute"]
+ ['Slave_SQL_Running', 'sql_running', 'absolute'],
+ ['Slave_IO_Running', 'io_running', 'absolute']
]}
}
-class Service(SimpleService):
+class Service(MySQLService):
def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self._parse_config(configuration)
+ MySQLService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.connection = None
- self.do_slave = -1
-
- def _parse_config(self, configuration):
- """
- Parse configuration to collect data from MySQL server
- :param configuration: dict
- :return: dict
- """
- parameters = {}
- if self.name is None:
- self.name = 'local'
- if 'user' in configuration:
- parameters['user'] = self.configuration['user']
- if 'pass' in configuration:
- parameters['passwd'] = self.configuration['pass']
- if 'my.cnf' in configuration:
- parameters['read_default_file'] = self.configuration['my.cnf']
- elif 'socket' in configuration:
- parameters['unix_socket'] = self.configuration['socket']
- elif 'host' in configuration:
- parameters['host'] = self.configuration['host']
- if 'port' in configuration:
- parameters['port'] = int(self.configuration['port'])
- self.connection_parameters = parameters
-
- def _connect(self):
- """
- Try to connect to MySQL server
- """
- try:
- self.connection = MySQLdb.connect(connect_timeout=self.update_every, **self.connection_parameters)
- except MySQLdb.OperationalError as e:
- self.error("Cannot establish connection to MySQL.")
- self.debug(str(e))
- raise RuntimeError
- except Exception as e:
- self.error("problem connecting to server:", e)
- raise RuntimeError
-
- def _get_data_slave(self):
- """
- Get slave raw data from MySQL server
- :return: dict
- """
- if self.connection is None:
- try:
- self._connect()
- except RuntimeError:
- return None
-
- slave_data = None
- slave_raw_data = None
- try:
- cursor = self.connection.cursor()
- if cursor.execute(QUERY_SLAVE):
- slave_raw_data = dict(list(zip([elem[0] for elem in cursor.description], cursor.fetchone())))
-
- except MySQLdb.OperationalError as e:
- self.debug("Reconnecting for query", QUERY_SLAVE, ":", str(e))
- try:
- self._connect()
- cursor = self.connection.cursor()
- if cursor.execute(QUERY_SLAVE):
- slave_raw_data = dict(list(zip([elem[0] for elem in cursor.description], cursor.fetchone())))
- except Exception as e:
- self.error("retried, but cannot execute query", QUERY_SLAVE, ":", str(e))
- self.connection.close()
- self.connection = None
- return None
-
- except Exception as e:
- self.error("cannot execute query", QUERY_SLAVE, ":", str(e))
- self.connection.close()
- self.connection = None
- return None
-
- if slave_raw_data is not None:
- slave_data = {
- 'slave_behind': None,
- 'slave_sql': None,
- 'slave_io': None
- }
-
- try:
- slave_data['slave_behind'] = int(slave_raw_data.setdefault('Seconds_Behind_Master', -1))
- except:
- slave_data['slave_behind'] = None
-
- try:
- slave_data['slave_sql'] = 1 if slave_raw_data.get('Slave_SQL_Running') == 'Yes' else -1
- except:
- slave_data['slave_sql'] = None
-
- try:
- slave_data['slave_io'] = 1 if slave_raw_data.get('Slave_IO_Running') == 'Yes' else -1
- except:
- slave_data['slave_io'] = None
-
- return slave_data
+ self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE)
def _get_data(self):
- """
- Get raw data from MySQL server
- :return: dict
- """
- if self.connection is None:
- try:
- self._connect()
- except RuntimeError:
- return None
- try:
- cursor = self.connection.cursor()
- cursor.execute(QUERY)
- raw_data = cursor.fetchall()
- except MySQLdb.OperationalError as e:
- self.debug("Reconnecting for query", QUERY, ":", str(e))
- try:
- self._connect()
- cursor = self.connection.cursor()
- cursor.execute(QUERY)
- raw_data = cursor.fetchall()
- except Exception as e:
- self.error("retried, but cannot execute query", QUERY, ":", str(e))
- self.connection.close()
- self.connection = None
- return None
+ raw_data = self._get_raw_data(description=True)
- except Exception as e:
- self.error("cannot execute query", QUERY, ":", str(e))
- self.connection.close()
- self.connection = None
- return None
+ if not raw_data:
+ return None
- data = dict(raw_data)
+ to_netdata = dict()
- # check for slave data
- # the first time is -1 (so we do it)
- # then it is set to 1 or 0 and we keep it like that
- if self.do_slave != 0:
- slave_data = self._get_data_slave()
- if slave_data is not None:
- data.update(slave_data)
- if self.do_slave == -1:
- self.do_slave = 1
- else:
- if self.do_slave == -1:
- self.error("replication metrics will be disabled - please allow netdata to collect them.")
- self.do_slave = 0
+ if 'global_status' in raw_data:
+ global_status = dict(raw_data['global_status'][0])
+ for key in GLOBAL_STATS:
+ if key in global_status:
+ to_netdata[key] = global_status[key]
+ if 'Threads_created' in to_netdata and 'Connections' in to_netdata:
+ to_netdata['Thread_cache_misses'] = round(int(to_netdata['Threads_created']) / float(to_netdata['Connections']) * 10000)
- # do calculations
- try:
- data["Thread_cache_misses"] = round(float(data["Threads_created"]) / float(data["Connections"]) * 10000)
- except:
- data["Thread_cache_misses"] = None
+ if 'slave_status' in raw_data:
+ if raw_data['slave_status'][0]:
+ slave_raw_data = dict(zip([e[0] for e in raw_data['slave_status'][1]], raw_data['slave_status'][0][0]))
+ for key, function in SLAVE_STATS:
+ if key in slave_raw_data:
+ to_netdata[key] = function(slave_raw_data[key])
+ else:
+ self.queries.pop('slave_status')
- return data
+ return to_netdata or None
- def check(self):
- """
- Check if service is able to connect to server
- :return: boolean
- """
- try:
- self.connection = self._connect()
- return True
- except RuntimeError:
- self.connection = None
- return False
diff --git a/python.d/nginx_log.chart.py b/python.d/nginx_log.chart.py
deleted file mode 100644
index ef964a565..000000000
--- a/python.d/nginx_log.chart.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: nginx log netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from base import LogService
-import re
-
-priority = 60000
-retries = 60
-# update_every = 3
-
-ORDER = ['codes']
-CHARTS = {
- 'codes': {
- 'options': [None, 'nginx status codes', 'requests/s', 'requests', 'nginx_log.codes', 'stacked'],
- 'lines': [
- ["2xx", None, "incremental"],
- ["5xx", None, "incremental"],
- ["3xx", None, "incremental"],
- ["4xx", None, "incremental"],
- ["1xx", None, "incremental"],
- ["other", None, "incremental"]
- ]}
-}
-
-
-class Service(LogService):
- def __init__(self, configuration=None, name=None):
- LogService.__init__(self, configuration=configuration, name=name)
- if len(self.log_path) == 0:
- self.log_path = "/var/log/nginx/access.log"
- self.order = ORDER
- self.definitions = CHARTS
- pattern = r'" ([0-9]{3}) ?'
- #pattern = r'(?:" )([0-9][0-9][0-9]) ?'
- self.regex = re.compile(pattern)
-
- self.data = {
- '1xx': 0,
- '2xx': 0,
- '3xx': 0,
- '4xx': 0,
- '5xx': 0,
- 'other': 0
- }
-
- def _get_data(self):
- """
- Parse new log lines
- :return: dict
- """
- try:
- raw = self._get_raw_data()
- if raw is None:
- return None
- elif not raw:
- return self.data
- except (ValueError, AttributeError):
- return None
-
- regex = self.regex
- for line in raw:
- code = regex.search(line)
- try:
- beginning = code.group(1)[0]
- except AttributeError:
- continue
-
- if beginning == '2':
- self.data["2xx"] += 1
- elif beginning == '3':
- self.data["3xx"] += 1
- elif beginning == '4':
- self.data["4xx"] += 1
- elif beginning == '5':
- self.data["5xx"] += 1
- elif beginning == '1':
- self.data["1xx"] += 1
- else:
- self.data["other"] += 1
-
- return self.data
diff --git a/python.d/nsd.chart.py b/python.d/nsd.chart.py
new file mode 100644
index 000000000..68bb4f237
--- /dev/null
+++ b/python.d/nsd.chart.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Description: NSD `nsd-control stats_noreset` netdata python.d module
+# Author: <383c57 at gmail.com>
+
+
+from base import ExecutableService
+import re
+
+# default module values (can be overridden per job in `config`)
+priority = 60000
+retries = 5
+update_every = 30
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
+
+CHARTS = {
+ 'queries': {
+ 'options': [
+ None, "queries", 'queries/s', 'queries', 'nsd.queries', 'line'],
+ 'lines': [
+ ['num_queries', 'queries', 'incremental'],]},
+ 'zones': {
+ 'options': [
+ None, "zones", 'zones', 'zones', 'nsd.zones', 'stacked'],
+ 'lines': [
+ ['zone_master', 'master', 'absolute'],
+ ['zone_slave', 'slave', 'absolute'],]},
+ 'protocol': {
+ 'options': [
+ None, "protocol", 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
+ 'lines': [
+ ['num_udp', 'udp', 'incremental'],
+ ['num_udp6', 'udp6', 'incremental'],
+ ['num_tcp', 'tcp', 'incremental'],
+ ['num_tcp6', 'tcp6', 'incremental'],]},
+ 'type': {
+ 'options': [
+ None, "query type", 'queries/s', 'query type', 'nsd.type', 'stacked'],
+ 'lines': [
+ ['num_type_A', 'A', 'incremental'],
+ ['num_type_NS', 'NS', 'incremental'],
+ ['num_type_CNAME', 'CNAME', 'incremental'],
+ ['num_type_SOA', 'SOA', 'incremental'],
+ ['num_type_PTR', 'PTR', 'incremental'],
+ ['num_type_HINFO', 'HINFO', 'incremental'],
+ ['num_type_MX', 'MX', 'incremental'],
+ ['num_type_NAPTR', 'NAPTR', 'incremental'],
+ ['num_type_TXT', 'TXT', 'incremental'],
+ ['num_type_AAAA', 'AAAA', 'incremental'],
+ ['num_type_SRV', 'SRV', 'incremental'],
+ ['num_type_TYPE255', 'ANY', 'incremental'],]},
+ 'transfer': {
+ 'options': [
+ None, "transfer", 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
+ 'lines': [
+ ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
+ ['num_type_TYPE252', 'AXFR', 'incremental'],]},
+ 'rcode': {
+ 'options': [
+ None, "return code", 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
+ 'lines': [
+ ['num_rcode_NOERROR', 'NOERROR', 'incremental'],
+ ['num_rcode_FORMERR', 'FORMERR', 'incremental'],
+ ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
+ ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
+ ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
+ ['num_rcode_REFUSED', 'REFUSED', 'incremental'],
+ ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental'],]}
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(
+ self, configuration=configuration, name=name)
+ self.command = "nsd-control stats_noreset"
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+ def _get_data(self):
+ lines = self._get_raw_data()
+ if not lines:
+ return None
+
+ r = self.regex
+ stats = dict((k.replace('.', '_'), int(v))
+ for k, v in r.findall(''.join(lines)))
+ stats.setdefault('num_opcode_NOTIFY', 0)
+ stats.setdefault('num_type_TYPE252', 0)
+ stats.setdefault('num_type_TYPE255', 0)
+ return stats
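Aside (not part of the patch): a minimal, self-contained sketch of how the key=value output of `nsd-control stats_noreset` is turned into chart dimensions by the regex above; the sample output lines are hypothetical.

import re

sample = "num.queries=123\nnum.type.A=100\nnum.rcode.NOERROR=120\n"   # hypothetical nsd-control output
regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
stats = dict((k.replace('.', '_'), int(v)) for k, v in regex.findall(sample))
print(stats)   # {'num_queries': 123, 'num_type_A': 100, 'num_rcode_NOERROR': 120}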
diff --git a/python.d/phpfpm.chart.py b/python.d/phpfpm.chart.py
index b79a35d75..7a9835210 100755..100644
--- a/python.d/phpfpm.chart.py
+++ b/python.d/phpfpm.chart.py
@@ -4,6 +4,7 @@
from base import UrlService
import json
+import re
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -19,44 +20,75 @@ retries = 60
# }}
# charts order (can be overridden if you want less charts, or different order)
+
+POOL_INFO = [
+ ('active processes', 'active'),
+ ('max active processes', 'maxActive'),
+ ('idle processes', 'idle'),
+ ('accepted conn', 'requests'),
+ ('max children reached', 'reached'),
+ ('slow requests', 'slow')
+]
+
+PER_PROCESS_INFO = [
+ ('request duration', 'ReqDur'),
+ ('last request cpu', 'ReqCpu'),
+ ('last request memory', 'ReqMem')
+]
+
+
+def average(collection):
+ return sum(collection, 0.0) / max(len(collection), 1)
+
+CALC = [
+ ('min', min),
+ ('max', max),
+ ('avg', average)
+]
+
ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem']
CHARTS = {
'connections': {
- 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections', 'line'],
+ 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections',
+ 'line'],
'lines': [
- ["active"],
- ["maxActive", 'max active'],
- ["idle"]
+ ['active'],
+ ['maxActive', 'max active'],
+ ['idle']
]},
'requests': {
'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
'lines': [
- ["requests", None, "incremental"]
+ ['requests', None, 'incremental']
]},
'performance': {
'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
'lines': [
- ["reached", 'max children reached'],
- ["slow", 'slow requests']
+ ['reached', 'max children reached'],
+ ['slow', 'slow requests']
]},
'request_duration': {
- 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration', 'line'],
+ 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
+ 'line'],
'lines': [
- ["maxReqDur", 'max request duration'],
- ["avgReqDur", 'average request duration']
+ ['minReqDur', 'min', 'absolute', 1, 1000],
+ ['maxReqDur', 'max', 'absolute', 1, 1000],
+ ['avgReqDur', 'avg', 'absolute', 1, 1000]
]},
'request_cpu': {
'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
'lines': [
- ["maxReqCPU", 'max request cpu'],
- ["avgReqCPU", 'average request cpu']
+ ['minReqCpu', 'min'],
+ ['maxReqCpu', 'max'],
+ ['avgReqCpu', 'avg']
]},
'request_mem': {
'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
'lines': [
- ["maxReqMem", 'max request memory'],
- ["avgReqMem", 'average request memory']
+ ['minReqMem', 'min', 'absolute', 1, 1024],
+ ['maxReqMem', 'max', 'absolute', 1, 1024],
+ ['avgReqMem', 'avg', 'absolute', 1, 1024]
]}
}
@@ -64,76 +96,73 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- if len(self.url) == 0:
- self.url = "http://localhost/status?full&json"
+ self.url = self.configuration.get('url', 'http://localhost/status?full&json')
self.order = ORDER
self.definitions = CHARTS
- self.assignment = {"active processes": 'active',
- "max active processes": 'maxActive',
- "idle processes": 'idle',
- "accepted conn": 'requests',
- "max children reached": 'reached',
- "slow requests": 'slow'}
- self.proc_assignment = {"request duration": 'ReqDur',
- "last request cpu": 'ReqCPU',
- "last request memory": 'ReqMem'}
+ self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
+ self.json = '&json' in self.url or '?json' in self.url
+ self.json_full = self.url.endswith(('?full&json', '?json&full'))
+ self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC
+ for metric, p_name in PER_PROCESS_INFO])
def _get_data(self):
"""
Format data received from http request
:return: dict
"""
- try:
- raw = self._get_raw_data()
- except AttributeError:
+ raw = self._get_raw_data()
+ if not raw:
return None
- if '?json' in self.url or '&json' in self.url:
- try:
- raw_json = json.loads(raw)
- except ValueError:
- return None
- data = {}
- for k,v in raw_json.items():
- if k in self.assignment:
- data[self.assignment[k]] = v
-
- if '&full' in self.url or '?full' in self.url:
- c = 0
- sum_val = {}
- for proc in raw_json['processes']:
- if proc['state'] != 'Idle':
- continue
- c += 1
- for k, v in self.proc_assignment.items():
- d = proc[k]
- if v == 'ReqDur':
- d = d/1000
- if v == 'ReqMem':
- d = d/1024
- if 'max' + v not in data or data['max' + v] < d:
- data['max' + v] = d
- if 'avg' + v not in sum_val:
- sum_val['avg' + v] = 0
- data['avg' + v] = 0
- sum_val['avg' + v] += d
- if len(sum_val):
- for k, v in sum_val.items():
- data[k] = v/c
-
- if len(data) == 0:
- return None
- return data
-
- raw = raw.split('\n')
- data = {}
- for row in raw:
- tmp = row.split(":")
- if str(tmp[0]) in self.assignment:
- try:
- data[self.assignment[tmp[0]]] = int(tmp[1])
- except (IndexError, ValueError):
- pass
- if len(data) == 0:
- return None
- return data
+ raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw)
+
+ # Per Pool info: active connections, requests and performance charts
+ to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
+
+ # Per Process Info: duration, cpu and memory charts (min, max, avg)
+ if self.json_full:
+ p_info = dict()
+ to_netdata.update(self.if_all_processes_running)  # zero defaults, in case no process is currently idle
+ # Per-process metrics are meaningful only for processes in the 'Idle' state,
+ # because PHP-FPM calculates them when request processing has finished
+ for process in [p for p in raw_json['processes'] if p['state'] == 'Idle']:
+ p_info.update(fetch_data_(raw_data=process, metrics_list=PER_PROCESS_INFO, pid=str(process['pid'])))
+
+ if p_info:
+ for new_name in PER_PROCESS_INFO:
+ for name, function in CALC:
+ to_netdata[name + new_name[1]] = function([p_info[k] for k in p_info if new_name[1] in k])
+
+ return to_netdata or None
+
+
+def fetch_data_(raw_data, metrics_list, pid=''):
+ """
+ :param raw_data: dict
+ :param metrics_list: list
+ :param pid: str
+ :return: dict
+ """
+ result = dict()
+ for metric, new_name in metrics_list:
+ if metric in raw_data:
+ result[new_name + pid] = float(raw_data[metric])
+ return result
+
+
+def parse_raw_data_(is_json, regex, raw_data):
+ """
+ :param is_json: bool
+ :param regex: compiled regular expr
+ :param raw_data: str
+ :return: dict
+ """
+ if is_json:
+ try:
+ return json.loads(raw_data)
+ except ValueError:
+ return dict()
+ else:
+ raw_data = ' '.join(raw_data.split())
+ return dict(regex.findall(raw_data))
+
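Aside (not part of the patch): the min/max/avg aggregation behind the new per-process charts, as a standalone sketch over a hypothetical p_info dict (keys are metric name plus pid, the same shape fetch_data_() builds).

def average(collection):
    return sum(collection, 0.0) / max(len(collection), 1)

CALC = [('min', min), ('max', max), ('avg', average)]
PER_PROCESS_INFO = [('request duration', 'ReqDur'), ('last request cpu', 'ReqCpu'), ('last request memory', 'ReqMem')]

p_info = {'ReqDur6781': 90.0, 'ReqDur6782': 110.0, 'ReqCpu6781': 1.0, 'ReqCpu6782': 3.0}  # hypothetical values
to_netdata = {}
for _, suffix in PER_PROCESS_INFO:
    values = [v for k, v in p_info.items() if suffix in k]
    if values:
        for name, func in CALC:
            to_netdata[name + suffix] = func(values)
print(to_netdata)   # e.g. {'minReqDur': 90.0, 'maxReqDur': 110.0, 'avgReqDur': 100.0, 'minReqCpu': 1.0, ...}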
diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py
index 919b6f8ee..d359bb4f7 100644
--- a/python.d/postgres.chart.py
+++ b/python.d/postgres.chart.py
@@ -2,12 +2,16 @@
# Description: example netdata python.d module
# Authors: facetoe, dangtranhoang
-import re
from copy import deepcopy
-import psycopg2
-from psycopg2 import extensions
-from psycopg2.extras import DictCursor
+try:
+ import psycopg2
+ from psycopg2 import extensions
+ from psycopg2.extras import DictCursor
+ from psycopg2 import OperationalError
+ PSYCOPG2 = True
+except ImportError:
+ PSYCOPG2 = False
from base import SimpleService
@@ -16,39 +20,70 @@ update_every = 1
priority = 90000
retries = 60
-ARCHIVE = """
+METRICS = dict(
+ DATABASE=['connections',
+ 'xact_commit',
+ 'xact_rollback',
+ 'blks_read',
+ 'blks_hit',
+ 'tup_returned',
+ 'tup_fetched',
+ 'tup_inserted',
+ 'tup_updated',
+ 'tup_deleted',
+ 'conflicts',
+ 'size'],
+ BACKENDS=['backends_active',
+ 'backends_idle'],
+ INDEX_STATS=['index_count',
+ 'index_size'],
+ TABLE_STATS=['table_size',
+ 'table_count'],
+ ARCHIVE=['ready_count',
+ 'done_count',
+ 'file_count'],
+ BGWRITER=['writer_scheduled',
+ 'writer_requested'],
+ LOCKS=['ExclusiveLock',
+ 'RowShareLock',
+ 'SIReadLock',
+ 'ShareUpdateExclusiveLock',
+ 'AccessExclusiveLock',
+ 'AccessShareLock',
+ 'ShareRowExclusiveLock',
+ 'ShareLock',
+ 'RowExclusiveLock']
+)
+
+QUERIES = dict(
+ ARCHIVE="""
SELECT
CAST(COUNT(*) AS INT) AS file_count,
CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
FROM
pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
-"""
-
-BACKENDS = """
+""",
+ BACKENDS="""
SELECT
count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
(SELECT count(*) FROM pg_stat_activity WHERE state = 'idle' ) AS backends_idle
-FROM
- pg_stat_activity;
-"""
-
-TABLE_STATS = """
+FROM pg_stat_activity;
+""",
+ TABLE_STATS="""
SELECT
- ((sum(relpages) * 8) * 1024) AS size_relations,
- count(1) AS relations
+ ((sum(relpages) * 8) * 1024) AS table_size,
+ count(1) AS table_count
FROM pg_class
WHERE relkind IN ('r', 't');
-"""
-
-INDEX_STATS = """
+""",
+ INDEX_STATS="""
SELECT
- ((sum(relpages) * 8) * 1024) AS size_indexes,
- count(1) AS indexes
+ ((sum(relpages) * 8) * 1024) AS index_size,
+ count(1) AS index_count
FROM pg_class
-WHERE relkind = 'i';"""
-
-DATABASE = """
+WHERE relkind = 'i';""",
+ DATABASE="""
SELECT
datname AS database_name,
sum(numbackends) AS connections,
@@ -61,93 +96,108 @@ SELECT
sum(tup_inserted) AS tup_inserted,
sum(tup_updated) AS tup_updated,
sum(tup_deleted) AS tup_deleted,
- sum(conflicts) AS conflicts
+ sum(conflicts) AS conflicts,
+ pg_database_size(datname) AS size
FROM pg_stat_database
WHERE NOT datname ~* '^template\d+'
GROUP BY database_name;
-"""
-
-BGWRITER = 'SELECT * FROM pg_stat_bgwriter;'
-DATABASE_LOCKS = """
+""",
+ BGWRITER="""
+SELECT
+ checkpoints_timed AS writer_scheduled,
+ checkpoints_req AS writer_requested
+FROM pg_stat_bgwriter;""",
+ LOCKS="""
SELECT
pg_database.datname as database_name,
mode,
- count(mode) AS count
+ count(mode) AS locks_count
FROM pg_locks
INNER JOIN pg_database ON pg_database.oid = pg_locks.database
GROUP BY datname, mode
ORDER BY datname, mode;
-"""
-REPLICATION = """
-SELECT
- client_hostname,
- client_addr,
- state,
- sent_offset - (
- replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
-FROM (
- SELECT
- client_addr, client_hostname, state,
- ('x' || lpad(split_part(sent_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
- ('x' || lpad(split_part(replay_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
- ('x' || lpad(split_part(sent_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
- ('x' || lpad(split_part(replay_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
- FROM pg_stat_replication
-) AS s;
-"""
-
-LOCK_TYPES = [
- 'ExclusiveLock',
- 'RowShareLock',
- 'SIReadLock',
- 'ShareUpdateExclusiveLock',
- 'AccessExclusiveLock',
- 'AccessShareLock',
- 'ShareRowExclusiveLock',
- 'ShareLock',
- 'RowExclusiveLock'
-]
-
-ORDER = ['db_stat_transactions', 'db_stat_tuple_read', 'db_stat_tuple_returned', 'db_stat_tuple_write',
+""",
+ FIND_DATABASES="""
+SELECT datname FROM pg_stat_database WHERE NOT datname ~* '^template\d+'
+""",
+ IF_SUPERUSER="""
+SELECT current_setting('is_superuser') = 'on' AS is_superuser;
+ """)
+
+# REPLICATION = """
+# SELECT
+# client_hostname,
+# client_addr,
+# state,
+# sent_offset - (
+# replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
+# FROM (
+# SELECT
+# client_addr, client_hostname, state,
+# ('x' || lpad(split_part(sent_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
+# ('x' || lpad(split_part(replay_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
+# ('x' || lpad(split_part(sent_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
+# ('x' || lpad(split_part(replay_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
+# FROM pg_stat_replication
+# ) AS s;
+# """
+
+
+QUERY_STATS = {
+ QUERIES['DATABASE']: METRICS['DATABASE'],
+ QUERIES['BACKENDS']: METRICS['BACKENDS'],
+ QUERIES['ARCHIVE']: METRICS['ARCHIVE'],
+ QUERIES['LOCKS']: METRICS['LOCKS']
+}
+
+ORDER = ['db_stat_transactions', 'db_stat_tuple_read', 'db_stat_tuple_returned', 'db_stat_tuple_write', 'database_size',
'backend_process', 'index_count', 'index_size', 'table_count', 'table_size', 'wal', 'background_writer']
CHARTS = {
'db_stat_transactions': {
- 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions', 'line'],
+ 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions',
+ 'line'],
'lines': [
- ['db_stat_xact_commit', 'committed', 'incremental'],
- ['db_stat_xact_rollback', 'rolled back', 'incremental']
+ ['xact_commit', 'committed', 'incremental'],
+ ['xact_rollback', 'rolled back', 'incremental']
]},
'db_stat_connections': {
- 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections', 'line'],
+ 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections',
+ 'line'],
'lines': [
- ['db_stat_connections', 'connections', 'absolute']
+ ['connections', 'connections', 'absolute']
]},
'db_stat_tuple_read': {
'options': [None, 'Tuple reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_tuple_read', 'line'],
'lines': [
- ['db_stat_blks_read', 'disk', 'incremental'],
- ['db_stat_blks_hit', 'cache', 'incremental']
+ ['blks_read', 'disk', 'incremental'],
+ ['blks_hit', 'cache', 'incremental']
]},
'db_stat_tuple_returned': {
- 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned', 'line'],
+ 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned',
+ 'line'],
'lines': [
- ['db_stat_tup_returned', 'sequential', 'incremental'],
- ['db_stat_tup_fetched', 'bitmap', 'incremental']
+ ['tup_returned', 'sequential', 'incremental'],
+ ['tup_fetched', 'bitmap', 'incremental']
]},
'db_stat_tuple_write': {
'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
'lines': [
- ['db_stat_tup_inserted', 'inserted', 'incremental'],
- ['db_stat_tup_updated', 'updated', 'incremental'],
- ['db_stat_tup_deleted', 'deleted', 'incremental'],
- ['db_stat_conflicts', 'conflicts', 'incremental']
+ ['tup_inserted', 'inserted', 'incremental'],
+ ['tup_updated', 'updated', 'incremental'],
+ ['tup_deleted', 'deleted', 'incremental'],
+ ['conflicts', 'conflicts', 'incremental']
+ ]},
+ 'database_size': {
+ 'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
+ 'lines': [
]},
'backend_process': {
- 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process', 'line'],
+ 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process',
+ 'line'],
'lines': [
- ['backend_process_active', 'active', 'absolute'],
- ['backend_process_idle', 'idle', 'absolute']
+ ['backends_active', 'active', 'absolute'],
+ ['backends_idle', 'idle', 'absolute']
]},
'index_count': {
'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
@@ -172,15 +222,15 @@ CHARTS = {
'wal': {
'options': [None, 'Write-Ahead Logging Statistics', 'files/s', 'write ahead log', 'postgres.wal', 'line'],
'lines': [
- ['wal_total', 'total', 'incremental'],
- ['wal_ready', 'ready', 'incremental'],
- ['wal_done', 'done', 'incremental']
+ ['file_count', 'total', 'incremental'],
+ ['ready_count', 'ready', 'incremental'],
+ ['done_count', 'done', 'incremental']
]},
'background_writer': {
'options': [None, 'Checkpoints', 'writes/s', 'background writer', 'postgres.background_writer', 'line'],
'lines': [
- ['background_writer_scheduled', 'scheduled', 'incremental'],
- ['background_writer_requested', 'requested', 'incremental']
+ ['writer_scheduled', 'scheduled', 'incremental'],
+ ['writer_requested', 'requested', 'incremental']
]}
}
@@ -188,199 +238,169 @@ CHARTS = {
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
super(self.__class__, self).__init__(configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.table_stats = configuration.pop('table_stats', True)
- self.index_stats = configuration.pop('index_stats', True)
+ self.order = ORDER[:]
+ self.definitions = deepcopy(CHARTS)
+ self.table_stats = configuration.pop('table_stats', False)
+ self.index_stats = configuration.pop('index_stats', False)
self.configuration = configuration
- self.connection = None
+ self.connection = False
self.is_superuser = False
- self.data = {}
- self.databases = set()
+ self.data = dict()
+ self.locks_zeroed = dict()
+ self.databases = list()
def _connect(self):
params = dict(user='postgres',
database=None,
password=None,
- host='localhost',
+ host=None,
port=5432)
params.update(self.configuration)
if not self.connection:
- self.connection = psycopg2.connect(**params)
- self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
- self.connection.set_session(readonly=True)
+ try:
+ self.connection = psycopg2.connect(**params)
+ self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ self.connection.set_session(readonly=True)
+ except OperationalError as error:
+ return False, str(error)
+ return True, True
def check(self):
+ if not PSYCOPG2:
+ self.error('\'python-psycopg2\' module is needed to use postgres.chart.py')
+ return False
+ result, error = self._connect()
+ if not result:
+ conf = dict((k, v if k != 'password' else '*****') for k, v in self.configuration.items())
+ self.error('Failed to connect to %s. Error: %s' % (str(conf), error))
+ return False
try:
- self._connect()
cursor = self.connection.cursor()
- self._discover_databases(cursor)
- self._check_if_superuser(cursor)
+ self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES'])
+ is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
cursor.close()
- self._create_definitions()
+ self.locks_zeroed = populate_lock_types(self.databases)
+ self.add_additional_queries_(is_superuser)
+ self.create_dynamic_charts_()
return True
- except Exception as e:
- self.error(str(e))
+ except Exception as error:
+ self.error(str(error))
return False
- def _discover_databases(self, cursor):
- cursor.execute("""
- SELECT datname
- FROM pg_stat_database
- WHERE NOT datname ~* '^template\d+'
- """)
- self.databases = set(r[0] for r in cursor)
-
- def _check_if_superuser(self, cursor):
- cursor.execute("""
- SELECT current_setting('is_superuser') = 'on' AS is_superuser;
- """)
- self.is_superuser = cursor.fetchone()[0]
-
- def _create_definitions(self):
- for database_name in self.databases:
- for chart_template_name in list(CHARTS):
- if chart_template_name.startswith('db_stat'):
- self._add_database_stat_chart(chart_template_name, database_name)
- self._add_database_lock_chart(database_name)
-
- def _add_database_stat_chart(self, chart_template_name, database_name):
- chart_template = CHARTS[chart_template_name]
- chart_name = "{0}_{1}".format(database_name, chart_template_name)
- if chart_name not in self.order:
- self.order.insert(0, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
- self.definitions[chart_name] = {
- 'options': [
- name,
- title + ': ' + database_name,
- units,
- 'db ' + database_name,
- context,
- chart_type
- ]
- }
-
- self.definitions[chart_name]['lines'] = []
- for line in deepcopy(chart_template['lines']):
- line[0] = "{0}_{1}".format(database_name, line[0])
- self.definitions[chart_name]['lines'].append(line)
-
- def _add_database_lock_chart(self, database_name):
- chart_name = "{0}_locks".format(database_name)
- if chart_name not in self.order:
- self.order.insert(-1, chart_name)
- self.definitions[chart_name] = dict(
- options=
- [
- None,
- 'Locks on db: ' + database_name,
- 'locks',
- 'db ' + database_name,
- 'postgres.db_locks',
- 'line'
- ],
- lines=[]
- )
-
- for lock_type in LOCK_TYPES:
- lock_id = "{0}_{1}".format(database_name, lock_type)
- label = re.sub("([a-z])([A-Z])", "\g<1> \g<2>", lock_type)
- self.definitions[chart_name]['lines'].append([lock_id, label, 'absolute'])
-
- def _get_data(self):
- self._connect()
-
- cursor = self.connection.cursor(cursor_factory=DictCursor)
- self.add_stats(cursor)
-
- cursor.close()
- return self.data
-
- def add_stats(self, cursor):
- self.add_database_stats(cursor)
- self.add_backend_stats(cursor)
+ def add_additional_queries_(self, is_superuser):
if self.index_stats:
- self.add_index_stats(cursor)
+ QUERY_STATS[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
if self.table_stats:
- self.add_table_stats(cursor)
- self.add_lock_stats(cursor)
- self.add_bgwriter_stats(cursor)
+ QUERY_STATS[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
+ if is_superuser:
+ QUERY_STATS[QUERIES['BGWRITER']] = METRICS['BGWRITER']
- # self.add_replication_stats(cursor)
+ def create_dynamic_charts_(self):
- if self.is_superuser:
- self.add_wal_stats(cursor)
+ for database_name in self.databases[::-1]:
+ self.definitions['database_size']['lines'].append([database_name + '_size',
+ database_name, 'absolute', 1, 1024 * 1024])
+ for chart_name in [name for name in CHARTS if name.startswith('db_stat')]:
+ add_database_stat_chart_(order=self.order, definitions=self.definitions,
+ name=chart_name, database_name=database_name)
- def add_database_stats(self, cursor):
- cursor.execute(DATABASE)
- for row in cursor:
- database_name = row.get('database_name')
- self.data["{0}_{1}".format(database_name, 'db_stat_xact_commit')] = int(row.get('xact_commit', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_xact_rollback')] = int(row.get('xact_rollback', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_blks_read')] = int(row.get('blks_read', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_blks_hit')] = int(row.get('blks_hit', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_tup_returned')] = int(row.get('tup_returned', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_tup_fetched')] = int(row.get('tup_fetched', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_tup_inserted')] = int(row.get('tup_inserted', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_tup_updated')] = int(row.get('tup_updated', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_tup_deleted')] = int(row.get('tup_deleted', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_conflicts')] = int(row.get('conflicts', 0))
- self.data["{0}_{1}".format(database_name, 'db_stat_connections')] = int(row.get('connections', 0))
-
- def add_backend_stats(self, cursor):
- cursor.execute(BACKENDS)
- temp = cursor.fetchone()
-
- self.data['backend_process_active'] = int(temp.get('backends_active', 0))
- self.data['backend_process_idle'] = int(temp.get('backends_idle', 0))
-
- def add_index_stats(self, cursor):
- cursor.execute(INDEX_STATS)
- temp = cursor.fetchone()
- self.data['index_count'] = int(temp.get('indexes', 0))
- self.data['index_size'] = int(temp.get('size_indexes', 0))
-
- def add_table_stats(self, cursor):
- cursor.execute(TABLE_STATS)
- temp = cursor.fetchone()
- self.data['table_count'] = int(temp.get('relations', 0))
- self.data['table_size'] = int(temp.get('size_relations', 0))
-
- def add_lock_stats(self, cursor):
- cursor.execute(DATABASE_LOCKS)
-
- # zero out all current lock values
- for database_name in self.databases:
- for lock_type in LOCK_TYPES:
- self.data["{0}_{1}".format(database_name, lock_type)] = 0
-
- # populate those that have current locks
+ add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name)
+
+ def _get_data(self):
+ result, error = self._connect()
+ if result:
+ cursor = self.connection.cursor(cursor_factory=DictCursor)
+ try:
+ self.data.update(self.locks_zeroed)
+ for query, metrics in QUERY_STATS.items():
+ self.query_stats_(cursor, query, metrics)
+
+ except OperationalError:
+ self.connection = False
+ cursor.close()
+ return None
+ else:
+ cursor.close()
+ return self.data
+ else:
+ return None
+
+ def query_stats_(self, cursor, query, metrics):
+ cursor.execute(query)
for row in cursor:
- database_name, lock_type, lock_count = row
- self.data["{0}_{1}".format(database_name, lock_type)] = lock_count
-
- def add_wal_stats(self, cursor):
- cursor.execute(ARCHIVE)
- temp = cursor.fetchone()
- self.data['wal_total'] = int(temp.get('file_count', 0))
- self.data['wal_ready'] = int(temp.get('ready_count', 0))
- self.data['wal_done'] = int(temp.get('done_count', 0))
-
- def add_bgwriter_stats(self, cursor):
- cursor.execute(BGWRITER)
- temp = cursor.fetchone()
- self.data['background_writer_scheduled'] = temp.get('checkpoints_timed', 0)
- self.data['background_writer_requested'] = temp.get('checkpoints_requests', 0)
-
-'''
- def add_replication_stats(self, cursor):
- cursor.execute(REPLICATION)
- temp = cursor.fetchall()
- for row in temp:
- self.add_gauge_value('Replication/%s' % row.get('client_addr', 'Unknown'),
- 'byte_lag',
- int(row.get('byte_lag', 0)))
-'''
+ for metric in metrics:
+ dimension_id = '_'.join([row['database_name'], metric]) if 'database_name' in row else metric
+ if metric in row:
+ self.data[dimension_id] = int(row[metric])
+ elif 'locks_count' in row:
+ self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0
+
+
+def discover_databases_(cursor, query):
+ cursor.execute(query)
+ result = list()
+ for db in [database[0] for database in cursor]:
+ if db not in result:
+ result.append(db)
+ return result
+
+
+def check_if_superuser_(cursor, query):
+ cursor.execute(query)
+ return cursor.fetchone()[0]
+
+
+def populate_lock_types(databases):
+ result = dict()
+ for database in databases:
+ for lock_type in METRICS['LOCKS']:
+ key = '_'.join([database, lock_type])
+ result[key] = 0
+
+ return result
+
+
+def add_database_lock_chart_(order, definitions, database_name):
+ def create_lines(database):
+ result = list()
+ for lock_type in METRICS['LOCKS']:
+ dimension_id = '_'.join([database, lock_type])
+ result.append([dimension_id, lock_type, 'absolute'])
+ return result
+
+ chart_name = database_name + '_locks'
+ order.insert(-1, chart_name)
+ definitions[chart_name] = {
+ 'options':
+ [None, 'Locks on db: ' + database_name, 'locks', 'db ' + database_name, 'postgres.db_locks', 'line'],
+ 'lines': create_lines(database_name)
+ }
+
+
+def add_database_stat_chart_(order, definitions, name, database_name):
+ def create_lines(database, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([database, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([database_name, name])
+ order.insert(0, chart_name)
+ name, title, units, family, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
+ 'lines': create_lines(database_name, chart_template['lines'])}
+
+
+#
+# def add_replication_stats(self, cursor):
+# cursor.execute(REPLICATION)
+# temp = cursor.fetchall()
+# for row in temp:
+# self.add_gauge_value('Replication/%s' % row.get('client_addr', 'Unknown'),
+# 'byte_lag',
+# int(row.get('byte_lag', 0)))
diff --git a/python.d/python_modules/__init__.py b/python.d/python_modules/__init__.py
index 8d1c8b69c..8d1c8b69c 100755..100644
--- a/python.d/python_modules/__init__.py
+++ b/python.d/python_modules/__init__.py
diff --git a/python.d/python_modules/base.py b/python.d/python_modules/base.py
index 320c54bae..859300eca 100644
--- a/python.d/python_modules/base.py
+++ b/python.d/python_modules/base.py
@@ -18,19 +18,39 @@
# using ".encode()" in one thread can block other threads as well (only in python2)
import time
-# import sys
import os
import socket
import select
+import threading
+import msg
+import ssl
+from subprocess import Popen, PIPE
+from sys import exc_info
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
+
try:
import urllib.request as urllib2
except ImportError:
import urllib2
-from subprocess import Popen, PIPE
+try:
+ import MySQLdb
+ PYMYSQL = True
+except ImportError:
+ try:
+ import pymysql as MySQLdb
+ PYMYSQL = True
+ except ImportError:
+ PYMYSQL = False
-import threading
-import msg
+try:
+ PATH = os.getenv('PATH').split(':')
+except AttributeError:
+ PATH = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'.split(':')
# class BaseService(threading.Thread):
@@ -61,6 +81,7 @@ class SimpleService(threading.Thread):
self.__first_run = True
self.order = []
self.definitions = {}
+ self._data_from_check = dict()
if configuration is None:
self.error("BaseService: no configuration parameters supplied. Cannot create Service.")
raise RuntimeError
@@ -385,7 +406,7 @@ class SimpleService(threading.Thread):
Create charts
:return: boolean
"""
- data = self._get_data()
+ data = self._data_from_check or self._get_data()
if data is None:
self.debug("failed to receive data during create().")
return False
@@ -431,101 +452,93 @@ class SimpleService(threading.Thread):
return updated
+ @staticmethod
+ def find_binary(binary):
+ try:
+ if isinstance(binary, str):
+ binary = os.path.basename(binary)
+ return next(('/'.join([p, binary]) for p in PATH
+ if os.path.isfile('/'.join([p, binary]))
+ and os.access('/'.join([p, binary]), os.X_OK)))
+ else:
+ return None
+ except StopIteration:
+ return None
+
class UrlService(SimpleService):
- # TODO add support for https connections
def __init__(self, configuration=None, name=None):
- self.url = ""
- self.user = None
- self.password = None
- self.proxies = {}
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.ss_cert = self.configuration.get('ss_cert')
def __add_openers(self):
- # TODO add error handling
- self.opener = urllib2.build_opener()
-
- # Proxy handling
- # TODO currently self.proxies isn't parsed from configuration file
- # if len(self.proxies) > 0:
- # for proxy in self.proxies:
- # url = proxy['url']
- # # TODO test this:
- # if "user" in proxy and "pass" in proxy:
- # if url.lower().startswith('https://'):
- # url = 'https://' + proxy['user'] + ':' + proxy['pass'] + '@' + url[8:]
- # else:
- # url = 'http://' + proxy['user'] + ':' + proxy['pass'] + '@' + url[7:]
- # # FIXME move proxy auth to sth like this:
- # # passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- # # passman.add_password(None, url, proxy['user'], proxy['password'])
- # # opener.add_handler(urllib2.HTTPBasicAuthHandler(passman))
- #
- # if url.lower().startswith('https://'):
- # opener.add_handler(urllib2.ProxyHandler({'https': url}))
- # else:
- # opener.add_handler(urllib2.ProxyHandler({'https': url}))
+ def self_signed_cert(ss_cert):
+ if ss_cert:
+ try:
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+ return urllib2.build_opener(urllib2.HTTPSHandler(context=ctx))
+ except AttributeError:
+ return None
+ else:
+ return None
+
+ self.opener = self_signed_cert(self.ss_cert) or urllib2.build_opener()
# HTTP Basic Auth
- if self.user is not None and self.password is not None:
+ if self.user and self.password:
+ url_parse = urlparse(self.url)
+ top_level_url = '://'.join([url_parse.scheme, url_parse.netloc])
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- passman.add_password(None, self.url, self.user, self.password)
+ passman.add_password(None, top_level_url, self.user, self.password)
self.opener.add_handler(urllib2.HTTPBasicAuthHandler(passman))
self.debug("Enabling HTTP basic auth")
- #urllib2.install_opener(opener)
-
- def _get_raw_data(self):
+ def _get_raw_data(self, custom_url=None):
"""
Get raw data from http request
:return: str
"""
- raw = None
+ raw_data = None
+ f = None
try:
- f = self.opener.open(self.url, timeout=self.update_every * 2)
- # f = urllib2.urlopen(self.url, timeout=self.update_every * 2)
- except Exception as e:
- self.error(str(e))
+ f = self.opener.open(custom_url or self.url, timeout=self.update_every * 2)
+ raw_data = f.read().decode('utf-8', 'ignore')
+ except Exception as error:
+ self.error('Url: %s. Error: %s' % (custom_url or self.url, str(error)))
return None
-
- try:
- raw = f.read().decode('utf-8', 'ignore')
- except Exception as e:
- self.error(str(e))
finally:
- f.close()
- return raw
+ if f is not None: f.close()
+
+ return raw_data or None
def check(self):
"""
Format configuration data and try to connect to server
:return: boolean
"""
- if self.name is None or self.name == str(None):
- self.name = 'local'
- self.chart_name += "_" + self.name
- else:
- self.name = str(self.name)
- try:
- self.url = str(self.configuration['url'])
- except (KeyError, TypeError):
- pass
- try:
- self.user = str(self.configuration['user'])
- except (KeyError, TypeError):
- pass
- try:
- self.password = str(self.configuration['pass'])
- except (KeyError, TypeError):
- pass
+ if not (self.url and isinstance(self.url, str)):
+ self.error('URL is not defined or type is not <str>')
+ return False
self.__add_openers()
- test = self._get_data()
- if test is None or len(test) == 0:
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Url: %s. Error: %s' % (self.url, error))
return False
- else:
+
+ if isinstance(data, dict) and data:
+ self._data_from_check = data
return True
+ else:
+ self.error("_get_data() returned no data or type is not <dict>")
+ return False
class SocketService(SimpleService):
@@ -829,63 +842,212 @@ class LogService(SimpleService):
class ExecutableService(SimpleService):
- bad_substrings = ('&', '|', ';', '>', '<')
def __init__(self, configuration=None, name=None):
- self.command = ""
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.command = None
def _get_raw_data(self):
"""
Get raw data from executed command
- :return: str
+ :return: <list>
"""
try:
p = Popen(self.command, stdout=PIPE, stderr=PIPE)
- except Exception as e:
- self.error("Executing command", self.command, "resulted in error:", str(e))
+ except Exception as error:
+ self.error("Executing command", self.command, "resulted in error:", str(error))
return None
- data = []
+ data = list()
for line in p.stdout.readlines():
- data.append(str(line.decode()))
+ data.append(line.decode())
- if len(data) == 0:
- self.error("No data collected.")
- return None
-
- return data
+ return data or None
def check(self):
"""
Parse basic configuration, check if command is whitelisted and is returning values
- :return: boolean
+ :return: <boolean>
"""
- if self.name is not None or self.name != str(None):
- self.name = ""
+ # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified)
+ if 'command' in self.configuration:
+ self.command = self.configuration['command']
+
+ # "command" must be: 1.not None 2. type <str>
+ if not (self.command and isinstance(self.command, str)):
+ self.error('Command is not defined or command type is not <str>')
+ return False
+
+ # Split "command" into: 1. command <str> 2. options <list>
+ command, opts = self.command.split()[0], self.command.split()[1:]
+
+ # Check for "bad" symbols in options. No pipes, redirects etc. TODO: what is missing?
+ bad_opts = set(''.join(opts)) & set(['&', '|', ';', '>', '<'])
+ if bad_opts:
+ self.error("Bad command argument(s): %s" % bad_opts)
+ return False
+
+ # Find absolute path ('echo' => '/bin/echo')
+ if '/' not in command:
+ command = self.find_binary(command)
+ if not command:
+ self.error('Can\'t locate "%s" binary in PATH(%s)' % (self.command, PATH))
+ return False
+ # Check that the binary exists and is executable
else:
- self.name = str(self.name)
+ if not (os.path.isfile(command) and os.access(command, os.X_OK)):
+ self.error('"%s" is not a file or not executable' % command)
+ return False
+
+ self.command = [command] + opts if opts else [command]
+
try:
- self.command = str(self.configuration['command'])
- except (KeyError, TypeError):
- self.info("No command specified. Using: '" + self.command + "'")
- # Splitting self.command on every space so subprocess.Popen reads it properly
- self.command = self.command.split(' ')
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Command: %s. Error: %s' % (self.command, error))
+ return False
- for arg in self.command[1:]:
- if any(st in arg for st in self.bad_substrings):
- self.error("Bad command argument:" + " ".join(self.command[1:]))
- return False
+ if isinstance(data, dict) and data:
+ # We need this for the create() method. No reason to execute _get_data() again if the result is a non-empty dict
+ self._data_from_check = data
+ return True
+ else:
+ self.error("Command", str(self.command), "returned no data")
+ return False
+
+
+class MySQLService(SimpleService):
+
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.__connection = None
+ self.__conn_properties = dict()
+ self.extra_conn_properties = dict()
+ self.__queries = self.configuration.get('queries', dict())
+ self.queries = dict()
+
+ def __connect(self):
+ try:
+ connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties)
+ except (MySQLdb.MySQLError, TypeError, AttributeError) as error:
+ return None, str(error)
+ else:
+ return connection, None
- # test command and search for it in /usr/sbin or /sbin when failed
- base = self.command[0].split('/')[-1]
- if self._get_raw_data() is None:
- for prefix in ['/sbin/', '/usr/sbin/']:
- self.command[0] = prefix + base
- if os.path.isfile(self.command[0]):
- break
+ def check(self):
+ def get_connection_properties(conf, extra_conf):
+ properties = dict()
+ if 'user' in conf and conf['user']:
+ properties['user'] = conf['user']
+ if 'pass' in conf and conf['pass']:
+ properties['passwd'] = conf['pass']
+ if 'socket' in conf and conf['socket']:
+ properties['unix_socket'] = conf['socket']
+ elif 'host' in conf and conf['host']:
+ properties['host'] = conf['host']
+ properties['port'] = int(conf.get('port', 3306))
+ elif 'my.cnf' in conf and conf['my.cnf']:
+ properties['read_default_file'] = conf['my.cnf']
+ if isinstance(extra_conf, dict) and extra_conf:
+ properties.update(extra_conf)
+
+ return properties or None
+
+ def is_valid_queries_dict(raw_queries, log_error):
+ """
+ :param raw_queries: dict
+ :param log_error: function
+ :return: dict or None
+
+ raw_queries is valid when it is a non-empty <dict> that stays non-empty after filtering with is_valid_query()
+ """
+ def is_valid_query(query):
+ return all([isinstance(query, str),
+ query.startswith(('SELECT', 'select', 'SHOW', 'show'))])
+
+ if hasattr(raw_queries, 'keys') and raw_queries:
+ valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)])
+ bad_queries = set(raw_queries) - set(valid_queries)
+
+ if bad_queries:
+ log_error('Removed query(s): %s' % bad_queries)
+ return valid_queries
+ else:
+ log_error('Unsupported "queries" format. Must be a non-empty <dict>')
+ return None
- if self._get_data() is None or len(self._get_data()) == 0:
- self.error("Command", self.command, "returned no data")
+ if not PYMYSQL:
+ self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
return False
- return True
+ # Preference: 1. "queries" from the configuration file 2. "queries" from the module
+ self.queries = self.__queries or self.queries
+ # Check if "self.queries" exist, not empty and all queries are in valid format
+ self.queries = is_valid_queries_dict(self.queries, self.error)
+ if not self.queries:
+ return False
+
+ # Get connection properties
+ self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties)
+ if not self.__conn_properties:
+ self.error('Connection properties are missing')
+ return False
+
+ # Create connection to the database
+ self.__connection, error = self.__connect()
+ if error:
+ self.error('Can\'t establish connection to MySQL: %s' % error)
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Error: %s' % error)
+ return False
+
+ if isinstance(data, dict) and data:
+ # We need this for the create() method
+ self._data_from_check = data
+ return True
+ else:
+ self.error("_get_data() returned no data or type is not <dict>")
+ return False
+
+ def _get_raw_data(self, description=None):
+ """
+ Get raw data from MySQL server
+ :return: dict: fetchall() or (fetchall(), description)
+ """
+
+ if not self.__connection:
+ self.__connection, error = self.__connect()
+ if error:
+ return None
+
+ raw_data = dict()
+ queries = dict(self.queries)
+ try:
+ with self.__connection as cursor:
+ for name, query in queries.items():
+ try:
+ cursor.execute(query)
+ except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
+ if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
+ raise RuntimeError
+ self.error('Removed query: %s[%s]. Error: %s'
+ % (name, query, error))
+ self.queries.pop(name)
+ continue
+ else:
+ raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ self.__connection.commit()
+ except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
+ self.__connection.close()
+ self.__connection = None
+ return None
+ else:
+ return raw_data or None
+
+ @staticmethod
+ def __is_error_critical(err_class, err_text):
+ return err_class == MySQLdb.OperationalError and all(['denied' not in err_text,
+ 'Unknown column' not in err_text])
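Aside (not part of the patch): the command handling that ExecutableService.check() now performs, shown as a standalone sketch -- split the command from its options, refuse shell metacharacters, then resolve the binary through PATH. The example command is borrowed from the nsd module above.

import os

PATH = (os.getenv('PATH') or '/usr/sbin:/usr/bin:/sbin:/bin').split(':')

def find_binary(binary):
    for directory in PATH:
        candidate = os.path.join(directory, os.path.basename(binary))
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None

command = 'nsd-control stats_noreset'
name, opts = command.split()[0], command.split()[1:]
if set(''.join(opts)) & set('&|;><'):
    raise ValueError('shell metacharacters are not allowed in command options')
resolved = find_binary(name)
popen_args = [resolved] + opts if resolved else None   # e.g. ['/usr/sbin/nsd-control', 'stats_noreset']
print(popen_args)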
diff --git a/python.d/smartd_log.chart.py b/python.d/smartd_log.chart.py
new file mode 100644
index 000000000..e80372379
--- /dev/null
+++ b/python.d/smartd_log.chart.py
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+# Description: smart netdata python.d module
+# Author: l2isbad, vorph1
+
+from re import compile
+from os import listdir, access, R_OK
+from os.path import isfile, join, getsize, basename, isdir
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+from threading import Thread
+from base import SimpleService
+from collections import namedtuple
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200']
+
+SMART_ATTR = {
+ '1': 'Read Error Rate',
+ '2': 'Throughput Performance',
+ '3': 'Spin-Up Time',
+ '4': 'Start/Stop Count',
+ '5': 'Reallocated Sectors Count',
+ '6': 'Read Channel Margin',
+ '7': 'Seek Error Rate',
+ '8': 'Seek Time Performance',
+ '9': 'Power-On Hours Count',
+ '10': 'Spin-up Retries',
+ '11': 'Calibration Retries',
+ '12': 'Power Cycle Count',
+ '13': 'Soft Read Error Rate',
+ '100': 'Erase/Program Cycles',
+ '103': 'Translation Table Rebuild',
+ '108': 'Unknown (108)',
+ '170': 'Reserved Block Count',
+ '171': 'Program Fail Count',
+ '172': 'Erase Fail Count',
+ '173': 'Wear Leveller Worst Case Erase Count',
+ '174': 'Unexpected Power Loss',
+ '175': 'Program Fail Count',
+ '176': 'Erase Fail Count',
+ '177': 'Wear Leveling Count',
+ '178': 'Used Reserved Block Count',
+ '179': 'Used Reserved Block Count',
+ '180': 'Unused Reserved Block Count',
+ '181': 'Program Fail Count',
+ '182': 'Erase Fail Count',
+ '183': 'SATA Downshifts',
+ '184': 'End-to-End error',
+ '185': 'Head Stability',
+ '186': 'Induced Op-Vibration Detection',
+ '187': 'Reported Uncorrectable Errors',
+ '188': 'Command Timeout',
+ '189': 'High Fly Writes',
+ '190': 'Temperature',
+ '191': 'G-Sense Errors',
+ '192': 'Power-Off Retract Cycles',
+ '193': 'Load/Unload Cycles',
+ '194': 'Temperature',
+ '195': 'Hardware ECC Recovered',
+ '196': 'Reallocation Events',
+ '197': 'Current Pending Sectors',
+ '198': 'Off-line Uncorrectable',
+ '199': 'UDMA CRC Error Rate',
+ '200': 'Write Error Rate',
+ '201': 'Soft Read Errors',
+ '202': 'Data Address Mark Errors',
+ '203': 'Run Out Cancel',
+ '204': 'Soft ECC Corrections',
+ '205': 'Thermal Asperity Rate',
+ '206': 'Flying Height',
+ '207': 'Spin High Current',
+ '209': 'Offline Seek Performance',
+ '220': 'Disk Shift',
+ '221': 'G-Sense Error Rate',
+ '222': 'Loaded Hours',
+ '223': 'Load/Unload Retries',
+ '224': 'Load Friction',
+ '225': 'Load/Unload Cycles',
+ '226': 'Load-in Time',
+ '227': 'Torque Amplification Count',
+ '228': 'Power-Off Retracts',
+ '230': 'GMR Head Amplitude',
+ '231': 'Temperature',
+ '232': 'Available Reserved Space',
+ '233': 'Media Wearout Indicator',
+ '240': 'Head Flying Hours',
+ '241': 'Total LBAs Written',
+ '242': 'Total LBAs Read',
+ '250': 'Read Error Retry Rate'
+}
+
+NAMED_DISKS = namedtuple('disks', ['name', 'size', 'number'])
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.regex = compile(r'(\d+);(\d+);(\d+)')
+ self.log_path = self.configuration.get('log_path', '/var/log/smartd')
+ self.raw_values = self.configuration.get('raw_values')
+ self.attr = self.configuration.get('smart_attributes', [])
+ self.previous_data = dict()
+
+ def check(self):
+ # Can't start without readable smartd disk log files
+ disks = find_disks_in_log_path(self.log_path)
+ if not disks:
+ self.error('Can\'t locate any smartd log files in %s' % self.log_path)
+ return False
+
+ # List of namedtuples to track smartd log file size
+ self.disks = [NAMED_DISKS(name=disks[i], size=0, number=i) for i in range(len(disks))]
+
+ if self._get_data():
+ self.create_charts()
+ return True
+ else:
+ self.error('Can\'t collect any data. Sorry.')
+ return False
+
+ def _get_raw_data(self, queue, disk):
+ # The idea is to open a file.
+ # Jump to the end.
+ # Seek backward until '\n' symbol appears
+ # If '\n' is found or it's the beginning of the file
+ # readline()! (last or first line)
+ with open(disk, 'rb') as f:
+ f.seek(-2, 2)
+ while f.read(1) != b'\n':
+ f.seek(-2, 1)
+ if f.tell() == 0:
+ break
+ result = f.readline()
+
+ result = result.decode()
+ result = self.regex.findall(result)
+
+ queue.put([basename(disk), result])
+
+ def _get_data(self):
+ threads, result = list(), list()
+ queue = Queue()
+ to_netdata = dict()
+
+ # If the size has not changed there is no reason to poll log files.
+ disks = [disk for disk in self.disks if self.size_changed(disk)]
+ if disks:
+ for disk in disks:
+ th = Thread(target=self._get_raw_data, args=(queue, disk.name))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.append(queue.get())
+ else:
+ # Data from last real poll
+ return self.previous_data or None
+
+ for elem in result:
+ for a, n, r in elem[1]:
+ to_netdata.update({'_'.join([elem[0], a]): r if self.raw_values else n})
+
+ self.previous_data.update(to_netdata)
+
+ return to_netdata or None
+
+ def size_changed(self, disk):
+ # We are not interested in log files that:
+ # 1. have zero size
+ # 2. have not changed in size since the last poll
+ try:
+ size = getsize(disk.name)
+ if size != disk.size and size:
+ self.disks[disk.number] = disk._replace(size=size)
+ return True
+ else:
+ return False
+ except OSError:
+ # Remove unreadable/nonexistent log files from the list of disks and from previous_data
+ self.disks.remove(disk)
+ self.previous_data = dict([(k, v) for k, v in self.previous_data.items() if basename(disk.name) not in k])
+ return False
+
+ def create_charts(self):
+
+ def create_lines(attrid):
+ result = list()
+ for disk in self.disks:
+ name = basename(disk.name)
+ result.append(['_'.join([name, attrid]), name[:name.index('.')], 'absolute'])
+ return result
+
+ # Add additional smart attributes to ORDER. If something goes wrong, we don't care.
+ try:
+ ORDER.extend(list(set(self.attr.split()) & set(SMART_ATTR) - set(ORDER)))
+ except Exception:
+ pass
+ self.order = [''.join(['attrid', i]) for i in ORDER]
+ self.definitions = dict()
+ units = 'raw' if self.raw_values else 'normalized'
+
+ for k, v in dict([(k, v) for k, v in SMART_ATTR.items() if k in ORDER]).items():
+ self.definitions.update({''.join(['attrid', k]): {
+ 'options': [None, v, units, v, 'smartd.attrid' + k, 'line'],
+ 'lines': create_lines(k)}})
+
+def find_disks_in_log_path(log_path):
+ # smartd log file is OK if:
+ # 1. it is a file
+ # 2. file name ends with '.csv'
+ # 3. file is readable
+ if not isdir(log_path): return None
+ return [join(log_path, f) for f in listdir(log_path)
+ if all([isfile(join(log_path, f)), f.endswith('.csv'), access(join(log_path, f), R_OK)])]
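
To illustrate the collection step above: the module reads only the last line of each smartd attribute CSV and extracts every "id;normalized;raw" triplet with the (\d+);(\d+);(\d+) regex. A minimal sketch follows; the log path and the sample line are hypothetical, and the keys mirror the '<disk>_<attribute id>' dimension ids built in _get_data().

# Minimal sketch of parsing one smartd attribute CSV line (sample data is hypothetical).
from re import compile
from os.path import basename

regex = compile(r'(\d+);(\d+);(\d+)')

log_file = '/var/log/smartd/sda.ata.csv'                               # hypothetical path
last_line = '2017-01-22 10:00:05;\t1;100;0;\t194;34;34;\t197;100;0;'   # hypothetical content
raw_values = False                                                     # same meaning as the module option

to_netdata = dict()
disk = basename(log_file)
for attr_id, normalized, raw in regex.findall(last_line):
    # one dimension per '<disk>_<attribute id>'
    to_netdata['_'.join([disk, attr_id])] = raw if raw_values else normalized

print(to_netdata)
# {'sda.ata.csv_1': '100', 'sda.ata.csv_194': '34', 'sda.ata.csv_197': '100'}
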
diff --git a/python.d/tomcat.chart.py b/python.d/tomcat.chart.py
index 31f6ab248..c20f85e1e 100644
--- a/python.d/tomcat.chart.py
+++ b/python.d/tomcat.chart.py
@@ -2,11 +2,13 @@
# Description: tomcat netdata python.d module
# Author: Pawel Krupa (paulfantom)
-# Python version higher than 2.7 is needed to run this module.
-
from base import UrlService
-import xml.etree.ElementTree as ET # phone home...
-#from xml.parsers.expat import errors
+from re import compile
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -20,23 +22,23 @@ CHARTS = {
'accesses': {
'options': [None, "Requests", "requests/s", "statistics", "tomcat.accesses", "area"],
'lines': [
- ["accesses", None, 'incremental']
+ ["requestCount", 'accesses', 'incremental']
]},
'volume': {
'options': [None, "Volume", "KB/s", "volume", "tomcat.volume", "area"],
'lines': [
- ["volume", None, 'incremental', 1, 1024]
+ ["bytesSent", 'volume', 'incremental', 1, 1024]
]},
'threads': {
'options': [None, "Threads", "current threads", "statistics", "tomcat.threads", "line"],
'lines': [
- ["current", None, "absolute"],
- ["busy", None, "absolute"]
+ ["currentThreadCount", 'current', "absolute"],
+ ["currentThreadsBusy", 'busy', "absolute"]
]},
'jvm': {
'options': [None, "JVM Free Memory", "MB", "statistics", "tomcat.jvm", "area"],
'lines': [
- ["jvm", None, "absolute", 1, 1048576]
+ ["free", None, "absolute", 1, 1048576]
]}
}
@@ -44,68 +46,31 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- if len(self.url) == 0:
- self.url = "http://localhost:8080/manager/status?XML=true"
+ self.url = self.configuration.get('url', "http://127.0.0.1:8080/manager/status?XML=true")
self.order = ORDER
self.definitions = CHARTS
- self.port = 8080
def check(self):
- if UrlService.check(self):
- return True
-
- # get port from url
- self.port = 0
- for i in self.url.split('/'):
- try:
- int(i[-1])
- self.port = i.split(':')[-1]
- break
- except:
- pass
- if self.port == 0:
- self.port = 80
-
- test = self._get_data()
- if test is None or len(test) == 0:
- return False
- else:
- return True
+ netloc = urlparse(self.url).netloc.rpartition(':')
+ if netloc[1] == ':': port = netloc[2]
+ else: port = 80
+
+ self.regex_jvm = compile(r'<jvm>.*?</jvm>')
+ self.regex_connector = compile(r'[a-z-]+%s.*?/connector' % port)
+ self.regex = compile(r'([\w]+)=\\?[\'\"](\d+)\\?[\'\"]')
+
+ return UrlService.check(self)
def _get_data(self):
"""
Format data received from http request
:return: dict
"""
- try:
- raw = self._get_raw_data()
- try:
- data = ET.fromstring(raw)
- except ET.ParseError as e:
- # if e.code == errors.codes[errors.XML_ERROR_JUNK_AFTER_DOC_ELEMENT]:
- if e.code == 9:
- end = raw.find('</status>')
- end += 9
- raw = raw[:end]
- self.debug(raw)
- data = ET.fromstring(raw)
- else:
- raise Exception(e)
-
- memory = data.find('./jvm/memory')
- threads = data.find("./connector[@name='\"http-bio-" + str(self.port) + "\"']/threadInfo")
- requests = data.find("./connector[@name='\"http-bio-" + str(self.port) + "\"']/requestInfo")
+ data = self._get_raw_data()
+ if data:
+ jvm = self.regex_jvm.findall(data) or ['']
+ connector = self.regex_connector.findall(data) or ['']
+ data = dict(self.regex.findall(''.join([jvm[0], connector[0]])))
+
+ return data or None
- return {'accesses': requests.attrib['requestCount'],
- 'volume': requests.attrib['bytesSent'],
- 'current': threads.attrib['currentThreadCount'],
- 'busy': threads.attrib['currentThreadsBusy'],
- 'jvm': memory.attrib['free']}
- except (ValueError, AttributeError) as e:
- self.debug(str(e))
- return None
- except SyntaxError as e:
- self.error("Tomcat module needs python 2.7 at least. Stopping")
- self.debug(str(e))
- except Exception as e:
- self.debug(str(e))
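
The rewritten tomcat module parses the status page with the three regexes compiled in check() instead of ElementTree. Below is a minimal sketch of that parsing against a hypothetical, shortened status XML snippet; only the free, requestCount, bytesSent and thread attributes matter to the charts.

# Minimal sketch of the regex-based parsing used above; the XML snippet is a hypothetical
# stand-in for /manager/status?XML=true output.
from re import compile

port = '8080'
status_xml = ('<status><jvm><memory free="123456789" total="268435456" max="536870912"/></jvm>'
              '<connector name=\'"http-bio-8080"\'>'
              '<threadInfo maxThreads="200" currentThreadCount="10" currentThreadsBusy="2"/>'
              '<requestInfo requestCount="55" bytesSent="1048576"/>'
              '</connector></status>')

regex_jvm = compile(r'<jvm>.*?</jvm>')
regex_connector = compile(r'[a-z-]+%s.*?/connector' % port)
regex = compile(r'([\w]+)=\\?[\'\"](\d+)\\?[\'\"]')

jvm = regex_jvm.findall(status_xml) or ['']
connector = regex_connector.findall(status_xml) or ['']
data = dict(regex.findall(''.join([jvm[0], connector[0]])))

print(data['free'], data['requestCount'], data['bytesSent'])
# 123456789 55 1048576
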
diff --git a/python.d/varnish.chart.py b/python.d/varnish.chart.py
index 2b1512f4e..2665bb383 100644
--- a/python.d/varnish.chart.py
+++ b/python.d/varnish.chart.py
@@ -4,10 +4,8 @@
from base import SimpleService
from re import compile
-from os import access as is_executable, X_OK
from subprocess import Popen, PIPE
-
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
@@ -80,17 +78,11 @@ CHARTS = {'backend_health':
'options': [None, 'Varnish uptime', 'seconds', 'Uptime', 'varnish.uptime', 'line']}
}
-DIRECTORIES = ['/bin/', '/usr/bin/', '/sbin/', '/usr/sbin/']
-
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
- try:
- self.varnish = [''.join([directory, 'varnishstat']) for directory in DIRECTORIES
- if is_executable(''.join([directory, 'varnishstat']), X_OK)][0]
- except IndexError:
- self.varnish = False
+ self.varnish = self.find_binary('varnishstat')
self.rgx_all = compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
# Could be
# VBE.boot.super_backend.pipe_hdrbyte (new)
@@ -104,7 +96,7 @@ class Service(SimpleService):
def check(self):
# Can't start without 'varnishstat' command
if not self.varnish:
- self.error('\'varnishstat\' command was not found in %s or not executable by netdata' % DIRECTORIES)
+ self.error('Can\'t locate \'varnishstat\' binary or binary is not executable by netdata')
return False
# If command is present and we can execute it we need to make sure..
@@ -145,7 +137,7 @@ class Service(SimpleService):
if not raw_data:
return None
- return raw_data
+ return raw_data.decode()
def _get_data(self):
"""
@@ -160,10 +152,10 @@ class Service(SimpleService):
return None
# 1. ALL data from 'varnishstat -1'. t - type(MAIN, MEMPOOL etc)
- to_netdata = {k: int(v) for t, k, v in data_all}
+ to_netdata = dict([(k, int(v)) for t, k, v in data_all])
# 2. ADD backend statistics
- to_netdata.update({'_'.join([n, k]): int(v) for n, k, v in data_backend})
+ to_netdata.update(dict([('_'.join([n, k]), int(v)) for n, k, v in data_backend]))
# 3. ADD additional keys to dict
# 3.1 Cache hit/miss/hitpass OVERALL in percent
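
For reference, the varnish module feeds 'varnishstat -1' output through the rgx_all regex shown in the context above and builds a flat metrics dict with dict([...]) (kept instead of a dict comprehension for older Python, as in the change above). A minimal sketch with two hypothetical stat lines:

# Minimal sketch of parsing varnishstat-style output; the two sample lines are hypothetical.
from re import compile

rgx_all = compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')

raw = ('MAIN.uptime                 86400         1.00 Child process uptime\n'
       'MAIN.cache_hit              12345         0.14 Cache hits\n')

data_all = rgx_all.findall(raw)
to_netdata = dict([(k, int(v)) for t, k, v in data_all])
print(to_netdata)   # {'uptime': 86400, 'cache_hit': 12345}
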
diff --git a/python.d/web_log.chart.py b/python.d/web_log.chart.py
new file mode 100644
index 000000000..cbc8cd235
--- /dev/null
+++ b/python.d/web_log.chart.py
@@ -0,0 +1,653 @@
+# -*- coding: utf-8 -*-
+# Description: web log netdata python.d module
+# Author: l2isbad
+
+from base import LogService
+import re
+import bisect
+from os import access, R_OK
+from os.path import getsize
+from collections import namedtuple
+from copy import deepcopy
+
+priority = 60000
+retries = 60
+
+ORDER = ['response_statuses', 'response_codes', 'bandwidth', 'response_time', 'requests_per_url', 'http_method',
+ 'http_version', 'requests_per_ipproto', 'clients', 'clients_all']
+CHARTS = {
+ 'response_codes': {
+ 'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', '2xx', 'incremental'],
+ ['5xx', '5xx', 'incremental'],
+ ['3xx', '3xx', 'incremental'],
+ ['4xx', '4xx', 'incremental'],
+ ['1xx', '1xx', 'incremental'],
+ ['0xx', 'other', 'incremental'],
+ ['unmatched', 'unmatched', 'incremental']
+ ]},
+ 'bandwidth': {
+ 'options': [None, 'Bandwidth', 'KB/s', 'bandwidth', 'web_log.bandwidth', 'area'],
+ 'lines': [
+ ['resp_length', 'received', 'incremental', 1, 1024],
+ ['bytes_sent', 'sent', 'incremental', -1, 1024]
+ ]},
+ 'response_time': {
+ 'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
+ 'lines': [
+ ['resp_time_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_avg', 'avg', 'incremental', 1, 1000]
+ ]},
+ 'clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
+ 'lines': [
+ ['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
+ ['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
+ ]},
+ 'clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
+ ['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
+ ]},
+ 'http_method': {
+ 'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
+ 'lines': [
+ ['GET', 'GET', 'incremental', 1, 1]
+ ]},
+ 'http_version': {
+ 'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
+ 'web_log.http_version', 'stacked'],
+ 'lines': []},
+ 'requests_per_ipproto': {
+ 'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
+ 'stacked'],
+ 'lines': [
+ ['req_ipv4', 'ipv4', 'incremental', 1, 1],
+ ['req_ipv6', 'ipv6', 'incremental', 1, 1]
+ ]},
+ 'response_statuses': {
+ 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses',
+ 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]}
+}
+
+NAMED_URL_PATTERN = namedtuple('URL_PATTERN', ['description', 'pattern'])
+
+DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ """
+ :param configuration:
+ :param name:
+ # self._get_data = None # will be assigned in 'check' method.
+ # self.order = None # will be assigned in 'create_*_method' method.
+ # self.definitions = None # will be assigned in 'create_*_method' method.
+ """
+ LogService.__init__(self, configuration=configuration, name=name)
+ # Variables from module configuration file
+ self.log_type = self.configuration.get('type', 'web_access')
+ self.log_path = self.configuration.get('path')
+ self.url_pattern = self.configuration.get('categories') # dict
+ self.custom_log_format = self.configuration.get('custom_log_format') # dict
+ # Instance variables
+ self.regex = None # will be assigned in 'find_regex' or 'find_regex_custom' method
+ self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0,
+ 'resp_time_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0,
+ '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0,
+ 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
+ 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0}
+
+ def check(self):
+ """
+ :return: bool
+
+ 1. "log_path" is specified in the module configuration file
+ 2. "log_path" must be readable by netdata user and must exist
+ 3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
+ 4. other checks depends on log "type"
+ """
+ if not self.log_path:
+ self.error('log path is not specified')
+ return False
+
+ if not access(self.log_path, R_OK):
+ self.error('%s is not readable or does not exist' % self.log_path)
+ return False
+
+ if not getsize(self.log_path):
+ self.error('%s is empty' % self.log_path)
+ return False
+
+ # Read last line (or first if there is only one line)
+ with open(self.log_path, 'rb') as logs:
+ logs.seek(-2, 2)
+ while logs.read(1) != b'\n':
+ logs.seek(-2, 1)
+ if logs.tell() == 0:
+ break
+ last_line = logs.readline()
+
+ try:
+ last_line = last_line.decode()
+ except UnicodeDecodeError:
+ try:
+ last_line = last_line.decode(encoding='utf-8')
+ except (TypeError, UnicodeDecodeError) as error:
+ self.error(str(error))
+ return False
+
+ if self.log_type == 'web_access':
+ self.unique_all_time = list() # sorted list of unique IPs
+ self.detailed_response_codes = self.configuration.get('detailed_response_codes', True)
+ self.detailed_response_aggregate = self.configuration.get('detailed_response_aggregate', True)
+ self.all_time = self.configuration.get('all_time', True)
+
+ # Custom_log_format or predefined log format.
+ if self.custom_log_format:
+ match_dict, error = self.find_regex_custom(last_line)
+ else:
+ match_dict, error = self.find_regex(last_line)
+
+ # "match_dict" is None if there are any problems
+ if match_dict is None:
+ self.error(str(error))
+ return False
+
+ # self.url_pattern check
+ if self.url_pattern:
+ self.url_pattern = check_req_per_url_pattern('rpu', self.url_pattern)
+
+ self.create_access_charts(match_dict) # Create charts
+ self._get_data = self._get_access_data # _get_data assignment
+ else:
+ self.error('Not implemented')
+ return False
+
+ # Double check
+ if not self.regex:
+ self.error('That can not happen, but it happened. "regex" is None')
+
+ self.info('Collected data: %s' % list(match_dict.keys()))
+ return True
+
+ def find_regex_custom(self, last_line):
+ """
+ :param last_line: str: literally last line from log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+
+ We are here only if "custom_log_format" is specified in the configuration. We need to make sure:
+ 1. "custom_log_format" is a dict
+ 2. "pattern" is in "custom_log_format" and is a <str> instance
+ 3. if "time_multiplier" is in "custom_log_format", it is an <int> instance
+
+ If all parameters are OK, we need to make sure:
+ 1. The pattern search succeeds
+ 2. The pattern contains named subgroups (?P<subgroup_name>) (= "match_dict")
+
+ If the pattern search succeeds, we need to make sure:
+ 1. All mandatory keys ['address', 'code', 'bytes_sent', 'method', 'url'] are in "match_dict"
+
+ If this is True, we need to make sure:
+ 1. All mandatory key values from "match_dict" have the correct format
+ ("code" is an integer, "method" is an uppercase word, etc.)
+
+ If non-mandatory keys are in "match_dict", we need to make sure:
+ 1. All non-mandatory key values from "match_dict" ['resp_length', 'resp_time'] have the correct format
+ ("resp_length" is an integer or "-", "resp_time" is an integer or float)
+
+ """
+ if not hasattr(self.custom_log_format, 'keys'):
+ return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
+
+ pattern = self.custom_log_format.get('pattern')
+ if not (pattern and isinstance(pattern, str)):
+ return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
+
+ resp_time_func = self.custom_log_format.get('time_multiplier') or 0
+
+ if not isinstance(resp_time_func, int):
+ return find_regex_return(msg='Custom log: "time_multiplier" is not an integer')
+
+ try:
+ regex = re.compile(pattern)
+ except re.error as error:
+ return find_regex_return(msg='Pattern compile error: %s' % str(error))
+
+ match = regex.search(last_line)
+ if match:
+ match_dict = match.groupdict() or None
+ else:
+ return find_regex_return(msg='Custom log: pattern search FAILED')
+
+ if match_dict is None:
+ return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
+ ' (you need to use ?P<subgroup_name>)')
+ else:
+ mandatory_dict = {'address': r'[\da-f.:]+',
+ 'code': r'[1-9]\d{2}',
+ 'method': r'[A-Z]+',
+ 'bytes_sent': r'\d+|-'}
+ optional_dict = {'resp_length': r'\d+',
+ 'resp_time': r'[\d.]+',
+ 'http_version': r'\d\.\d'}
+
+ mandatory_values = set(mandatory_dict) - set(match_dict)
+ if mandatory_values:
+ return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
+ % list(mandatory_values))
+ else:
+ for key in mandatory_dict:
+ if not re.search(mandatory_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ optional_values = set(optional_dict) & set(match_dict)
+ for key in optional_values:
+ if not re.search(optional_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ dot_in_time = '.' in match_dict.get('resp_time', '')
+ if dot_in_time:
+ self.resp_time_func = lambda time: time * (resp_time_func or 1000000)
+ else:
+ self.resp_time_func = lambda time: time * (resp_time_func or 1)
+
+ self.regex = regex
+ return find_regex_return(match_dict=match_dict)
+
+ def find_regex(self, last_line):
+ """
+ :param last_line: str: literally last line from log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+ We need to find the appropriate pattern for the current log file.
+ The logic is to run a regex search through the string with all predefined patterns
+ until we find a match or fail.
+ """
+ # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
+ # 5. Bytes sent 6. Response length 7. Response process time
+ acs_default = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)')
+
+ acs_apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+) ')
+
+ acs_apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+)'
+ r'(?: |$)')
+
+ acs_nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+) ')
+
+ acs_nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)')
+
+ def func_usec(time):
+ return time
+
+ def func_sec(time):
+ return time * 1000000
+
+ r_regex = [acs_apache_ext_insert, acs_apache_ext_append, acs_nginx_ext_insert,
+ acs_nginx_ext_append, acs_default]
+ r_function = [func_usec, func_usec, func_sec, func_sec, func_usec]
+ regex_function = zip(r_regex, r_function)
+
+ match_dict = dict()
+ for regex, function in regex_function:
+ match = regex.search(last_line)
+ if match:
+ self.regex = regex
+ self.resp_time_func = function
+ match_dict = match.groupdict()
+ break
+
+ return find_regex_return(match_dict=match_dict or None,
+ msg='Unknown log format. You need to use "custom_log_format" feature.')
+
+ def create_access_charts(self, match_dict):
+ """
+ :param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
+ :return:
+ Create additional charts depending on the 'match_dict' keys and configuration file options
+ 1. The 'response_time' chart is removed if there is no 'resp_time' in match_dict.
+ 2. Other charts are added or removed depending on the yes/no options in the configuration
+ """
+
+ def find_job_name(override_name, name):
+ """
+ :param override_name: str: 'name' var from configuration file
+ :param name: str: 'job_name' from configuration file
+ :return: str: new job name
+ We need this for dynamic charts. Actually same logic as in python.d.plugin.
+ """
+ add_to_name = override_name or name
+ if add_to_name:
+ return '_'.join(['web_log', re.sub('\s+', '_', add_to_name)])
+ else:
+ return 'web_log'
+
+ self.order = ORDER[:]
+ self.definitions = deepcopy(CHARTS)
+
+ job_name = find_job_name(self.override_name, self.name)
+
+ self.http_method_chart = 'CHART %s.http_method' \
+ ' "" "Requests Per HTTP Method" requests/s "http methods"' \
+ ' web_log.http_method stacked 11 %s\n' \
+ 'DIMENSION GET GET incremental\n' % (job_name, self.update_every)
+ self.http_version_chart = 'CHART %s.http_version' \
+ ' "" "Requests Per HTTP Version" requests/s "http versions"' \
+ ' web_log.http_version stacked 12 %s\n' % (job_name, self.update_every)
+
+ # Remove 'response_time' chart from ORDER if resp_time not in match_dict
+ if 'resp_time' not in match_dict:
+ self.order.remove('response_time')
+ # Remove 'clients_all' chart from ORDER if specified in the configuration
+ if not self.all_time:
+ self.order.remove('clients_all')
+ # Add 'detailed_response_codes' chart if specified in the configuration
+ if self.detailed_response_codes:
+ self.detailed_chart = list()
+ for prio, add_to_dim in enumerate(DET_RESP_AGGR):
+ self.detailed_chart.append('CHART %s.detailed_response_codes%s ""'
+ ' "Detailed Response Codes %s" requests/s responses'
+ ' web_log.detailed_response_codes%s stacked %s %s\n'
+ % (job_name, add_to_dim, add_to_dim[1:], add_to_dim,
+ str(prio), self.update_every))
+
+ codes = DET_RESP_AGGR[:1] if self.detailed_response_aggregate else DET_RESP_AGGR[1:]
+ for code in codes:
+ self.order.append('detailed_response_codes%s' % code)
+ self.definitions['detailed_response_codes%s' % code] = {'options':
+ [None,
+ 'Detailed Response Codes %s' % code[1:],
+ 'requests/s',
+ 'responses',
+ 'web_log.detailed_response_codes%s' % code,
+ 'stacked'],
+ 'lines': []}
+
+ # Add 'requests_per_url' chart if specified in the configuration
+ if self.url_pattern:
+ self.definitions['requests_per_url'] = {'options': [None, 'Requests Per Url', 'requests/s',
+ 'urls', 'web_log.requests_per_url', 'stacked'],
+ 'lines': [['rpu_other', 'other', 'incremental']]}
+ for elem in self.url_pattern:
+ self.definitions['requests_per_url']['lines'].append([elem.description, elem.description[4:],
+ 'incremental'])
+ self.data.update({elem.description: 0})
+ self.data.update({'rpu_other': 0})
+ else:
+ self.order.remove('requests_per_url')
+
+ def add_new_dimension(self, dimension, line_list, chart_string, key):
+ """
+ :param dimension: str: response status code. Ex.: '202', '499'
+ :param line_list: list: Ex.: ['202', '202', 'incremental']
+ :param chart_string: Current string we need to pass to netdata to rebuild the chart
+ :param key: str: CHARTS dict key (chart name). Ex.: 'response_time'
+ :return: str: new chart string = previous + new dimensions
+ """
+ self.data.update({dimension: 0})
+ # The SET method checks if dim is in _dimensions
+ self._dimensions.append(dimension)
+ # The UPDATE method does SET only if dim is in definitions
+ self.definitions[key]['lines'].append(line_list)
+ chart = chart_string
+ chart += "%s %s\n" % ('DIMENSION', ' '.join(line_list))
+ print(chart)
+ return chart
+
+ def _get_access_data(self):
+ """
+ Parse new log lines
+ :return: dict OR None
+ None if _get_raw_data method fails.
+ In all other cases - dict.
+ """
+ raw = self._get_raw_data()
+ if raw is None:
+ return None
+
+ request_time, unique_current = list(), list()
+ request_counter = {'count': 0, 'sum': 0}
+ ip_address_counter = {'unique_cur_ip': 0}
+ for line in raw:
+ match = self.regex.search(line)
+ if match:
+ match_dict = match.groupdict()
+ try:
+ code = ''.join([match_dict['code'][0], 'xx'])
+ self.data[code] += 1
+ except KeyError:
+ self.data['0xx'] += 1
+ # detailed response code
+ if self.detailed_response_codes:
+ self._get_data_detailed_response_codes(match_dict['code'])
+ # response statuses
+ self._get_data_statuses(match_dict['code'])
+ # requests per url
+ if self.url_pattern:
+ self._get_data_per_url(match_dict['url'])
+ # requests per http method
+ self._get_data_http_method(match_dict['method'])
+ # requests per http version
+ if 'http_version' in match_dict:
+ self._get_data_http_version(match_dict['http_version'])
+ # bandwidth sent
+ bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
+ self.data['bytes_sent'] += int(bytes_sent)
+ # request processing time and bandwidth received
+ if 'resp_length' in match_dict:
+ self.data['resp_length'] += int(match_dict['resp_length'])
+ if 'resp_time' in match_dict:
+ resp_time = self.resp_time_func(float(match_dict['resp_time']))
+ bisect.insort_left(request_time, resp_time)
+ request_counter['count'] += 1
+ request_counter['sum'] += resp_time
+ # requests per ip proto
+ proto = 'ipv4' if '.' in match_dict['address'] else 'ipv6'
+ self.data['req_' + proto] += 1
+ # unique clients ips
+ if address_not_in_pool(self.unique_all_time, match_dict['address'],
+ self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+ if address_not_in_pool(unique_current, match_dict['address'], ip_address_counter['unique_cur_ip']):
+ self.data['unique_cur_' + proto] += 1
+ ip_address_counter['unique_cur_ip'] += 1
+ else:
+ self.data['unmatched'] += 1
+
+ # timings
+ if request_time:
+ self.data['resp_time_min'] += int(request_time[0])
+ self.data['resp_time_avg'] += int(round(float(request_counter['sum']) / request_counter['count']))
+ self.data['resp_time_max'] += int(request_time[-1])
+ return self.data
+
+ def _get_data_detailed_response_codes(self, code):
+ """
+ :param code: str: CODE from parsed line. Ex.: '202', '499'
+ :return:
+ Calls the add_new_dimension method if the value is found for the first time
+ """
+ if code not in self.data:
+ if self.detailed_response_aggregate:
+ chart_string_copy = self.detailed_chart[0]
+ self.detailed_chart[0] = self.add_new_dimension(code, [code, code, 'incremental'],
+ chart_string_copy, 'detailed_response_codes')
+ else:
+ code_index = int(code[0]) if int(code[0]) < 6 else 6
+ chart_string_copy = self.detailed_chart[code_index]
+ chart_name = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
+ self.detailed_chart[code_index] = self.add_new_dimension(code, [code, code, 'incremental'],
+ chart_string_copy, chart_name)
+ self.data[code] += 1
+
+ def _get_data_http_method(self, method):
+ """
+ :param method: str: METHOD from parsed line. Ex.: 'GET', 'POST'
+ :return:
+ Calls the add_new_dimension method if the value is found for the first time
+ """
+ if method not in self.data:
+ chart_string_copy = self.http_method_chart
+ self.http_method_chart = self.add_new_dimension(method, [method, method, 'incremental'],
+ chart_string_copy, 'http_method')
+ self.data[method] += 1
+
+ def _get_data_http_version(self, http_version):
+ """
+ :param http_version: str: HTTP version from parsed line. Ex.: '1.1', '1.0'
+ :return:
+ Calls the add_new_dimension method if the value is found for the first time
+ """
+ http_version_dim_id = http_version.replace('.', '_')
+ if http_version_dim_id not in self.data:
+ chart_string_copy = self.http_version_chart
+ self.http_version_chart = self.add_new_dimension(http_version_dim_id,
+ [http_version_dim_id, http_version, 'incremental'],
+ chart_string_copy, 'http_version')
+ self.data[http_version_dim_id] += 1
+
+ def _get_data_per_url(self, url):
+ """
+ :param url: str: URL from parsed line
+ :return:
+ Scan through the string and count the first user-defined pattern that produces a match;
+ if none of the patterns match, the request is counted as 'rpu_other'
+ """
+ match = None
+ for elem in self.url_pattern:
+ if elem.pattern.search(url):
+ self.data[elem.description] += 1
+ match = True
+ break
+ if not match:
+ self.data['rpu_other'] += 1
+
+ def _get_data_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+
+def address_not_in_pool(pool, address, pool_size):
+ """
+ :param pool: list of ip addresses
+ :param address: ip address
+ :param pool_size: current pool size
+ :return: True if address not in pool. False if address in pool.
+ """
+ index = bisect.bisect_left(pool, address)
+ if index < pool_size:
+ if pool[index] == address:
+ return False
+ else:
+ bisect.insort_left(pool, address)
+ return True
+ else:
+ bisect.insort_left(pool, address)
+ return True
+
+
+def find_regex_return(match_dict=None, msg='Generic error message'):
+ """
+ :param match_dict: dict: re.search.groupdict() or None
+ :param msg: str: error description
+ :return: tuple:
+ """
+ return match_dict, msg
+
+
+def check_req_per_url_pattern(string, url_pattern):
+ """
+ :param string: str:
+ :param url_pattern: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
+ :return: list of named tuples or None:
+ We need to make sure all patterns are valid regular expressions
+ """
+ if not hasattr(url_pattern, 'keys'):
+ return None
+
+ result = list()
+
+ def is_valid_pattern(pattern):
+ """
+ :param pattern: str
+ :return: re.compile(pattern) or False
+ """
+ if not isinstance(pattern, str):
+ return False
+ else:
+ try:
+ compile_pattern = re.compile(pattern)
+ except re.error:
+ return False
+ else:
+ return compile_pattern
+
+ for dimension, regex in url_pattern.items():
+ valid_pattern = is_valid_pattern(regex)
+ if isinstance(dimension, str) and valid_pattern:
+ result.append(NAMED_URL_PATTERN(description='_'.join([string, dimension]), pattern=valid_pattern))
+
+ return result or None
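
As a usage illustration of find_regex() above: the predefined acs_default pattern handles the common Apache/nginx access log format and yields the named groups that drive the charts. A minimal sketch with a hypothetical log line:

# Minimal sketch of matching one access-log line with the module's acs_default pattern;
# the sample line is hypothetical.
import re

acs_default = re.compile(r'(?P<address>[\da-f.:]+)'
                         r' -.*?"(?P<method>[A-Z]+)'
                         r' (?P<url>[^ ]+)'
                         r' [A-Z]+/(?P<http_version>\d\.\d)"'
                         r' (?P<code>[1-9]\d{2})'
                         r' (?P<bytes_sent>\d+|-)')

line = '203.0.113.7 - - [22/Jan/2017:10:00:05 +0000] "GET /index.html HTTP/1.1" 200 3544'
match = acs_default.search(line)
print(match.groupdict())
# {'address': '203.0.113.7', 'method': 'GET', 'url': '/index.html',
#  'http_version': '1.1', 'code': '200', 'bytes_sent': '3544'}

The optional resp_length/resp_time groups, when present in one of the extended patterns or in a custom_log_format, additionally feed the bandwidth-received and response_time charts created above.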