author     Lennart Weller <lhw@ring0.de>   2017-07-27 09:55:55 +0000
committer  Lennart Weller <lhw@ring0.de>   2017-07-27 09:55:55 +0000
commit     1413c8953bf9ab447967fedb6246c03afa3f788e (patch)
tree       9bde7ef5a8010d08c0723badb22a24bca8926834 /python.d
parent     Release v. 1.6.0+dfsg-3 to Unstable (diff)
parent     New upstream version 1.7.0+dfsg (diff)
Updated version 1.7.0+dfsg from 'upstream/1.7.0+dfsg'
with Debian dir e101ee6549c6786aab4b3f5c4f9bbb7638f17e34
Diffstat (limited to 'python.d')
-rw-r--r--  python.d/Makefile.am               |    4
-rw-r--r--  python.d/Makefile.in               |  144
-rw-r--r--  python.d/README.md                 |  170
-rw-r--r--  python.d/apache.chart.py           |   72
-rw-r--r--  python.d/bind_rndc.chart.py        |  319
-rw-r--r--  python.d/cpufreq.chart.py          |    2
-rw-r--r--  python.d/dns_query_time.chart.py   |  135
-rw-r--r--  python.d/elasticsearch.chart.py    |  198
-rw-r--r--  python.d/fail2ban.chart.py         |  267
-rw-r--r--  python.d/go_expvar.chart.py        |  228
-rw-r--r--  python.d/haproxy.chart.py          |  254
-rw-r--r--  python.d/isc_dhcpd.chart.py        |  190
-rw-r--r--  python.d/mdstat.chart.py           |  167
-rw-r--r--  python.d/mongodb.chart.py          |   53
-rw-r--r--  python.d/mysql.chart.py            |   10
-rw-r--r--  python.d/ovpn_status_log.chart.py  |   94
-rw-r--r--  python.d/postgres.chart.py         |   12
-rw-r--r--  python.d/python_modules/base.py    |  211
-rw-r--r--  python.d/rabbitmq.chart.py         |  187
-rw-r--r--  python.d/redis.chart.py            |    5
-rw-r--r--  python.d/samba.chart.py            |  124
-rw-r--r--  python.d/smartd_log.chart.py       |   13
-rw-r--r--  python.d/web_log.chart.py          | 1179
23 files changed, 2844 insertions, 1194 deletions
diff --git a/python.d/Makefile.am b/python.d/Makefile.am
index bfe28ff2..43f25cff 100644
--- a/python.d/Makefile.am
+++ b/python.d/Makefile.am
@@ -18,12 +18,14 @@ dist_python_DATA = \
bind_rndc.chart.py \
cpufreq.chart.py \
cpuidle.chart.py \
+ dns_query_time.chart.py \
dovecot.chart.py \
elasticsearch.chart.py \
example.chart.py \
exim.chart.py \
fail2ban.chart.py \
freeradius.chart.py \
+ go_expvar.chart.py \
haproxy.chart.py \
hddtemp.chart.py \
ipfs.chart.py \
@@ -38,8 +40,10 @@ dist_python_DATA = \
phpfpm.chart.py \
postfix.chart.py \
postgres.chart.py \
+ rabbitmq.chart.py \
redis.chart.py \
retroshare.chart.py \
+ samba.chart.py \
sensors.chart.py \
squid.chart.py \
smartd_log.chart.py \
diff --git a/python.d/Makefile.in b/python.d/Makefile.in
index 9b784668..33efd42d 100644
--- a/python.d/Makefile.in
+++ b/python.d/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -17,6 +16,51 @@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -35,10 +79,10 @@ PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
-DIST_COMMON = $(dist_python_DATA) $(dist_python_SCRIPTS) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_python_SCRIPTS) \
+ $(dist_python_DATA) $(dist_pythonmodules_DATA) \
+ $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
subdir = python.d
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
@@ -86,13 +130,32 @@ am__installdirs = "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(pythondir)" \
"$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" \
"$(DESTDIR)$(pythonyaml3dir)"
SCRIPTS = $(dist_python_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DATA = $(dist_python_DATA) $(dist_pythonmodules_DATA) \
$(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -251,12 +314,14 @@ dist_python_DATA = \
bind_rndc.chart.py \
cpufreq.chart.py \
cpuidle.chart.py \
+ dns_query_time.chart.py \
dovecot.chart.py \
elasticsearch.chart.py \
example.chart.py \
exim.chart.py \
fail2ban.chart.py \
freeradius.chart.py \
+ go_expvar.chart.py \
haproxy.chart.py \
hddtemp.chart.py \
ipfs.chart.py \
@@ -271,8 +336,10 @@ dist_python_DATA = \
phpfpm.chart.py \
postfix.chart.py \
postgres.chart.py \
+ rabbitmq.chart.py \
redis.chart.py \
retroshare.chart.py \
+ samba.chart.py \
sensors.chart.py \
squid.chart.py \
smartd_log.chart.py \
@@ -368,8 +435,11 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
@$(NORMAL_INSTALL)
- test -z "$(pythondir)" || $(MKDIR_P) "$(DESTDIR)$(pythondir)"
@list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
@@ -400,8 +470,11 @@ uninstall-dist_pythonSCRIPTS:
dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
install-dist_pythonDATA: $(dist_python_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythondir)" || $(MKDIR_P) "$(DESTDIR)$(pythondir)"
@list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -418,8 +491,11 @@ uninstall-dist_pythonDATA:
dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythonmodulesdir)" || $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)"
@list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -436,8 +512,11 @@ uninstall-dist_pythonmodulesDATA:
dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythonyaml2dir)" || $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)"
@list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -454,8 +533,11 @@ uninstall-dist_pythonyaml2DATA:
dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythonyaml3dir)" || $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)"
@list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -470,11 +552,11 @@ uninstall-dist_pythonyaml3DATA:
@list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-tags: TAGS
-TAGS:
+tags TAGS:
+
+ctags CTAGS:
-ctags: CTAGS
-CTAGS:
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -618,20 +700,20 @@ uninstall-am: uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
- install-dist_pythonyaml3DATA install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am uninstall uninstall-am \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_pythonmodulesDATA \
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pythonDATA \
+ install-dist_pythonSCRIPTS install-dist_pythonmodulesDATA \
+ install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_pythonDATA \
+ uninstall-dist_pythonSCRIPTS uninstall-dist_pythonmodulesDATA \
uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA
.in:
diff --git a/python.d/README.md b/python.d/README.md
index 7df6e3e8..c4504a7c 100644
--- a/python.d/README.md
+++ b/python.d/README.md
@@ -228,6 +228,16 @@ It produces one stacked chart per CPU, showing the percentage of time spent in
each state.
---
+# dns_query_time
+
+This module provides DNS query time statistics.
+
+**Requirement:**
+* `python-dnspython` package
+
+It produces one aggregate chart or one chart per DNS server, showing the query time.
+
+---
# dovecot
@@ -473,6 +483,44 @@ and restart/reload your FREERADIUS server.
---
+# go_expvar
+
+---
+
+The `go_expvar` module can monitor any Go application that exposes its metrics with the `expvar` package from the Go standard library.
+
+`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications) for more info.
+
+For the memory statistics, it produces the following charts:
+
+1. **Heap allocations** in kB
+ * alloc: size of objects allocated on the heap
+ * inuse: size of allocated heap spans
+
+2. **Stack allocations** in kB
+ * inuse: size of allocated stack spans
+
+3. **MSpan allocations** in kB
+ * inuse: size of allocated mspan structures
+
+4. **MCache allocations** in kB
+ * inuse: size of allocated mcache structures
+
+5. **Virtual memory** in kB
+ * sys: size of reserved virtual address space
+
+6. **Live objects**
+ * live: number of live objects in memory
+
+7. **GC pauses average** in ns
+ * avg: average duration of all GC stop-the-world pauses
+
+### configuration
+
+Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#using-netdata-go_expvar-module) for detailed info about module configuration.
+
+---
+
# haproxy
Module monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current.
@@ -1198,6 +1246,60 @@ When no configuration file is found, module tries to connect to TCP/IP socket: `
---
+# rabbitmq
+
+Module monitors RabbitMQ performance and health metrics.
+
+Following charts are drawn:
+
+1. **Queued Messages**
+ * ready
+ * unacknowledged
+
+2. **Message Rates**
+ * ack
+ * redelivered
+ * deliver
+ * publish
+
+3. **Global Counts**
+ * channels
+ * consumers
+ * connections
+ * queues
+ * exchanges
+
+4. **File Descriptors**
+ * used descriptors
+
+5. **Socket Descriptors**
+ * used descriptors
+
+6. **Erlang processes**
+ * used processes
+
+7. **Memory**
+ * free memory in megabytes
+
+8. **Disk Space**
+ * free disk space in gigabytes
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 15672
+ user : 'guest'
+ pass : 'guest'
+
+```
+
+When no configuration file is found, module tries to connect to: `localhost:15672`.
+
+---
+
# redis
Get INFO data from redis instance.
@@ -1241,6 +1343,68 @@ When no configuration file is found, module tries to connect to TCP/IP socket: `
---
+# samba
+
+Performance metrics of Samba file sharing.
+
+It produces the following charts:
+
+1. **Syscall R/Ws** in kilobytes/s
+ * sendfile
+ * recvfile
+
+2. **Smb2 R/Ws** in kilobytes/s
+ * readout
+ * writein
+ * readin
+ * writeout
+
+3. **Smb2 Create/Close** in operations/s
+ * create
+ * close
+
+4. **Smb2 Info** in operations/s
+ * getinfo
+ * setinfo
+
+5. **Smb2 Find** in operations/s
+ * find
+
+6. **Smb2 Notify** in operations/s
+ * notify
+
+7. **Smb2 Lesser Ops** as counters
+ * tcon
+ * negprot
+ * tdis
+ * cancel
+ * logoff
+ * flush
+ * lock
+ * keepalive
+ * break
+ * sessetup
+
+### configuration
+
+Requires that `smbd` was compiled with profiling enabled. It also requires
+that `smbd` was started either with the `-P 1` option or with the
+`smbd profiling level` option set in `smb.conf`.
+
+This plugin uses `smbstatus -P`, which can only be executed by root. It uses
+sudo and assumes that sudo is configured such that the `netdata` user can
+execute `smbstatus` as root without a password.
+
+For example:
+
+ netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+
+```yaml
+update_every : 5 # update frequency
+```
+
+---
+
# sensors
System sensors information.
@@ -1251,6 +1415,12 @@ Charts are created dynamically.
For detailed configuration information please read [`sensors.conf`](https://github.com/firehol/netdata/blob/master/conf.d/python.d/sensors.conf) file.
+### possible issues
+
+There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
+We are tracking such cases in issue [#827](https://github.com/firehol/netdata/issues/827).
+Please join this discussion for help.
+
---
# squid
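
The samba section above depends on a passwordless sudo rule for `smbstatus -P`. A minimal sketch of checking that rule from Python; the binary path mirrors the sudoers example above and is otherwise an assumption:

```python
import subprocess

def can_run_smbstatus(binary='/usr/bin/smbstatus'):
    """Return True if `sudo smbstatus -P` runs without prompting."""
    try:
        # -n makes sudo fail immediately instead of asking for a password
        rc = subprocess.call(['sudo', '-n', binary, '-P'],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL)
    except OSError:
        return False
    return rc == 0

print(can_run_smbstatus())
```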
diff --git a/python.d/apache.chart.py b/python.d/apache.chart.py
index 2e4d16dd..71fe0300 100644
--- a/python.d/apache.chart.py
+++ b/python.d/apache.chart.py
@@ -22,7 +22,8 @@ ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec'
CHARTS = {
'bytesperreq': {
- 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request', 'statistics', 'apache.bytesperreq', 'area'],
+ 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
+ 'statistics', 'apache.bytesperreq', 'area'],
'lines': [
["size_req"]
]},
@@ -30,15 +31,19 @@ CHARTS = {
'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
'lines': [
["idle"],
- ["busy"]
+ ["idle_servers", 'idle'],
+ ["busy"],
+ ["busy_servers", 'busy']
]},
'reqpersec': {
- 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics', 'apache.reqpersec', 'area'],
+ 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+ 'apache.reqpersec', 'area'],
'lines': [
["requests_sec"]
]},
'bytespersec': {
- 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobytes/s', 'statistics', 'apache.bytesperreq', 'area'],
+ 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobytes/s', 'statistics',
+ 'apache.bytesperreq', 'area'],
'lines': [
["size_sec", None, 'absolute', 1, 1000]
]},
@@ -66,43 +71,54 @@ CHARTS = {
]}
}
+ASSIGNMENT = {"BytesPerReq": 'size_req',
+ "IdleWorkers": 'idle',
+ "IdleServers": 'idle_servers',
+ "BusyWorkers": 'busy',
+ "BusyServers": 'busy_servers',
+ "ReqPerSec": 'requests_sec',
+ "BytesPerSec": 'size_sec',
+ "Total Accesses": 'requests',
+ "Total kBytes": 'sent',
+ "ConnsTotal": 'connections',
+ "ConnsAsyncKeepAlive": 'keepalive',
+ "ConnsAsyncClosing": 'closing',
+ "ConnsAsyncWriting": 'writing'}
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- if len(self.url) == 0:
- self.url = "http://localhost/server-status?auto"
self.order = ORDER
self.definitions = CHARTS
- self.assignment = {"BytesPerReq": 'size_req',
- "IdleWorkers": 'idle',
- "BusyWorkers": 'busy',
- "ReqPerSec": 'requests_sec',
- "BytesPerSec": 'size_sec',
- "Total Accesses": 'requests',
- "Total kBytes": 'sent',
- "ConnsTotal": 'connections',
- "ConnsAsyncKeepAlive": 'keepalive',
- "ConnsAsyncClosing": 'closing',
- "ConnsAsyncWriting": 'writing'}
+ self.url = self.configuration.get('url', 'http://localhost/server-status?auto')
+
+ def check(self):
+ if UrlService.check(self):
+ if 'idle_servers' in self._data_from_check:
+ self.__module__ = 'lighttpd'
+ for chart in self.definitions:
+ opts = self.definitions[chart]['options']
+ opts[1] = opts[1].replace('apache', 'lighttpd')
+ opts[4] = opts[4].replace('apache', 'lighttpd')
+ return True
+ return False
def _get_data(self):
"""
Format data received from http request
:return: dict
"""
- try:
- raw = self._get_raw_data().split("\n")
- except AttributeError:
+ raw_data = self._get_raw_data()
+ if not raw_data:
return None
- data = {}
- for row in raw:
+ data = dict()
+
+ for row in raw_data.split('\n'):
tmp = row.split(":")
- if str(tmp[0]) in self.assignment:
+ if tmp[0] in ASSIGNMENT:
try:
- data[self.assignment[tmp[0]]] = int(float(tmp[1]))
+ data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
except (IndexError, ValueError):
- pass
- if len(data) == 0:
- return None
- return data
+ continue
+ return data or None
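
The rewrite above moves the field mapping to the module-level `ASSIGNMENT` table and makes `_get_data()` return `data or None`. A standalone sketch of that key/value parsing, run on a made-up `?auto` status payload:

```python
# Standalone sketch of the parsing done by the new _get_data();
# the status text below is invented for illustration.
ASSIGNMENT = {"Total Accesses": 'requests',
              "BytesPerReq": 'size_req',
              "BusyWorkers": 'busy',
              "IdleWorkers": 'idle'}

STATUS = """Total Accesses: 12345
BytesPerReq: 4096.5
BusyWorkers: 3
IdleWorkers: 7
Scoreboard: _W__K___"""

def parse_status(raw_data):
    data = dict()
    for row in raw_data.split('\n'):
        tmp = row.split(":")
        if tmp[0] in ASSIGNMENT:
            try:
                # values may be floats ("4096.5"), charts expect integers
                data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
            except (IndexError, ValueError):
                continue
    return data or None

print(parse_status(STATUS))
# {'requests': 12345, 'size_req': 4096, 'busy': 3, 'idle': 7}
```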
diff --git a/python.d/bind_rndc.chart.py b/python.d/bind_rndc.chart.py
index a4d75370..5a974928 100644
--- a/python.d/bind_rndc.chart.py
+++ b/python.d/bind_rndc.chart.py
@@ -2,100 +2,141 @@
# Description: bind rndc netdata python.d module
# Author: l2isbad
-from base import SimpleService
-from re import compile, findall
-from os.path import getsize, split
-from os import access as is_accessible, R_OK
+from os.path import getsize
+from os import access, R_OK
from subprocess import Popen
+from collections import defaultdict
+from base import SimpleService
priority = 60000
retries = 60
update_every = 30
-NMS = ['requests', 'responses', 'success', 'auth_answer', 'nonauth_answer', 'nxrrset', 'failure',
- 'nxdomain', 'recursion', 'duplicate', 'rejections']
-QUERIES = ['RESERVED0', 'A', 'NS', 'CNAME', 'SOA', 'PTR', 'MX', 'TXT', 'X25', 'AAAA', 'SRV', 'NAPTR',
- 'A6', 'DS', 'RRSIG', 'DNSKEY', 'SPF', 'ANY', 'DLV']
+ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
+
+CHARTS = {
+ 'name_server_statistics': {
+ 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics',
+ 'bind_rndc.name_server_statistics', 'line'],
+ 'lines': [
+ ['nms_requests', 'requests', 'incremental'],
+ ['nms_rejected_queries', 'rejected_queries', 'incremental'],
+ ['nms_success', 'success', 'incremental'],
+ ['nms_failure', 'failure', 'incremental'],
+ ['nms_responses', 'responses', 'incremental'],
+ ['nms_duplicate', 'duplicate', 'incremental'],
+ ['nms_recursion', 'recursion', 'incremental'],
+ ['nms_nxrrset', 'nxrrset', 'incremental'],
+ ['nms_nxdomain', 'nxdomain', 'incremental'],
+ ['nms_non_auth_answer', 'non_auth_answer', 'incremental'],
+ ['nms_auth_answer', 'auth_answer', 'incremental'],
+ ['nms_dropped_queries', 'dropped_queries', 'incremental'],
+ ]},
+ 'incoming_queries': {
+ 'options': [None, 'Incoming Queries', 'queries', 'incoming queries',
+ 'bind_rndc.incoming_queries', 'line'],
+ 'lines': [
+ ]},
+ 'outgoing_queries': {
+ 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries',
+ 'bind_rndc.outgoing_queries', 'line'],
+ 'lines': [
+ ]},
+ 'named_stats_size': {
+ 'options': [None, 'Named Stats File Size', 'MB', 'file size',
+ 'bind_rndc.stats_size', 'line'],
+ 'lines': [
+ ['stats_size', None, 'absolute', 1, 1 << 20]
+ ]}
+}
+
+NMS = {
+ 'nms_requests':
+ ['IPv4 requests received',
+ 'IPv6 requests received',
+ 'TCP requests received',
+ 'requests with EDNS(0) receive'],
+ 'nms_responses':
+ ['responses sent',
+ 'truncated responses sent',
+ 'responses with EDNS(0) sent',
+ 'requests with unsupported EDNS version received'],
+ 'nms_failure':
+ ['other query failures',
+ 'queries resulted in SERVFAIL'],
+ 'nms_auth_answer':
+ ['queries resulted in authoritative answer'],
+ 'nms_non_auth_answer':
+ ['queries resulted in non authoritative answer'],
+ 'nms_nxrrset':
+ ['queries resulted in nxrrset'],
+ 'nms_success':
+ ['queries resulted in successful answer'],
+ 'nms_nxdomain':
+ ['queries resulted in NXDOMAIN'],
+ 'nms_recursion':
+ ['queries caused recursion'],
+ 'nms_duplicate':
+ ['duplicate queries received'],
+ 'nms_rejected_queries':
+ ['auth queries rejected',
+ 'recursive queries rejected'],
+ 'nms_dropped_queries':
+ ['queries dropped']
+}
+
+STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
- self.regex_values = compile(r'([0-9]+) ([^\n]+)')
- # self.options = ['Incoming Requests', 'Incoming Queries', 'Outgoing Queries',
- # 'Name Server Statistics', 'Zone Maintenance Statistics', 'Resolver Statistics',
- # 'Cache DB RRsets', 'Socket I/O Statistics']
- self.options = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
- self.regex_options = [r'(%s(?= \+\+)) \+\+([^\+]+)' % option for option in self.options]
self.rndc = self.find_binary('rndc')
+ self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0,
+ nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
+ nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
+ nms_dropped_queries=0)
def check(self):
- # We cant start without 'rndc' command
if not self.rndc:
self.error('Can\'t locate \'rndc\' binary or binary is not executable by netdata')
return False
- # We cant start if stats file is not exist or not readable by netdata user
- if not is_accessible(self.named_stats_path, R_OK):
+ if not access(self.named_stats_path, R_OK):
self.error('Cannot access file %s' % self.named_stats_path)
return False
- size_before = getsize(self.named_stats_path)
run_rndc = Popen([self.rndc, 'stats'], shell=False)
run_rndc.wait()
- size_after = getsize(self.named_stats_path)
- # We cant start if netdata user has no permissions to run 'rndc stats'
if not run_rndc.returncode:
- # 'rndc' was found, stats file is exist and readable and we can run 'rndc stats'. Lets go!
- self.create_charts()
-
- # BIND APPEND dump on every run 'rndc stats'
- # that is why stats file size can be VERY large if update_interval too small
- dump_size_24hr = round(86400 / self.update_every * (int(size_after) - int(size_before)) / 1048576, 3)
-
- # If update_every too small we should WARN user
- if self.update_every < 30:
- self.info('Update_every %s is NOT recommended for use. Increase the value to > 30' % self.update_every)
-
- self.info('With current update_interval it will be + %s MB every 24hr. '
- 'Don\'t forget to create logrotate conf file for %s' % (dump_size_24hr, self.named_stats_path))
-
- self.info('Plugin was started successfully.')
-
return True
- else:
- self.error('Not enough permissions to run "%s stats"' % self.rndc)
- return False
+ self.error('Not enough permissions to run "%s stats"' % self.rndc)
+ return False
def _get_raw_data(self):
"""
Run 'rndc stats' and read last dump from named.stats
- :return: tuple(
- file.read() obj,
- named.stats file size
- )
+ :return: dict
"""
+ result = dict()
try:
current_size = getsize(self.named_stats_path)
- except OSError:
- return None, None
-
- run_rndc = Popen([self.rndc, 'stats'], shell=False)
- run_rndc.wait()
-
- if run_rndc.returncode:
- return None, None
-
- try:
- with open(self.named_stats_path) as bind_rndc:
- bind_rndc.seek(current_size)
- result = bind_rndc.read()
- except OSError:
- return None, None
- else:
- return result, current_size
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if run_rndc.returncode:
+ return None
+ with open(self.named_stats_path) as named_stats:
+ named_stats.seek(current_size)
+ result['stats'] = named_stats.readlines()
+ result['size'] = current_size
+ return result
+ except (OSError, IOError):
+ return None
def _get_data(self):
"""
@@ -103,72 +144,98 @@ class Service(SimpleService):
:return: dict
"""
- raw_data, size = self._get_raw_data()
+ raw_data = self._get_raw_data()
if raw_data is None:
return None
-
- rndc_stats = dict()
-
- # Result: dict.
- # topic = Cache DB RRsets; body = A 178303 NS 86790 ... ; desc = A; value = 178303
- # {'Cache DB RRsets': [('A', 178303), ('NS', 286790), ...],
- # {Incoming Queries': [('RESERVED0', 8), ('A', 4557317680), ...],
- # ......
- for regex in self.regex_options:
- rndc_stats.update(dict([(topic, [(desc, int(value)) for value, desc in self.regex_values.findall(body)])
- for topic, body in findall(regex, raw_data)]))
-
- nms = dict(rndc_stats.get('Name Server Statistics', []))
-
- inc_queries = dict([('i' + k, 0) for k in QUERIES])
- inc_queries.update(dict([('i' + k, v) for k, v in rndc_stats.get('Incoming Queries', [])]))
- out_queries = dict([('o' + k, 0) for k in QUERIES])
- out_queries.update(dict([('o' + k, v) for k, v in rndc_stats.get('Outgoing Queries', [])]))
-
- to_netdata = dict()
- to_netdata['requests'] = sum([v for k, v in nms.items() if 'request' in k and 'received' in k])
- to_netdata['responses'] = sum([v for k, v in nms.items() if 'responses' in k and 'sent' in k])
- to_netdata['success'] = nms.get('queries resulted in successful answer', 0)
- to_netdata['auth_answer'] = nms.get('queries resulted in authoritative answer', 0)
- to_netdata['nonauth_answer'] = nms.get('queries resulted in non authoritative answer', 0)
- to_netdata['nxrrset'] = nms.get('queries resulted in nxrrset', 0)
- to_netdata['failure'] = sum([nms.get('queries resulted in SERVFAIL', 0), nms.get('other query failures', 0)])
- to_netdata['nxdomain'] = nms.get('queries resulted in NXDOMAIN', 0)
- to_netdata['recursion'] = nms.get('queries caused recursion', 0)
- to_netdata['duplicate'] = nms.get('duplicate queries received', 0)
- to_netdata['rejections'] = nms.get('recursive queries rejected', 0)
- to_netdata['stats_size'] = size
-
- to_netdata.update(inc_queries)
- to_netdata.update(out_queries)
- return to_netdata
-
- def create_charts(self):
- self.order = ['stats_size', 'bind_stats', 'incoming_q', 'outgoing_q']
- self.definitions = {
- 'bind_stats': {
- 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics', 'bind_rndc.stats', 'line'],
- 'lines': [
- ]},
- 'incoming_q': {
- 'options': [None, 'Incoming queries', 'queries','incoming queries', 'bind_rndc.incq', 'line'],
- 'lines': [
- ]},
- 'outgoing_q': {
- 'options': [None, 'Outgoing queries', 'queries','outgoing queries', 'bind_rndc.outq', 'line'],
- 'lines': [
- ]},
- 'stats_size': {
- 'options': [None, '%s file size' % split(self.named_stats_path)[1].capitalize(), 'megabytes',
- '%s size' % split(self.named_stats_path)[1], 'bind_rndc.size', 'line'],
- 'lines': [
- ["stats_size", None, "absolute", 1, 1048576]
- ]}
- }
- for elem in QUERIES:
- self.definitions['incoming_q']['lines'].append(['i' + elem, elem, 'incremental'])
- self.definitions['outgoing_q']['lines'].append(['o' + elem, elem, 'incremental'])
-
- for elem in NMS:
- self.definitions['bind_stats']['lines'].append([elem, None, 'incremental'])
+ parsed = dict()
+ for stat in STATS:
+ parsed[stat] = parse_stats(field=stat,
+ named_stats=raw_data['stats'])
+
+ self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
+
+ for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']):
+ parsed_key, chart_name = elem[0], elem[1]
+ for dimension_id, value in queries_mapper(data=parsed[parsed_key],
+ add=chart_name[:9]).items():
+ if dimension_id not in self.data:
+ dimension = dimension_id.replace(chart_name[:9], '')
+ self._add_new_dimension(dimension_id=dimension_id,
+ dimension=dimension,
+ chart_name=chart_name,
+ priority=self.priority + self.order.index(chart_name))
+ self.data[dimension_id] = value
+
+ self.data['stats_size'] = raw_data['size']
+ return self.data
+
+
+def parse_stats(field, named_stats):
+ """
+ :param field: str:
+ :param named_stats: list:
+ :return: dict
+
+ Example:
+    field: 'Incoming Queries'
+    named_stats (list of lines):
+ ++ Incoming Requests ++
+ 1405660 QUERY
+ 3 NOTIFY
+ ++ Incoming Queries ++
+ 1214961 A
+ 75 NS
+ 2 CNAME
+ 2897 SOA
+ 35544 PTR
+ 14 MX
+ 5822 TXT
+ 145974 AAAA
+ 371 SRV
+ ++ Outgoing Queries ++
+ ...
+
+ result:
+    {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
+ """
+ data = dict()
+ ns = iter(named_stats)
+ for line in ns:
+ if field not in line:
+ continue
+ while True:
+ try:
+ line = next(ns)
+ except StopIteration:
+ break
+ if '++' not in line:
+ if '[' in line:
+ continue
+ v, k = line.strip().split(' ', 1)
+ data[k] = int(v)
+ continue
+ break
+ break
+ return data
+
+
+def nms_mapper(data):
+ """
+ :param data: dict
+ :return: dict(defaultdict)
+ """
+ result = defaultdict(int)
+ for k, v in NMS.items():
+ for elem in v:
+ result[k] += data.get(elem, 0)
+ return result
+
+
+def queries_mapper(data, add):
+ """
+ :param data: dict
+ :param add: str
+ :return: dict
+ """
+ return dict([(add + k, v) for k, v in data.items()])
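
To see how `parse_stats()` walks the appended dump, here it is run standalone on the sample lines from its docstring (the function body is copied from above; the real module feeds it the lines read from named.stats after `rndc stats`):

```python
SAMPLE = """++ Incoming Requests ++
 1405660 QUERY
 3 NOTIFY
++ Incoming Queries ++
 1214961 A
 75 NS
 2897 SOA
++ Outgoing Queries ++
 5 NS
""".splitlines()

def parse_stats(field, named_stats):
    # copied from the module above: collect "<value> <name>" pairs that
    # follow the "++ <field> ++" header, stop at the next "++" header
    data = dict()
    ns = iter(named_stats)
    for line in ns:
        if field not in line:
            continue
        while True:
            try:
                line = next(ns)
            except StopIteration:
                break
            if '++' not in line:
                if '[' in line:
                    continue
                v, k = line.strip().split(' ', 1)
                data[k] = int(v)
                continue
            break
        break
    return data

print(parse_stats('Incoming Queries', SAMPLE))
# {'A': 1214961, 'NS': 75, 'SOA': 2897}
```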
diff --git a/python.d/cpufreq.chart.py b/python.d/cpufreq.chart.py
index e28bdea8..d5544b7b 100644
--- a/python.d/cpufreq.chart.py
+++ b/python.d/cpufreq.chart.py
@@ -100,7 +100,7 @@ class Service(SimpleService):
self.error("couldn't find a method to read cpufreq statistics")
return False
- for name in self.assignment.keys():
+ for name in sorted(self.assignment, key=lambda v: int(v[3:])):
self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000])
return True
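
The cpufreq change replaces iteration over the dict in arbitrary order with a numeric sort on the dimension names. A quick illustration of why the `int(v[3:])` key matters, assuming the 'cpuN' naming that the slice implies:

```python
cpus = ['cpu0', 'cpu10', 'cpu2', 'cpu1', 'cpu11']

# plain string sort puts 'cpu10' and 'cpu11' before 'cpu2'
print(sorted(cpus))
# ['cpu0', 'cpu1', 'cpu10', 'cpu11', 'cpu2']

# sorting on the numeric suffix keeps the chart dimensions in CPU order
print(sorted(cpus, key=lambda v: int(v[3:])))
# ['cpu0', 'cpu1', 'cpu2', 'cpu10', 'cpu11']
```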
diff --git a/python.d/dns_query_time.chart.py b/python.d/dns_query_time.chart.py
new file mode 100644
index 00000000..9053d9a1
--- /dev/null
+++ b/python.d/dns_query_time.chart.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# Description: dns_query_time netdata python.d module
+# Author: l2isbad
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+try:
+ import dns.message, dns.query, dns.name
+ DNS_PYTHON = True
+except ImportError:
+ DNS_PYTHON = False
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+from random import choice
+from threading import Thread
+from socket import gethostbyname, gaierror
+from base import SimpleService
+
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 60
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.timeout = self.configuration.get('response_timeout', 4)
+ self.aggregate = self.configuration.get('aggregate', True)
+ self.domains = self.configuration.get('domains')
+ self.server_list = self.configuration.get('dns_servers')
+
+ def check(self):
+ if not DNS_PYTHON:
+ self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
+ return False
+
+ self.timeout = self.timeout if isinstance(self.timeout, int) else 4
+ self.update_every = self.timeout + 1 if self.update_every <= self.timeout else self.update_every
+
+ if not all([self.domains, self.server_list,
+ isinstance(self.server_list, str), isinstance(self.domains, str)]):
+ self.error('server_list and domain_list can\'t be empty')
+ return False
+ else:
+ self.domains, self.server_list = self.domains.split(), self.server_list.split()
+
+ for ns in self.server_list:
+ if not check_ns(ns):
+ self.info('Bad NS: %s' % ns)
+ self.server_list.remove(ns)
+ if not self.server_list:
+ return False
+
+ data = self._get_data(timeout=1)
+
+ down_servers = [s for s in data if data[s] == -100]
+ for down in down_servers:
+ down = down[3:].replace('_', '.')
+ self.info('Removed due to non response %s' % down)
+ self.server_list.remove(down)
+ if not self.server_list:
+ return False
+
+ self._data_from_check = data
+ self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list)
+ self.info(str({'domains': len(self.domains), 'servers': self.server_list}))
+ return True
+
+ def _get_data(self, timeout=None):
+ return dns_request(self.server_list, timeout or self.timeout, self.domains)
+
+
+def dns_request(server_list, timeout, domains):
+ threads = list()
+ que = Queue()
+ result = dict()
+
+ def dns_req(ns, t, q):
+ domain = dns.name.from_text(choice(domains))
+ request = dns.message.make_query(domain, dns.rdatatype.A)
+
+ try:
+ dns_start = time()
+ dns.query.udp(request, ns, timeout=t)
+ dns_end = time()
+ query_time = round((dns_end - dns_start) * 1000)
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time})
+ except dns.exception.Timeout:
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): -100})
+
+ for server in server_list:
+ th = Thread(target=dns_req, args=(server, timeout, que))
+ th.start()
+ threads.append(th)
+
+ for th in threads:
+ th.join()
+ result.update(que.get())
+
+ return result
+
+
+def check_ns(ns):
+ try:
+ return gethostbyname(ns)
+ except gaierror:
+ return False
+
+
+def create_charts(aggregate, server_list):
+ if aggregate:
+ order = ['dns_group']
+ definitions = {'dns_group': {'options': [None, 'DNS Response Time', 'ms', 'name servers',
+ 'dns_query_time.response_time', 'line'], 'lines': []}}
+ for ns in server_list:
+ definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
+
+ return order, definitions
+ else:
+ order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
+ definitions = dict()
+ for ns in server_list:
+ definitions[''.join(['dns_', ns.replace('.', '_')])] = {'options': [None, 'DNS Response Time', 'ms', ns,
+ 'dns_query_time.response_time', 'area'],
+ 'lines': [['_'.join(['ns', ns.replace('.', '_')]),
+ ns, 'absolute']]}
+ return order, definitions
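
A standalone sketch of the conventions used above: every name server becomes an `ns_*` dimension id, a timed-out query is reported as the sentinel -100, and `check()` turns that sentinel back into the address it removes from `server_list` (sample values are made up):

```python
servers = ['8.8.8.8', '1.1.1.1']

# dimension ids as built in dns_req() / create_charts()
dimension_ids = ['_'.join(['ns', ns.replace('.', '_')]) for ns in servers]
print(dimension_ids)                     # ['ns_8_8_8_8', 'ns_1_1_1_1']

# a result dict as returned by dns_request(); -100 marks a timeout
data = {'ns_8_8_8_8': 23, 'ns_1_1_1_1': -100}

down_servers = [s for s in data if data[s] == -100]
for down in down_servers:
    down = down[3:].replace('_', '.')    # strip 'ns_' prefix, restore dots
    print('Removed due to non response %s' % down)   # 1.1.1.1
```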
diff --git a/python.d/elasticsearch.chart.py b/python.d/elasticsearch.chart.py
index 430227f6..9ec08719 100644
--- a/python.d/elasticsearch.chart.py
+++ b/python.d/elasticsearch.chart.py
@@ -85,6 +85,21 @@ HEALTH_STATS = [
('active_shards_percent_as_number', 'health_active_shards_percent_as_number', None)
]
+LATENCY = {
+ 'query_latency':
+ {'total': 'query_total',
+ 'spent_time': 'query_time_in_millis'},
+ 'fetch_latency':
+ {'total': 'fetch_total',
+ 'spent_time': 'fetch_time_in_millis'},
+ 'indexing_latency':
+ {'total': 'indexing_index_total',
+ 'spent_time': 'indexing_index_time_in_millis'},
+ 'flushing_latency':
+ {'total': 'flush_total',
+ 'spent_time': 'flush_total_time_in_millis'}
+}
+
# charts order (can be overridden if you want less charts, or different order)
ORDER = ['search_perf_total', 'search_perf_current', 'search_perf_time', 'search_latency', 'index_perf_total',
'index_perf_current', 'index_perf_time', 'index_latency', 'jvm_mem_heap', 'jvm_gc_count',
@@ -95,34 +110,34 @@ ORDER = ['search_perf_total', 'search_perf_current', 'search_perf_time', 'search
CHARTS = {
'search_perf_total': {
- 'options': [None, 'Total number of queries, fetches', 'number of', 'search performance',
+ 'options': [None, 'Queries And Fetches', 'number of', 'search performance',
'es.search_query_total', 'stacked'],
'lines': [
['query_total', 'queries', 'incremental'],
['fetch_total', 'fetches', 'incremental']
]},
'search_perf_current': {
- 'options': [None, 'Number of queries, fetches in progress', 'number of', 'search performance',
+ 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
'es.search_query_current', 'stacked'],
'lines': [
['query_current', 'queries', 'absolute'],
['fetch_current', 'fetches', 'absolute']
]},
'search_perf_time': {
- 'options': [None, 'Time spent on queries, fetches', 'seconds', 'search performance',
+ 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
'es.search_time', 'stacked'],
'lines': [
['query_time_in_millis', 'query', 'incremental', 1, 1000],
['fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
]},
'search_latency': {
- 'options': [None, 'Query and fetch latency', 'ms', 'search performance', 'es.search_latency', 'stacked'],
+ 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'es.search_latency', 'stacked'],
'lines': [
['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000]
]},
'index_perf_total': {
- 'options': [None, 'Total number of documents indexed, index refreshes, index flushes to disk', 'number of',
+ 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
'indexing performance', 'es.index_performance_total', 'stacked'],
'lines': [
['indexing_index_total', 'indexed', 'incremental'],
@@ -130,13 +145,13 @@ CHARTS = {
['flush_total', 'flushes', 'incremental']
]},
'index_perf_current': {
- 'options': [None, 'Number of documents currently being indexed', 'currently indexed',
+ 'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
'indexing performance', 'es.index_performance_current', 'stacked'],
'lines': [
['indexing_index_current', 'documents', 'absolute']
]},
'index_perf_time': {
- 'options': [None, 'Time spent on indexing, refreshing, flushing', 'seconds', 'indexing performance',
+ 'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
'es.search_time', 'stacked'],
'lines': [
['indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
@@ -144,33 +159,33 @@ CHARTS = {
['flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
]},
'index_latency': {
- 'options': [None, 'Indexing and flushing latency', 'ms', 'indexing performance',
+ 'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
'es.index_latency', 'stacked'],
'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000],
['flushing_latency', 'flushing', 'absolute', 1, 1000]
]},
'jvm_mem_heap': {
- 'options': [None, 'JVM heap currently in use/committed', 'percent/MB', 'memory usage and gc',
+ 'options': [None, 'JVM Heap Currently in Use/Committed', 'percent/MB', 'memory usage and gc',
'es.jvm_heap', 'area'],
'lines': [
['jvm_heap_percent', 'inuse', 'absolute'],
['jvm_heap_commit', 'commit', 'absolute', -1, 1048576]
]},
'jvm_gc_count': {
- 'options': [None, 'Count of garbage collections', 'counts', 'memory usage and gc', 'es.gc_count', 'stacked'],
+ 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'es.gc_count', 'stacked'],
'lines': [
['young_collection_count', 'young', 'incremental'],
['old_collection_count', 'old', 'incremental']
]},
'jvm_gc_time': {
- 'options': [None, 'Time spent on garbage collections', 'ms', 'memory usage and gc', 'es.gc_time', 'stacked'],
+ 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc', 'es.gc_time', 'stacked'],
'lines': [
['young_collection_time_in_millis', 'young', 'incremental'],
['old_collection_time_in_millis', 'old', 'incremental']
]},
'thread_pool_qr_q': {
- 'options': [None, 'Number of queued threads in thread pool', 'queued threads', 'queues and rejections',
+ 'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
'es.thread_pool_queued', 'stacked'],
'lines': [
['bulk_queue', 'bulk', 'absolute'],
@@ -179,7 +194,7 @@ CHARTS = {
['merge_queue', 'merge', 'absolute']
]},
'thread_pool_qr_r': {
- 'options': [None, 'Number of rejected threads in thread pool', 'rejected threads', 'queues and rejections',
+ 'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
'es.thread_pool_rejected', 'stacked'],
'lines': [
['bulk_rejected', 'bulk', 'absolute'],
@@ -188,19 +203,19 @@ CHARTS = {
['merge_rejected', 'merge', 'absolute']
]},
'fdata_cache': {
- 'options': [None, 'Fielddata cache size', 'MB', 'fielddata cache', 'es.fdata_cache', 'line'],
+ 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'es.fdata_cache', 'line'],
'lines': [
['index_fdata_memory', 'cache', 'absolute', 1, 1048576]
]},
'fdata_ev_tr': {
- 'options': [None, 'Fielddata evictions and circuit breaker tripped count', 'number of events',
+ 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
'fielddata cache', 'es.evictions_tripped', 'line'],
'lines': [
['evictions', None, 'incremental'],
['tripped', None, 'incremental']
]},
'cluster_health_nodes': {
- 'options': [None, 'Nodes and tasks statistics', 'units', 'cluster health API',
+ 'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
'es.cluster_health_nodes', 'stacked'],
'lines': [
['health_number_of_nodes', 'nodes', 'absolute'],
@@ -209,7 +224,7 @@ CHARTS = {
['health_number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
]},
'cluster_health_status': {
- 'options': [None, 'Cluster status', 'status', 'cluster health API',
+ 'options': [None, 'Cluster Status', 'status', 'cluster health API',
'es.cluster_health_status', 'area'],
'lines': [
['status_green', 'green', 'absolute'],
@@ -220,7 +235,7 @@ CHARTS = {
['status_yellow', 'yellow', 'absolute']
]},
'cluster_health_shards': {
- 'options': [None, 'Shards statistics', 'shards', 'cluster health API',
+ 'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
'es.cluster_health_shards', 'stacked'],
'lines': [
['health_active_shards', 'active_shards', 'absolute'],
@@ -231,7 +246,7 @@ CHARTS = {
['health_active_shards_percent_as_number', 'active_percent', 'absolute']
]},
'cluster_stats_nodes': {
- 'options': [None, 'Nodes statistics', 'nodes', 'cluster stats API',
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
'es.cluster_nodes', 'stacked'],
'lines': [
['count_data_only', 'data_only', 'absolute'],
@@ -241,46 +256,46 @@ CHARTS = {
['count_client', 'client', 'absolute']
]},
'cluster_stats_query_cache': {
- 'options': [None, 'Query cache statistics', 'queries', 'cluster stats API',
+ 'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
'es.cluster_query_cache', 'stacked'],
'lines': [
['query_cache_hit_count', 'hit', 'incremental'],
['query_cache_miss_count', 'miss', 'incremental']
]},
'cluster_stats_docs': {
- 'options': [None, 'Docs statistics', 'count', 'cluster stats API',
+ 'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
'es.cluster_docs', 'line'],
'lines': [
['docs_count', 'docs', 'absolute']
]},
'cluster_stats_store': {
- 'options': [None, 'Store statistics', 'MB', 'cluster stats API',
+ 'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
'es.cluster_store', 'line'],
'lines': [
['store_size_in_bytes', 'size', 'absolute', 1, 1048567]
]},
'cluster_stats_indices_shards': {
- 'options': [None, 'Indices and shards statistics', 'count', 'cluster stats API',
+ 'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
'es.cluster_indices_shards', 'stacked'],
'lines': [
['indices_count', 'indices', 'absolute'],
['shards_total', 'shards', 'absolute']
]},
'host_metrics_transport': {
- 'options': [None, 'Cluster communication transport metrics', 'kbit/s', 'host metrics',
+ 'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
'es.host_transport', 'area'],
'lines': [
['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
]},
'host_metrics_file_descriptors': {
- 'options': [None, 'Available file descriptors in percent', 'percent', 'host metrics',
+ 'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
'es.host_descriptors', 'area'],
'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10]
]},
'host_metrics_http': {
- 'options': [None, 'Opened HTTP connections', 'connections', 'host metrics',
+ 'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
'es.host_http_connections', 'line'],
'lines': [
['http_current_open', 'opened', 'absolute', 1, 1]
@@ -300,12 +315,13 @@ class Service(UrlService):
self.methods = list()
def check(self):
- # We can't start if <host> AND <port> not specified
- if not all([self.host, self.port, isinstance(self.host, str), isinstance(self.port, (str, int))]):
+ if not all([self.host,
+ self.port,
+ isinstance(self.host, str),
+ isinstance(self.port, (str, int))]):
self.error('Host is not defined in the module configuration file')
return False
- # It as a bad idea to use hostname.
# Hostname -> ip address
try:
self.host = gethostbyname(self.host)
@@ -313,45 +329,33 @@ class Service(UrlService):
self.error(str(error))
return False
- scheme = 'http' if self.scheme else 'https'
+ scheme = 'http' if self.scheme == 'http' else 'https'
# Add handlers (auth, self signed cert accept)
self.url = '%s://%s:%s' % (scheme, self.host, self.port)
- self._UrlService__add_openers()
+ self.opener = self._build_opener()
# Create URL for every Elasticsearch API
url_node_stats = '%s://%s:%s/_nodes/_local/stats' % (scheme, self.host, self.port)
url_cluster_health = '%s://%s:%s/_cluster/health' % (scheme, self.host, self.port)
url_cluster_stats = '%s://%s:%s/_cluster/stats' % (scheme, self.host, self.port)
- # Create list of enabled API calls
user_choice = [bool(self.configuration.get('node_stats', True)),
bool(self.configuration.get('cluster_health', True)),
bool(self.configuration.get('cluster_stats', True))]
- avail_methods = [METHODS(get_data_function=self._get_node_stats_, url=url_node_stats),
- METHODS(get_data_function=self._get_cluster_health_, url=url_cluster_health),
- METHODS(get_data_function=self._get_cluster_stats_, url=url_cluster_stats)]
+ avail_methods = [METHODS(get_data_function=self._get_node_stats_,
+ url=url_node_stats),
+ METHODS(get_data_function=self._get_cluster_health_,
+ url=url_cluster_health),
+ METHODS(get_data_function=self._get_cluster_stats_,
+ url=url_cluster_stats)]
# Remove disabled API calls from 'avail methods'
self.methods = [avail_methods[e[0]] for e in enumerate(avail_methods) if user_choice[e[0]]]
-
- # Run _get_data for ALL active API calls.
- api_check_result = dict()
- data_from_check = dict()
- for method in self.methods:
- try:
- api_check_result[method.url] = method.get_data_function(None, method.url)
- data_from_check.update(api_check_result[method.url] or dict())
- except KeyError as error:
- self.error('Failed to parse %s. Error: %s' % (method.url, str(error)))
- return False
-
- # We can start ONLY if all active API calls returned NOT None
- if not all(api_check_result.values()):
- self.error('Plugin could not get data from all APIs')
+ data = self._get_data()
+ if not data:
return False
- else:
- self._data_from_check = data_from_check
- return True
+ self._data_from_check = data
+ return True
def _get_data(self):
threads = list()
@@ -359,7 +363,8 @@ class Service(UrlService):
result = dict()
for method in self.methods:
- th = Thread(target=method.get_data_function, args=(queue, method.url))
+ th = Thread(target=method.get_data_function,
+ args=(queue, method.url))
th.start()
threads.append(th)
@@ -378,18 +383,18 @@ class Service(UrlService):
raw_data = self._get_raw_data(url)
if not raw_data:
- return queue.put(dict()) if queue else None
- else:
- data = loads(raw_data)
+ return queue.put(dict())
- to_netdata = fetch_data_(raw_data=data, metrics_list=HEALTH_STATS)
+ data = loads(raw_data)
+ to_netdata = fetch_data_(raw_data=data,
+ metrics_list=HEALTH_STATS)
- to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
- 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
- current_status = 'status_' + data['status']
- to_netdata[current_status] = 1
+ to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
+ 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
+ current_status = 'status_' + data['status']
+ to_netdata[current_status] = 1
- return queue.put(to_netdata) if queue else to_netdata
+ return queue.put(to_netdata)
def _get_cluster_stats_(self, queue, url):
"""
@@ -400,13 +405,13 @@ class Service(UrlService):
raw_data = self._get_raw_data(url)
if not raw_data:
- return queue.put(dict()) if queue else None
- else:
- data = loads(raw_data)
+ return queue.put(dict())
- to_netdata = fetch_data_(raw_data=data, metrics_list=CLUSTER_STATS)
+ data = loads(raw_data)
+ to_netdata = fetch_data_(raw_data=data,
+ metrics_list=CLUSTER_STATS)
- return queue.put(to_netdata) if queue else to_netdata
+ return queue.put(to_netdata)
def _get_node_stats_(self, queue, url):
"""
@@ -417,47 +422,46 @@ class Service(UrlService):
raw_data = self._get_raw_data(url)
if not raw_data:
- return queue.put(dict()) if queue else None
- else:
- data = loads(raw_data)
-
- node = list(data['nodes'].keys())[0]
- to_netdata = fetch_data_(raw_data=data['nodes'][node], metrics_list=NODE_STATS)
+ return queue.put(dict())
- # Search performance latency
- to_netdata['query_latency'] = self.find_avg_(to_netdata['query_total'],
- to_netdata['query_time_in_millis'], 'query_latency')
- to_netdata['fetch_latency'] = self.find_avg_(to_netdata['fetch_total'],
- to_netdata['fetch_time_in_millis'], 'fetch_latency')
+ data = loads(raw_data)
- # Indexing performance latency
- to_netdata['indexing_latency'] = self.find_avg_(to_netdata['indexing_index_total'],
- to_netdata['indexing_index_time_in_millis'], 'index_latency')
- to_netdata['flushing_latency'] = self.find_avg_(to_netdata['flush_total'],
- to_netdata['flush_total_time_in_millis'], 'flush_latency')
+ node = list(data['nodes'].keys())[0]
+ to_netdata = fetch_data_(raw_data=data['nodes'][node],
+ metrics_list=NODE_STATS)
+ # Search, index, flush, fetch performance latency
+ for key in LATENCY:
+ try:
+ to_netdata[key] = self.find_avg_(total=to_netdata[LATENCY[key]['total']],
+ spent_time=to_netdata[LATENCY[key]['spent_time']],
+ key=key)
+ except KeyError:
+ continue
+ if 'open_file_descriptors' in to_netdata and 'max_file_descriptors' in to_netdata:
to_netdata['file_descriptors_used'] = round(float(to_netdata['open_file_descriptors'])
/ to_netdata['max_file_descriptors'] * 1000)
- return queue.put(to_netdata) if queue else to_netdata
+ return queue.put(to_netdata)
- def find_avg_(self, value1, value2, key):
+ def find_avg_(self, total, spent_time, key):
if key not in self.latency:
- self.latency.update({key: [value1, value2]})
+ self.latency[key] = dict(total=total,
+ spent_time=spent_time)
return 0
- else:
- if not self.latency[key][0] == value1:
- latency = round(float(value2 - self.latency[key][1]) / float(value1 - self.latency[key][0]) * 1000)
- self.latency.update({key: [value1, value2]})
- return latency
- else:
- self.latency.update({key: [value1, value2]})
- return 0
+ if self.latency[key]['total'] != total:
+ latency = float(spent_time - self.latency[key]['spent_time'])\
+ / float(total - self.latency[key]['total']) * 1000
+ self.latency[key]['total'] = total
+ self.latency[key]['spent_time'] = spent_time
+ return latency
+ self.latency[key]['spent_time'] = spent_time
+ return 0
def fetch_data_(raw_data, metrics_list):
to_netdata = dict()
- for metric, new_name, function in metrics_list:
+ for metric, new_name, func in metrics_list:
value = raw_data
for key in metric.split('.'):
try:
@@ -465,7 +469,7 @@ def fetch_data_(raw_data, metrics_list):
except KeyError:
break
if not isinstance(value, dict) and key:
- to_netdata[new_name or key] = value if not function else function(value)
+ to_netdata[new_name or key] = value if not func else func(value)
return to_netdata
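
The reworked `find_avg_()` above derives latency from two consecutive polls: the delta of time spent divided by the delta of operation count, times 1000 to match the 1/1000 divisor on the latency chart lines. A worked example with made-up numbers:

```python
# two consecutive node-stats polls (invented numbers)
prev = {'total': 1000, 'spent_time': 2500}   # 1000 queries, 2500 ms spent
curr = {'total': 1040, 'spent_time': 2620}   # 40 more queries, 120 more ms

latency = float(curr['spent_time'] - prev['spent_time']) \
    / float(curr['total'] - prev['total']) * 1000
print(latency)
# 3000.0 -> charted as 3 ms per query once the chart divides by 1000
```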
diff --git a/python.d/fail2ban.chart.py b/python.d/fail2ban.chart.py
index c7d24e8c..35761e89 100644
--- a/python.d/fail2ban.chart.py
+++ b/python.d/fail2ban.chart.py
@@ -2,131 +2,210 @@
# Description: fail2ban log netdata python.d module
# Author: l2isbad
-from base import LogService
-from re import compile
-
-try:
- from itertools import filterfalse
-except ImportError:
- from itertools import ifilterfalse as filterfalse
+from re import compile as r_compile
from os import access as is_accessible, R_OK
-from os.path import isdir
+from os.path import isdir, getsize
from glob import glob
+import bisect
+from base import LogService
priority = 60000
retries = 60
-REGEX = compile(r'\[([A-Za-z-_]+)][^\[\]]*?(?<!# )enabled = true')
-ORDER = ['jails_group']
+REGEX_JAILS = r_compile(r'\[([A-Za-z-_0-9]+)][^\[\]]*?(?<!# )enabled = (?:(true|false))')
+REGEX_DATA = r_compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>(?:(U|B)))[a-z]+ (?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})')
+ORDER = ['jails_bans', 'jails_in_jail']
class Service(LogService):
+ """
+ fail2ban log class
+ Reads logs line by line
+ Jail auto detection included
+ It produces following charts:
+ * Bans per second for every jail
+ * Banned IPs for every jail (since the last restart of netdata)
+ """
def __init__(self, configuration=None, name=None):
LogService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
+ self.definitions = dict()
self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
- self.conf_dir = self.configuration.get('conf_dir', '')
- try:
- self.exclude = self.configuration['exclude'].split()
- except (KeyError, AttributeError):
- self.exclude = []
+ self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
+ self.exclude = self.configuration.get('exclude')
def _get_data(self):
"""
Parse new log lines
:return: dict
"""
- try:
- raw = self._get_raw_data()
- if raw is None:
- return None
- elif not raw:
- return self.data
- except (ValueError, AttributeError):
+ raw = self._get_raw_data()
+ if raw is None:
return None
+ elif not raw:
+ return self.to_netdata
# Fail2ban logs looks like
# 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
- data = dict(
- zip(
- self.jails_list,
- [len(list(filterfalse(lambda line: (jail + '] Ban') not in line, raw))) for jail in self.jails_list]
- ))
-
- for jail in data:
- self.data[jail] += data[jail]
-
- return self.data
+ for row in raw:
+ match = REGEX_DATA.search(row)
+ if match:
+ match_dict = match.groupdict()
+ jail, action, ipaddr = match_dict['jail'], match_dict['action'], match_dict['ipaddr']
+ if jail in self.jails_list:
+ if action == 'B':
+ self.to_netdata[jail] += 1
+ if address_not_in_jail(self.banned_ips[jail], ipaddr, self.to_netdata[jail + '_in_jail']):
+ self.to_netdata[jail + '_in_jail'] += 1
+ else:
+ if ipaddr in self.banned_ips[jail]:
+ self.banned_ips[jail].remove(ipaddr)
+ self.to_netdata[jail + '_in_jail'] -= 1
+
+ return self.to_netdata
def check(self):
+ """
+ :return: bool
- # Check "log_path" is accessible.
- # If NOT STOP plugin
- if not is_accessible(self.log_path, R_OK):
- self.error('Cannot access file %s' % self.log_path)
- return False
- jails_list = list()
-
- if self.conf_dir:
- dir_jails, error = parse_conf_dir(self.conf_dir)
- jails_list.extend(dir_jails)
- if not dir_jails:
- self.error(error)
-
- if self.conf_path:
- path_jails, error = parse_conf_path(self.conf_path)
- jails_list.extend(path_jails)
- if not path_jails:
- self.error(error)
+ Check if the "log_path" is not empty and readable
+ """
- # If for some reason parse failed we still can START with default jails_list.
- self.jails_list = list(set(jails_list) - set(self.exclude)) or ['ssh']
- self.data = dict([(jail, 0) for jail in self.jails_list])
- self.create_dimensions()
- self.info('Plugin successfully started. Jails: %s' % self.jails_list)
+ if not (is_accessible(self.log_path, R_OK) and getsize(self.log_path) != 0):
+ self.error('%s is not readable or empty' % self.log_path)
+ return False
+ self.jails_list, self.to_netdata, self.banned_ips = self.jails_auto_detection_()
+ self.definitions = create_definitions_(self.jails_list)
+ self.info('Jails: %s' % self.jails_list)
return True
- def create_dimensions(self):
- self.definitions = {
- 'jails_group': {'options': [None, "Jails ban statistics", "bans/s", 'jails', 'jail.ban', 'line'],
- 'lines': []}}
- for jail in self.jails_list:
- self.definitions['jails_group']['lines'].append([jail, jail, 'incremental'])
-
+ def jails_auto_detection_(self):
+ """
+ :return: <tuple>
-def parse_conf_dir(conf_dir):
- if not isdir(conf_dir):
- return list(), '%s is not a directory' % conf_dir
+ * jails_list - list of enabled jails (['ssh', 'apache', ...])
+ * to_netdata - dict ({'ssh': 0, 'ssh_in_jail': 0, ...})
+ * banned_ips - stores all banned IPs per jail ({'ssh': ['1.2.3.4', '5.6.7.8', ...], ...})
+ """
+ raw_jails_list = list()
+ jails_list = list()
- jail_local = list(filter(lambda local: is_accessible(local, R_OK), glob(conf_dir + '/*.local')))
- jail_conf = list(filter(lambda conf: is_accessible(conf, R_OK), glob(conf_dir + '/*.conf')))
+ for raw_jail in parse_configuration_files_(self.conf_path, self.conf_dir, self.error):
+ raw_jails_list.extend(raw_jail)
- if not (jail_local or jail_conf):
- return list(), '%s is empty or not readable' % conf_dir
+ for jail, status in raw_jails_list:
+ if status == 'true' and jail not in jails_list:
+ jails_list.append(jail)
+ elif status == 'false' and jail in jails_list:
+ jails_list.remove(jail)
- # According "man jail.conf" files could be *.local AND *.conf
- # *.conf files parsed first. Changes in *.local overrides configuration in *.conf
- if jail_conf:
- jail_local.extend([conf for conf in jail_conf if conf[:-5] not in [local[:-6] for local in jail_local]])
+ # If for some reason parse failed we still can START with default jails_list.
+ jails_list = list(set(jails_list) - set(self.exclude.split()
+ if isinstance(self.exclude, str) else list())) or ['ssh']
+
+ to_netdata = dict([(jail, 0) for jail in jails_list])
+ to_netdata.update(dict([(jail + '_in_jail', 0) for jail in jails_list]))
+ banned_ips = dict([(jail, list()) for jail in jails_list])
+
+ return jails_list, to_netdata, banned_ips
+
+
+def create_definitions_(jails_list):
+ """
+ Creates chart definitions
+ """
+
+ definitions = {
+ 'jails_bans': {'options': [None, 'Jails Ban Statistics', 'bans/s', 'bans', 'jail.bans', 'line'],
+ 'lines': []},
+ 'jails_in_jail': {'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs',
+ 'in jail', 'jail.in_jail', 'line'],
+ 'lines': []}}
+ for jail in jails_list:
+ definitions['jails_bans']['lines'].append([jail, jail, 'incremental'])
+ definitions['jails_in_jail']['lines'].append([jail + '_in_jail', jail, 'absolute'])
+
+ return definitions
+
+
+def parse_configuration_files_(jails_conf_path, jails_conf_dir, print_error):
+ """
+ :param jails_conf_path: <str>
+ :param jails_conf_dir: <str>
+ :param print_error: <function>
+ :return: <tuple>
+
+ Uses "find_jails_in_files" function to find all jails in the "jails_conf_dir" directory
+ and in the "jails_conf_path"
+
+ All files must end with ".local" or ".conf"
+ Return order is important.
+ According to man jail.conf it should be
+ * jail.conf
+ * jail.d/*.conf (in alphabetical order)
+ * jail.local
+ * jail.d/*.local (in alphabetical order)
+ """
+ path_conf, path_local, dir_conf, dir_local = list(), list(), list(), list()
+
+ # Parse files in the directory
+ if not (isinstance(jails_conf_dir, str) and isdir(jails_conf_dir)):
+ print_error('%s is not a directory' % jails_conf_dir)
+ else:
+ dir_conf = list(filter(lambda conf: is_accessible(conf, R_OK), glob(jails_conf_dir + '/*.conf')))
+ dir_local = list(filter(lambda local: is_accessible(local, R_OK), glob(jails_conf_dir + '/*.local')))
+ if not (dir_conf or dir_local):
+ print_error('%s is empty or not readable' % jails_conf_dir)
+ else:
+ dir_conf, dir_local = (find_jails_in_files(dir_conf, print_error),
+ find_jails_in_files(dir_local, print_error))
+
+ # Parse .conf and .local files
+ if isinstance(jails_conf_path, str) and jails_conf_path.endswith(('.local', '.conf')):
+ path_conf, path_local = (find_jails_in_files([jails_conf_path.split('.')[0] + '.conf'], print_error),
+ find_jails_in_files([jails_conf_path.split('.')[0] + '.local'], print_error))
+
+ return path_conf, dir_conf, path_local, dir_local
+
+
+def find_jails_in_files(list_of_files, print_error):
+ """
+ :param list_of_files: <list>
+ :param print_error: <function>
+ :return: <list>
+
+ Open a file and parse it to find all (enabled and disabled) jails
+ The output is a list of tuples:
+ [('ssh', 'true'), ('apache', 'false'), ...]
+ """
jails_list = list()
- for conf in jail_local:
- with open(conf, 'rt') as f:
- raw_data = f.read()
-
- data = ' '.join(raw_data.split())
- jails_list.extend(REGEX.findall(data))
- jails_list = list(set(jails_list))
-
- return jails_list, 'can\'t locate any jails in %s. Default jail is [\'ssh\']' % conf_dir
-
-
-def parse_conf_path(conf_path):
- if not is_accessible(conf_path, R_OK):
- return list(), '%s is not readable' % conf_path
-
- with open(conf_path, 'rt') as jails_conf:
- raw_data = jails_conf.read()
-
- data = raw_data.split()
- jails_list = REGEX.findall(' '.join(data))
- return jails_list, 'can\'t locate any jails in %s. Default jail is [\'ssh\']' % conf_path
+ for conf in list_of_files:
+ if is_accessible(conf, R_OK):
+ with open(conf, 'rt') as conf_file:
+ raw_data = conf_file.read()
+ data = ' '.join(raw_data.split())
+ jails_list.extend(REGEX_JAILS.findall(data))
+ else:
+ print_error('%s is not readable or does not exist' % conf)
+ return jails_list
+
+
+def address_not_in_jail(pool, address, pool_size):
+ """
+ :param pool: <list>
+ :param address: <str>
+ :param pool_size: <int>
+ :return: bool
+
+ Checks if the address is in the pool.
+ If not, the address is added
+ """
+ index = bisect.bisect_left(pool, address)
+ if index < pool_size and pool[index] == address:
+ return False
+ bisect.insort_left(pool, address)
+ return True
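A small self-contained sketch (made-up addresses) of the sorted-pool bookkeeping that address_not_in_jail implements above: bisect keeps the per-jail ban list sorted, so a duplicate ban is detected with a binary search before the "in jail" counter is incremented.

import bisect

pool = list()        # sorted banned IPs for one jail
in_jail = 0          # what the module reports as '<jail>_in_jail'

for ipaddr in ['10.0.0.2', '10.0.0.1', '10.0.0.2']:   # hypothetical Ban events
    index = bisect.bisect_left(pool, ipaddr)
    if index < len(pool) and pool[index] == ipaddr:
        continue                                      # already banned, counter unchanged
    bisect.insort_left(pool, ipaddr)
    in_jail += 1

print(pool, in_jail)   # ['10.0.0.1', '10.0.0.2'] 2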
diff --git a/python.d/go_expvar.chart.py b/python.d/go_expvar.chart.py
new file mode 100644
index 00000000..e1a334cc
--- /dev/null
+++ b/python.d/go_expvar.chart.py
@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+# Description: go_expvar netdata python.d module
+# Author: Jan Kral (kralewitz)
+
+from __future__ import division
+from base import UrlService
+import json
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+
+MEMSTATS_CHARTS = {
+ 'memstats_heap': {
+ 'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats', 'expvar.memstats.heap', 'line'],
+ 'lines': [
+ ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
+ ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
+ ]},
+ 'memstats_stack': {
+ 'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats', 'expvar.memstats.stack', 'line'],
+ 'lines': [
+ ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
+ ]},
+ 'memstats_mspan': {
+ 'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats', 'expvar.memstats.mspan', 'line'],
+ 'lines': [
+ ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
+ ]},
+ 'memstats_mcache': {
+ 'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats', 'expvar.memstats.mcache', 'line'],
+ 'lines': [
+ ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
+ ]},
+ 'memstats_live_objects': {
+ 'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats', 'expvar.memstats.live_objects', 'line'],
+ 'lines': [
+ ['memstats_live_objects', 'live']
+ ]},
+ 'memstats_sys': {
+ 'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats', 'expvar.memstats.sys', 'line'],
+ 'lines': [
+ ['memstats_sys', 'sys', 'absolute', 1, 1024]
+ ]},
+ 'memstats_gc_pauses': {
+ 'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats', 'expvar.memstats.gc_pauses', 'line'],
+ 'lines': [
+ ['memstats_gc_pauses', 'avg']
+ ]},
+}
+
+MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache', 'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
+
+
+def flatten(d, top='', sep='.'):
+ items = []
+ for key, val in d.items():
+ nkey = top + sep + key if top else key
+ if isinstance(val, dict):
+ items.extend(flatten(val, nkey, sep=sep).items())
+ else:
+ items.append((nkey, val))
+ return dict(items)
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+
+ # if memstats collection is enabled, add the charts and their order
+ if self.configuration.get('collect_memstats'):
+ self.definitions = MEMSTATS_CHARTS
+ self.order = MEMSTATS_ORDER
+ else:
+ self.definitions = dict()
+ self.order = list()
+
+ # if extra charts are defined, parse their config
+ extra_charts = self.configuration.get('extra_charts')
+ if extra_charts:
+ self._parse_extra_charts_config(extra_charts)
+
+ def check(self):
+ """
+ Check if the module can collect data:
+ 1) At least one JOB configuration has to be specified
+ 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one
+ extra_chart must be defined.
+
+ The configuration and URL check is provided by the UrlService class.
+ """
+
+ if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')):
+ self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.')
+ return False
+
+ return UrlService.check(self)
+
+ def _parse_extra_charts_config(self, extra_charts_config):
+
+ # a place to store the expvar keys and their types
+ self.expvars = dict()
+
+ for chart in extra_charts_config:
+
+ chart_dict = dict()
+ chart_id = chart.get('id')
+ chart_lines = chart.get('lines')
+ chart_opts = chart.get('options', dict())
+
+ if not all([chart_id, chart_lines]):
+ self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart))
+ continue
+
+ chart_dict['options'] = [
+ chart_opts.get('name', ''),
+ chart_opts.get('title', ''),
+ chart_opts.get('units', ''),
+ chart_opts.get('family', ''),
+ chart_opts.get('context', ''),
+ chart_opts.get('chart_type', 'line')
+ ]
+ chart_dict['lines'] = list()
+
+ # add the lines to the chart
+ for line in chart_lines:
+
+ ev_key = line.get('expvar_key')
+ ev_type = line.get('expvar_type')
+ line_id = line.get('id')
+
+ if not all([ev_key, ev_type, line_id]):
+ self.info('Line missing expvar_key, expvar_type, or line_id, skipping: {0}'.format(line))
+ continue
+
+ if ev_type not in ['int', 'float']:
+ self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
+ continue
+
+ if ev_key in self.expvars:
+ self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key))
+ continue
+
+ self.expvars[ev_key] = (ev_type, line_id)
+
+ chart_dict['lines'].append(
+ [
+ line.get('id', ''),
+ line.get('name', ''),
+ line.get('algorithm', ''),
+ line.get('multiplier', 1),
+ line.get('divisor', 100 if ev_type == 'float' else 1),
+ line.get('hidden', False)
+ ]
+ )
+
+ self.order.append(chart_id)
+ self.definitions[chart_id] = chart_dict
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = json.loads(raw_data)
+
+ expvars = dict()
+ if self.configuration.get('collect_memstats'):
+ expvars.update(self._parse_memstats(data))
+
+ if self.configuration.get('extra_charts'):
+ # the memstats part of the data has already been parsed, so we remove it before flattening and checking
+ # the rest of the data, thus avoiding needless iteration over the deeply nested memstats dict.
+ del (data['memstats'])
+ flattened = flatten(data)
+ for k, v in flattened.items():
+ ev = self.expvars.get(k)
+ if not ev:
+ # expvar is not defined in config, skip it
+ continue
+ try:
+ key_type, line_id = ev
+ if key_type == 'int':
+ expvars[line_id] = int(v)
+ elif key_type == 'float':
+ # if the value type is float, multiply it by 100 and keep the default line divisor at 100
+ expvars[line_id] = float(v) * 100
+ except ValueError:
+ self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type))
+ del self.expvars[k]
+
+ return expvars
+
+ @staticmethod
+ def _parse_memstats(data):
+
+ memstats = data['memstats']
+
+ # calculate the number of live objects in memory
+ live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
+
+ # calculate GC pause times average
+ # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
+ # so we need to filter out the 0 values before the buffer is filled
+ gc_pauses = memstats['PauseNs']
+ try:
+ gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
+ # no GC cycles have occurred yet
+ except ZeroDivisionError:
+ gc_pause_avg = 0
+
+ return {
+ 'memstats_heap_alloc': memstats['HeapAlloc'],
+ 'memstats_heap_inuse': memstats['HeapInuse'],
+ 'memstats_stack_inuse': memstats['StackInuse'],
+ 'memstats_mspan_inuse': memstats['MSpanInuse'],
+ 'memstats_mcache_inuse': memstats['MCacheInuse'],
+ 'memstats_sys': memstats['Sys'],
+ 'memstats_live_objects': live_objs,
+ 'memstats_gc_pauses': gc_pause_avg,
+ }
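For illustration only, a made-up expvar payload run through the same flatten() logic as above; the dot-separated keys are what the module matches against the expvar_key values from the job configuration.

def flatten(d, top='', sep='.'):
    # same behaviour as the helper in go_expvar.chart.py
    items = []
    for key, val in d.items():
        nkey = top + sep + key if top else key
        if isinstance(val, dict):
            items.extend(flatten(val, nkey, sep=sep).items())
        else:
            items.append((nkey, val))
    return dict(items)

sample = {'counters': {'requests': 42, 'errors': {'timeout': 3}}}   # hypothetical expvar JSON
print(flatten(sample))   # {'counters.requests': 42, 'counters.errors.timeout': 3}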
diff --git a/python.d/haproxy.chart.py b/python.d/haproxy.chart.py
index 2fb97d75..67a6f782 100644
--- a/python.d/haproxy.chart.py
+++ b/python.d/haproxy.chart.py
@@ -3,6 +3,13 @@
# Author: l2isbad
from base import UrlService, SocketService
+from collections import defaultdict
+from re import compile as re_compile
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -10,135 +17,140 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['fbin', 'fbout', 'fscur', 'fqcur', 'bbin', 'bbout', 'bscur', 'bqcur', 'health_sdown', 'health_bdown']
+ORDER = ['fbin', 'fbout', 'fscur', 'fqcur', 'bbin', 'bbout', 'bscur', 'bqcur',
+ 'health_sdown', 'health_bdown', 'health_idle']
CHARTS = {
'fbin': {
- 'options': [None, "Kilobytes in", "kilobytes in/s", 'Frontend', 'haproxy_f.bin', 'line'],
+ 'options': [None, "Kilobytes In", "KB/s", 'frontend', 'haproxy_f.bin', 'line'],
'lines': [
]},
'fbout': {
- 'options': [None, "Kilobytes out", "kilobytes out/s", 'Frontend', 'haproxy_f.bout', 'line'],
+ 'options': [None, "Kilobytes Out", "KB/s", 'frontend', 'haproxy_f.bout', 'line'],
'lines': [
]},
'fscur': {
- 'options': [None, "Sessions active", "sessions", 'Frontend', 'haproxy_f.scur', 'line'],
+ 'options': [None, "Sessions Active", "sessions", 'frontend', 'haproxy_f.scur', 'line'],
'lines': [
]},
'fqcur': {
- 'options': [None, "Session in queue", "sessions", 'Frontend', 'haproxy_f.qcur', 'line'],
+ 'options': [None, "Session In Queue", "sessions", 'frontend', 'haproxy_f.qcur', 'line'],
'lines': [
]},
'bbin': {
- 'options': [None, "Kilobytes in", "kilobytes in/s", 'Backend', 'haproxy_b.bin', 'line'],
+ 'options': [None, "Kilobytes In", "KB/s", 'backend', 'haproxy_b.bin', 'line'],
'lines': [
]},
'bbout': {
- 'options': [None, "Kilobytes out", "kilobytes out/s", 'Backend', 'haproxy_b.bout', 'line'],
+ 'options': [None, "Kilobytes Out", "KB/s", 'backend', 'haproxy_b.bout', 'line'],
'lines': [
]},
'bscur': {
- 'options': [None, "Sessions active", "sessions", 'Backend', 'haproxy_b.scur', 'line'],
+ 'options': [None, "Sessions Active", "sessions", 'backend', 'haproxy_b.scur', 'line'],
'lines': [
]},
'bqcur': {
- 'options': [None, "Sessions in queue", "sessions", 'Backend', 'haproxy_b.qcur', 'line'],
+ 'options': [None, "Sessions In Queue", "sessions", 'backend', 'haproxy_b.qcur', 'line'],
'lines': [
]},
'health_sdown': {
- 'options': [None, "Number of servers in backend in DOWN state", "failed servers", 'Health', 'haproxy_hs.down', 'line'],
+ 'options': [None, "Backend Servers In DOWN State", "failed servers", 'health',
+ 'haproxy_hs.down', 'line'],
'lines': [
]},
'health_bdown': {
- 'options': [None, "Is backend alive? 1 = DOWN", "failed backend", 'Health', 'haproxy_hb.down', 'line'],
+ 'options': [None, "Is Backend Alive? 1 = DOWN", "failed backend", 'health', 'haproxy_hb.down', 'line'],
+ 'lines': [
+ ]},
+ 'health_idle': {
+ 'options': [None, "The Ratio Of Polling Time Vs Total Time", "percent", 'health', 'haproxy.idle', 'line'],
'lines': [
+ ['idle', None, 'absolute']
]}
}
+METRICS = {'bin': {'algorithm': 'incremental', 'divisor': 1024},
+ 'bout': {'algorithm': 'incremental', 'divisor': 1024},
+ 'scur': {'algorithm': 'absolute', 'divisor': 1},
+ 'qcur': {'algorithm': 'absolute', 'divisor': 1}}
+
+REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'), socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+
class Service(UrlService, SocketService):
def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
- self.request = 'show stat\n'
- self.poll_method = (UrlService, SocketService)
+ if 'socket' in configuration:
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.poll = SocketService
+ self.options_ = dict(regex=REGEX['socket'], stat='show stat\n', info='show info\n')
+ else:
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.poll = UrlService
+ self.options_ = dict(regex=REGEX['url'], stat=self.url, info=url_remove_params(self.url))
self.order = ORDER
- self.order_front = [_ for _ in ORDER if _.startswith('f')]
- self.order_back = [_ for _ in ORDER if _.startswith('b')]
self.definitions = CHARTS
- self.charts = True
def check(self):
- if self.configuration.get('url'):
- self.poll_method = self.poll_method[0]
- url = self.configuration.get('url')
- if not url.endswith(';csv;norefresh'):
- self.error('Bad url(%s). Must be http://<ip.address>:<port>/<url>;csv;norefresh' % url)
- return False
- elif self.configuration.get('socket'):
- self.poll_method = self.poll_method[1]
- else:
- self.error('No configuration is specified')
- return False
-
- if self.poll_method.check(self):
- self.info('Plugin was started succesfully. We are using %s.' % self.poll_method.__name__)
+ if self.poll.check(self):
+ self.create_charts()
+ self.info('We are using %s.' % self.poll.__name__)
return True
+ return False
- def create_charts(self, front_ends, back_ends):
- for _ in range(len(front_ends)):
- self.definitions['fbin']['lines'].append(['_'.join(['fbin', front_ends[_]['# pxname']]), front_ends[_]['# pxname'], 'incremental', 1, 1024])
- self.definitions['fbout']['lines'].append(['_'.join(['fbout', front_ends[_]['# pxname']]), front_ends[_]['# pxname'], 'incremental', 1, 1024])
- self.definitions['fscur']['lines'].append(['_'.join(['fscur', front_ends[_]['# pxname']]), front_ends[_]['# pxname'], 'absolute'])
- self.definitions['fqcur']['lines'].append(['_'.join(['fqcur', front_ends[_]['# pxname']]), front_ends[_]['# pxname'], 'absolute'])
-
- for _ in range(len(back_ends)):
- self.definitions['bbin']['lines'].append(['_'.join(['bbin', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'incremental', 1, 1024])
- self.definitions['bbout']['lines'].append(['_'.join(['bbout', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'incremental', 1, 1024])
- self.definitions['bscur']['lines'].append(['_'.join(['bscur', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'absolute'])
- self.definitions['bqcur']['lines'].append(['_'.join(['bqcur', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'absolute'])
- self.definitions['health_sdown']['lines'].append(['_'.join(['hsdown', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'absolute'])
- self.definitions['health_bdown']['lines'].append(['_'.join(['hbdown', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'absolute'])
-
def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- try:
- raw_data = self.poll_method._get_raw_data(self).splitlines()
- except Exception as e:
- self.error(str(e))
- return None
-
- all_instances = [dict(zip(raw_data[0].split(','), raw_data[_].split(','))) for _ in range(1, len(raw_data))]
-
- back_ends = list(filter(is_backend, all_instances))
- front_ends = list(filter(is_frontend, all_instances))
- servers = list(filter(is_server, all_instances))
-
- if self.charts:
- self.create_charts(front_ends, back_ends)
- self.charts = False
-
to_netdata = dict()
+ self.request, self.url = self.options_['stat'], self.options_['stat']
+ stat_data = self._get_stat_data()
+ self.request, self.url = self.options_['info'], self.options_['info']
+ info_data = self._get_info_data(regex=self.options_['regex'])
- for frontend in front_ends:
- for _ in self.order_front:
- to_netdata.update({'_'.join([_, frontend['# pxname']]): int(frontend[_[1:]]) if frontend.get(_[1:]) else 0})
+ to_netdata.update(stat_data)
+ to_netdata.update(info_data)
+ return to_netdata or None
- for backend in back_ends:
- for _ in self.order_back:
- to_netdata.update({'_'.join([_, backend['# pxname']]): int(backend[_[1:]]) if backend.get(_[1:]) else 0})
-
- for _ in range(len(back_ends)):
- to_netdata.update({'_'.join(['hsdown', back_ends[_]['# pxname']]):
- len([server for server in servers if is_server_down(server, back_ends, _)])})
- to_netdata.update({'_'.join(['hbdown', back_ends[_]['# pxname']]): 1 if is_backend_down(back_ends, _) else 0})
+ def _get_stat_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+
+ if not raw_data:
+ return dict()
+
+ raw_data = raw_data.splitlines()
+ self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(',')))
+ for _ in range(1, len(raw_data))])
+ if not self.data:
+ return dict()
+
+ stat_data = dict()
+
+ for frontend in self.data['frontend']:
+ for metric in METRICS:
+ idx = frontend['# pxname'].replace('.', '_')
+ stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
+
+ for backend in self.data['backend']:
+ name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
+ stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
+ if server_down(server, name)])
+ stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
+ for metric in METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ return stat_data
+
+ def _get_info_data(self, regex):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+ if not raw_data:
+ return dict()
- return to_netdata
+ match = regex.search(raw_data)
+ return match.groupdict() if match else dict()
- def _check_raw_data(self, data):
+ @staticmethod
+ def _check_raw_data(data):
"""
Check if all data has been gathered from socket
:param data: str
@@ -146,32 +158,54 @@ class Service(UrlService, SocketService):
"""
return not bool(data)
-def is_backend(backend):
- try:
- return backend['svname'] == 'BACKEND' and backend['# pxname'] != 'stats'
- except Exception:
- return False
-
-def is_frontend(frontend):
- try:
- return frontend['svname'] == 'FRONTEND' and frontend['# pxname'] != 'stats'
- except Exception:
- return False
-
-def is_server(server):
- try:
- return not server['svname'].startswith(('FRONTEND', 'BACKEND'))
- except Exception:
- return False
-
-def is_server_down(server, back_ends, _):
- try:
- return server['# pxname'] == back_ends[_]['# pxname'] and server['status'] == 'DOWN'
- except Exception:
- return False
-
-def is_backend_down(back_ends, _):
- try:
- return back_ends[_]['status'] == 'DOWN'
- except Exception:
- return False
+ def create_charts(self):
+ for front in self.data['frontend']:
+ name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ for back in self.data['backend']:
+ name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
+ self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
+
+
+def parse_data_(data):
+ def is_backend(backend):
+ return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
+
+ def is_frontend(frontend):
+ return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
+
+ def is_server(server):
+ return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
+
+ if not data:
+ return None
+
+ result = defaultdict(list)
+ for elem in data:
+ if is_backend(elem):
+ result['backend'].append(elem)
+ continue
+ elif is_frontend(elem):
+ result['frontend'].append(elem)
+ continue
+ elif is_server(elem):
+ result['servers'].append(elem)
+
+ return result or None
+
+
+def server_down(server, backend_name):
+ return server.get('# pxname') == backend_name and server.get('status') == 'DOWN'
+
+
+def url_remove_params(url):
+ parsed = urlparse(url or str())
+ return '%s://%s%s' % (parsed.scheme, parsed.netloc, parsed.path)
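A rough sketch (fabricated, heavily trimmed CSV) of how the "show stat" output ends up grouped by parse_data_ above: each data row is zipped against the header row, then classified by its svname column.

raw = [
    '# pxname,svname,scur,status',     # hypothetical trimmed header from 'show stat'
    'web,FRONTEND,12,OPEN',
    'app,srv1,3,UP',
    'app,BACKEND,3,UP',
]
header = raw[0].split(',')
rows = [dict(zip(header, line.split(','))) for line in raw[1:]]

frontends = [row for row in rows if row['svname'] == 'FRONTEND']
backends = [row for row in rows if row['svname'] == 'BACKEND']
servers = [row for row in rows if row['svname'] not in ('FRONTEND', 'BACKEND')]
print(len(frontends), len(backends), len(servers))   # 1 1 1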
diff --git a/python.d/isc_dhcpd.chart.py b/python.d/isc_dhcpd.chart.py
index bb9ba5cb..a437f803 100644
--- a/python.d/isc_dhcpd.chart.py
+++ b/python.d/isc_dhcpd.chart.py
@@ -2,30 +2,56 @@
# Description: isc dhcpd lease netdata python.d module
# Author: l2isbad
-from base import SimpleService
from time import mktime, strptime, gmtime, time
-from os import stat
+from os import stat, access, R_OK
+from os.path import isfile
try:
- from ipaddress import IPv4Address as ipaddress
- from ipaddress import ip_network
+ from ipaddress import ip_network, ip_address
have_ipaddress = True
except ImportError:
have_ipaddress = False
try:
- from itertools import filterfalse as filterfalse
+ from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
-
+from base import SimpleService
priority = 60000
retries = 60
-update_every = 60
+update_every = 5
+
+ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total', 'parse_time', 'leases_size']
+
+CHARTS = {
+ 'pools_utilization': {
+ 'options': [None, 'Pools Utilization', 'used in percent', 'utilization',
+ 'isc_dhcpd.utilization', 'line'],
+ 'lines': []},
+ 'pools_active_leases': {
+ 'options': [None, 'Active Leases', 'leases per pool', 'active leases',
+ 'isc_dhcpd.active_leases', 'line'],
+ 'lines': []},
+ 'leases_total': {
+ 'options': [None, 'Total All Pools', 'number', 'active leases',
+ 'isc_dhcpd.leases_total', 'line'],
+ 'lines': [['leases_total', 'leases', 'absolute']]},
+ 'parse_time': {
+ 'options': [None, 'Parse Time', 'ms', 'parse stats',
+ 'isc_dhcpd.parse_time', 'line'],
+ 'lines': [['parse_time', 'time', 'absolute']]},
+ 'leases_size': {
+ 'options': [None, 'Dhcpd Leases File Size', 'kilobytes',
+ 'parse stats', 'isc_dhcpd.leases_size', 'line'],
+ 'lines': [['leases_size', 'size', 'absolute', 1, 1024]]}}
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.leases_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
- self.pools = self.configuration.get('pools')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.pools = dict()
# Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
# TODO: update algorithm to parse correctly 'local' db-time-format
@@ -33,50 +59,30 @@ class Service(SimpleService):
# Also only ipv4 supported
def check(self):
- if not self._get_raw_data():
+ if not have_ipaddress:
+ self.error('\'python-ipaddress\' module is needed')
+ return False
+ if not (isfile(self.leases_path) and access(self.leases_path, R_OK)):
self.error('Make sure leases_path is correct and leases log file is readable by netdata')
return False
- elif not have_ipaddress:
- self.error('No ipaddress module. Please install (py2-ipaddress in case of python2)')
+ if not self.configuration.get('pools'):
+ self.error('Pools are not defined')
return False
- else:
+ if not isinstance(self.configuration['pools'], dict):
+ self.error('Invalid \'pools\' format')
+ return False
+
+ for pool in self.configuration['pools']:
try:
- self.pools = self.pools.split()
- if not [ip_network(return_utf(pool)) for pool in self.pools]:
- self.error('Pools list is empty')
- return False
- except (ValueError, IndexError, AttributeError, SyntaxError) as e:
- self.error('Pools configurations is incorrect', str(e))
- return False
-
- # Creating static charts
- self.order = ['parse_time', 'leases_size', 'utilization', 'total']
- self.definitions = {'utilization':
- {'options':
- [None, 'Pools utilization', 'used %', 'utilization', 'isc_dhcpd.util', 'line'],
- 'lines': []},
- 'total':
- {'options':
- [None, 'Total all pools', 'leases', 'utilization', 'isc_dhcpd.total', 'line'],
- 'lines': [['total', 'leases', 'absolute']]},
- 'parse_time':
- {'options':
- [None, 'Parse time', 'ms', 'parse stats', 'isc_dhcpd.parse', 'line'],
- 'lines': [['ptime', 'time', 'absolute']]},
- 'leases_size':
- {'options':
- [None, 'dhcpd.leases file size', 'kilobytes', 'parse stats', 'isc_dhcpd.lsize', 'line'],
- 'lines': [['lsize', 'size', 'absolute']]}}
- # Creating dynamic charts
- for pool in self.pools:
- self.definitions['utilization']['lines'].append([''.join(['ut_', pool]), pool, 'absolute'])
- self.order.append(''.join(['leases_', pool]))
- self.definitions[''.join(['leases_', pool])] = \
- {'options': [None, 'Active leases', 'leases', 'pools', 'isc_dhcpd.lease', 'area'],
- 'lines': [[''.join(['le_', pool]), pool, 'absolute']]}
-
- self.info('Plugin was started succesfully')
- return True
+ net = ip_network(u'%s' % self.configuration['pools'][pool])
+ self.pools[pool] = dict(net=net, num_hosts=net.num_addresses - 2)
+ except ValueError as error:
+ self.error('%s removed, error: %s' % (self.configuration['pools'][pool], error))
+
+ if not self.pools:
+ return False
+ self.create_charts()
+ return True
def _get_raw_data(self):
"""
@@ -87,65 +93,60 @@ class Service(SimpleService):
)
"""
try:
- with open(self.leases_path, 'rt') as dhcp_leases:
+ with open(self.leases_path) as leases:
time_start = time()
- part1 = filterfalse(find_lease, dhcp_leases)
- part2 = filterfalse(find_ends, dhcp_leases)
- raw_result = dict(zip(part1, part2))
+ part1 = filterfalse(find_lease, leases)
+ part2 = filterfalse(find_ends, leases)
+ result = dict(zip(part1, part2))
time_end = time()
file_parse_time = round((time_end - time_start) * 1000)
- except Exception as e:
- self.error("Failed to parse leases file:", str(e))
+ return result, file_parse_time
+ except (OSError, IOError) as error:
+ self.error("Failed to parse leases file:", str(error))
return None
- else:
- result = (raw_result, file_parse_time)
- return result
def _get_data(self):
"""
:return: dict
"""
- raw_leases = self._get_raw_data()
- if not raw_leases:
+ raw_data = self._get_raw_data()
+ if not raw_data:
return None
- # Result: {ipaddress: end lease time, ...}
- all_leases = dict([(k[6:len(k)-3], v[7:len(v)-2]) for k, v in raw_leases[0].items()])
-
- # Result: [active binding, active binding....]. (Expire time (ends date;) - current time > 0)
- active_leases = [k for k, v in all_leases.items() if is_binding_active(all_leases[k])]
-
- # Result: {pool: number of active bindings in pool, ...}
- pools_count = dict([(pool, len([lease for lease in active_leases if is_address_in(lease, pool)]))
- for pool in self.pools])
-
- # Result: {pool: number of host ip addresses in pool, ...}
- pools_max = dict([(pool, (2 ** (32 - int(pool.split('/')[1])) - 2))
- for pool in self.pools])
-
- # Result: {pool: % utilization, ....} (percent)
- pools_util = dict([(pool, int(round(float(pools_count[pool]) / pools_max[pool] * 100, 0)))
- for pool in self.pools])
-
- # Bulding dicts to send to netdata
- final_count = dict([(''.join(['le_', k]), v) for k, v in pools_count.items()])
- final_util = dict([(''.join(['ut_', k]), v) for k, v in pools_util.items()])
-
- to_netdata = {'total': len(active_leases)}
- to_netdata.update({'lsize': int(stat(self.leases_path)[6] / 1024)})
- to_netdata.update({'ptime': int(raw_leases[1])})
- to_netdata.update(final_util)
- to_netdata.update(final_count)
+ raw_leases, parse_time = raw_data[0], raw_data[1]
+ # Result: {ipaddress: end lease time, ...}
+ active_leases, to_netdata = list(), dict()
+ current_time = mktime(gmtime())
+
+ for ip, lease_end_time in raw_leases.items():
+ # Active bindings: leases whose expire time (the "ends" date) is still in the future
+ if binding_active(lease_end_time=lease_end_time[7:-2],
+ current_time=current_time):
+ active_leases.append(ip_address(u'%s' % ip[6:-3]))
+
+ for pool in self.pools:
+ dim_id = pool.replace('.', '_')
+ pool_leases_count = len([ip for ip in active_leases if ip in self.pools[pool]['net']])
+ to_netdata[dim_id + '_active_leases'] = pool_leases_count
+ to_netdata[dim_id + '_utilization'] = float(pool_leases_count) / self.pools[pool]['num_hosts'] * 10000
+
+ to_netdata['leases_total'] = len(active_leases)
+ to_netdata['leases_size'] = stat(self.leases_path)[6]
+ to_netdata['parse_time'] = parse_time
return to_netdata
+ def create_charts(self):
+ for pool in self.pools:
+ dim, dim_id = pool, pool.replace('.', '_')
+ self.definitions['pools_utilization']['lines'].append([dim_id + '_utilization',
+ dim, 'absolute', 1, 100])
+ self.definitions['pools_active_leases']['lines'].append([dim_id + '_active_leases',
+ dim, 'absolute'])
-def is_binding_active(binding):
- return mktime(strptime(binding, '%w %Y/%m/%d %H:%M:%S')) - mktime(gmtime()) > 0
-
-def is_address_in(address, pool):
- return ipaddress(return_utf(address)) in ip_network(return_utf(pool))
+def binding_active(lease_end_time, current_time):
+ return mktime(strptime(lease_end_time, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
def find_lease(value):
@@ -155,10 +156,3 @@ def find_lease(value):
def find_ends(value):
return value[2:6] != 'ends'
-
-def return_utf(s):
- # python2 returns "<type 'str'>" for simple strings
- # python3 returns "<class 'str'>" for unicode strings
- if str(type(s)) == "<type 'str'>":
- return unicode(s, 'utf-8')
- return s
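A worked example (hypothetical pool and lease count) of the utilization scaling used above: the collected value is the lease/host ratio multiplied by 10000, and the chart line divides by 100, so the dashboard displays a percentage with two decimal places.

from ipaddress import ip_network   # Python 2 needs the 'ipaddress' backport

net = ip_network(u'192.168.0.0/24')          # hypothetical pool from the 'pools' config
num_hosts = net.num_addresses - 2            # exclude network and broadcast addresses
active_leases = 50                           # hypothetical number of active bindings

value = float(active_leases) / num_hosts * 10000
print(int(value), value / 100)               # 1968 19.68... percent after the chart divisor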
diff --git a/python.d/mdstat.chart.py b/python.d/mdstat.chart.py
index 0f7d2b44..ca9aba56 100644
--- a/python.d/mdstat.chart.py
+++ b/python.d/mdstat.chart.py
@@ -2,98 +2,125 @@
# Description: mdstat netdata python.d module
# Author: l2isbad
+from re import compile as re_compile
+from collections import defaultdict
from base import SimpleService
-from re import compile
+
priority = 60000
retries = 60
update_every = 1
+OPERATIONS = ('check', 'resync', 'reshape', 'recovery', 'finish', 'speed')
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ['agr_health']
- self.definitions = {'agr_health':
- {'options':
- [None, 'Faulty devices in MD', 'failed disks', 'health', 'md.health', 'line'],
- 'lines': []}}
- self.proc_mdstat = '/proc/mdstat'
- self.regex_disks = compile(r'((?<=\ )[a-zA-Z_0-9]+(?= : active)).*?((?<= \[)[0-9]+)/([0-9]+(?=\] ))')
- self.regex_status = compile(r'([a-zA-Z_0-9]+)( : active)[^:]*?([a-z]+) = ([0-9.]+(?=%)).*?((?<=finish=)[0-9.]+)min speed=([0-9]+)')
+ self.regex = dict(disks=re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
+ r'(?P<total_disks>[0-9]+)/'
+ r'(?P<inuse_disks>[0-9]+)\]'),
+ status=re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
+ r'(?P<operation>[a-z]+) = '
+ r'(?P<operation_status>[0-9.]+).+finish='
+ r'(?P<finish>([0-9.]+))min speed='
+ r'(?P<speed>[0-9]+)'))
def check(self):
- raw_data = self._get_raw_data()
- if not raw_data:
- self.error('Cant read mdstat data from %s' % (self.proc_mdstat))
- return False
-
- md_list = [md[0] for md in self.regex_disks.findall(raw_data)]
-
- if not md_list:
- self.error('No active arrays in %s' % (self.proc_mdstat))
- return False
- else:
- for md in md_list:
- self.order.append(md)
- self.order.append(''.join([md, '_status']))
- self.order.append(''.join([md, '_rate']))
- self.definitions['agr_health']['lines'].append([''.join([md, '_health']), md, 'absolute'])
- self.definitions[md] = {'options':
- [None, '%s disks stats' % md, 'disks', md, 'md.disks', 'stacked'],
- 'lines': [[''.join([md, '_total']), 'total', 'absolute'],
- [''.join([md, '_inuse']), 'inuse', 'absolute']]}
- self.definitions[''.join([md, '_status'])] = {'options':
- [None, '%s current status' % md, 'percent', md, 'md.status', 'line'],
- 'lines': [[''.join([md, '_resync']), 'resync', 'absolute', 1, 100],
- [''.join([md, '_recovery']), 'recovery', 'absolute', 1, 100],
- [''.join([md, '_reshape']), 'reshape', 'absolute', 1, 100],
- [''.join([md, '_check']), 'check', 'absolute', 1, 100]]}
- self.definitions[''.join([md, '_rate'])] = {'options':
- [None, '%s operation status' % md, 'rate', md, 'md.rate', 'line'],
- 'lines': [[''.join([md, '_finishin']), 'finish min', 'absolute', 1, 100],
- [''.join([md, '_rate']), 'megabyte/s', 'absolute', -1, 100]]}
- self.info('Plugin was started successfully. MDs to monitor %s' % (md_list))
-
- return True
-
- def _get_raw_data(self):
+ arrays = find_arrays(self._get_raw_data(), self.regex)
+ if not arrays:
+ self.error('Failed to read data from /proc/mdstat or there are no active arrays')
+ return None
+
+ self.order, self.definitions = create_charts(arrays.keys())
+ return True
+
+ @staticmethod
+ def _get_raw_data():
"""
Read data from /proc/mdstat
:return: str
"""
try:
- with open(self.proc_mdstat, 'rt') as proc_mdstat:
- raw_result = proc_mdstat.read()
- except Exception:
+ with open('/proc/mdstat', 'rt') as proc_mdstat:
+ return proc_mdstat.readlines() or None
+ except (OSError, IOError):
return None
- else:
- raw_result = ' '.join(raw_result.split())
- return raw_result
def _get_data(self):
"""
Parse data from _get_raw_data()
:return: dict
"""
- raw_mdstat = self._get_raw_data()
- mdstat_disks = self.regex_disks.findall(raw_mdstat)
- mdstat_status = self.regex_status.findall(raw_mdstat)
- to_netdata = {}
-
- for md in mdstat_disks:
- to_netdata[''.join([md[0], '_total'])] = int(md[1])
- to_netdata[''.join([md[0], '_inuse'])] = int(md[2])
- to_netdata[''.join([md[0], '_health'])] = int(md[1]) - int(md[2])
- to_netdata[''.join([md[0], '_check'])] = 0
- to_netdata[''.join([md[0], '_resync'])] = 0
- to_netdata[''.join([md[0], '_reshape'])] = 0
- to_netdata[''.join([md[0], '_recovery'])] = 0
- to_netdata[''.join([md[0], '_finishin'])] = 0
- to_netdata[''.join([md[0], '_rate'])] = 0
-
- for md in mdstat_status:
- to_netdata[''.join([md[0], '_' + md[2]])] = round(float(md[3]) * 100)
- to_netdata[''.join([md[0], '_finishin'])] = round(float(md[4]) * 100)
- to_netdata[''.join([md[0], '_rate'])] = round(float(md[5]) / 1000 * 100)
+ raw_data = self._get_raw_data()
+ arrays = find_arrays(raw_data, self.regex)
+ if not arrays:
+ return None
+
+ to_netdata = dict()
+ for array, values in arrays.items():
+ for key, value in values.items():
+ to_netdata['_'.join([array, key])] = value
return to_netdata
+
+
+def find_arrays(raw_data, regex):
+ if raw_data is None:
+ return None
+ data = defaultdict(str)
+ counter = 1
+
+ for row in (elem.strip() for elem in raw_data):
+ if not row:
+ counter += 1
+ continue
+ data[counter] = ' '.join([data[counter], row])
+
+ arrays = dict()
+ for value in data.values():
+ match = regex['disks'].search(value)
+ if not match:
+ continue
+
+ match = match.groupdict()
+ array = match.pop('array')
+ arrays[array] = match
+ arrays[array]['health'] = int(match['total_disks']) - int(match['inuse_disks'])
+ for operation in OPERATIONS:
+ arrays[array][operation] = 0
+
+ match = regex['status'].search(value)
+ if match:
+ match = match.groupdict()
+ if match['operation'] in OPERATIONS:
+ arrays[array][match['operation']] = float(match['operation_status']) * 100
+ arrays[array]['finish'] = float(match['finish']) * 100
+ arrays[array]['speed'] = float(match['speed']) / 1000 * 100
+
+ return arrays or None
+
+
+def create_charts(arrays):
+ order = ['mdstat_health']
+ definitions = dict(mdstat_health={'options': [None, 'Faulty devices in MD', 'failed disks',
+ 'health', 'md.health', 'line'],
+ 'lines': []})
+ for md in arrays:
+ order.append(md)
+ order.append(md + '_status')
+ order.append(md + '_rate')
+ definitions['mdstat_health']['lines'].append([md + '_health', md, 'absolute'])
+ definitions[md] = {'options': [None, '%s disks stats' % md, 'disks', md, 'md.disks', 'stacked'],
+ 'lines': [[md + '_total_disks', 'total', 'absolute'],
+ [md + '_inuse_disks', 'inuse', 'absolute']]}
+ definitions[md + '_status'] = {'options': [None, '%s current status' % md,
+ 'percent', md, 'md.status', 'line'],
+ 'lines': [[md + '_resync', 'resync', 'absolute', 1, 100],
+ [md + '_recovery', 'recovery', 'absolute', 1, 100],
+ [md + '_reshape', 'reshape', 'absolute', 1, 100],
+ [md + '_check', 'check', 'absolute', 1, 100]]}
+ definitions[md + '_rate'] = {'options': [None, '%s operation status' % md,
+ 'rate', md, 'md.rate', 'line'],
+ 'lines': [[md + '_finish', 'finish min', 'absolute', 1, 100],
+ [md + '_speed', 'MB/s', 'absolute', -1, 100]]}
+ return order, definitions
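A short sketch (fabricated /proc/mdstat content, already joined into one line the way find_arrays does) of what the disks regex above captures for an active array.

import re

regex_disks = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
                         r'(?P<total_disks>[0-9]+)/'
                         r'(?P<inuse_disks>[0-9]+)\]')

# hypothetical md0 block from /proc/mdstat, wrapped lines already joined with spaces
line = ' md0 : active raid1 sdb1[1] sda1[0] 1046528 blocks super 1.2 [2/2] [UU]'
print(regex_disks.search(line).groupdict())
# {'array': 'md0', 'total_disks': '2', 'inuse_disks': '2'}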
diff --git a/python.d/mongodb.chart.py b/python.d/mongodb.chart.py
index c01bd293..bb4c44b0 100644
--- a/python.d/mongodb.chart.py
+++ b/python.d/mongodb.chart.py
@@ -19,7 +19,7 @@ except ImportError:
priority = 60000
retries = 60
-REPLSET_STATES = [
+REPL_SET_STATES = [
('1', 'primary'),
('8', 'down'),
('2', 'secondary'),
@@ -358,8 +358,8 @@ CHARTS = {
'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
'lines': [
- ['Metadata_r', 'intent_shared', 'incremental'],
- ['Metadata_w', 'intent_exclusive', 'incremental']
+ ['oplog_r', 'intent_shared', 'incremental'],
+ ['oplog_w', 'intent_exclusive', 'incremental']
]}
}
@@ -391,13 +391,16 @@ class Service(SimpleService):
self.build_metrics_to_collect_(server_status)
try:
- self._get_data()
+ data = self._get_data()
except (LookupError, SyntaxError, AttributeError):
self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
return False
- else:
+ if isinstance(data, dict) and data:
+ self._data_from_check = data
self.create_charts_(server_status)
return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
def build_metrics_to_collect_(self, server_status):
@@ -473,7 +476,7 @@ class Service(SimpleService):
lines.append([dim_id, description, 'absolute', 1, 1])
return lines
- all_hosts = server_status['repl']['hosts']
+ all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list())
this_host = server_status['repl']['me']
other_hosts = [host for host in all_hosts if host != this_host]
@@ -503,19 +506,19 @@ class Service(SimpleService):
self.definitions[chart_name] = {
'options': [None, 'Replica set member (%s) current state' % host, 'state',
'replication and oplog', 'mongodb.replication_state', 'line'],
- 'lines': create_state_lines(REPLSET_STATES)}
+ 'lines': create_state_lines(REPL_SET_STATES)}
def _get_raw_data(self):
raw_data = dict()
- raw_data.update(self.get_serverstatus_() or dict())
- raw_data.update(self.get_dbstats_() or dict())
- raw_data.update(self.get_replsetgetstatus_() or dict())
- raw_data.update(self.get_getreplicationinfo_() or dict())
+ raw_data.update(self.get_server_status() or dict())
+ raw_data.update(self.get_db_stats() or dict())
+ raw_data.update(self.get_repl_set_get_status() or dict())
+ raw_data.update(self.get_get_replication_info() or dict())
return raw_data or None
- def get_serverstatus_(self):
+ def get_server_status(self):
raw_data = dict()
try:
raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
@@ -524,7 +527,7 @@ class Service(SimpleService):
else:
return raw_data
- def get_dbstats_(self):
+ def get_db_stats(self):
if not self.databases:
return None
@@ -533,24 +536,22 @@ class Service(SimpleService):
try:
for dbase in self.databases:
raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
+ return raw_data
except PyMongoError:
return None
- else:
- return raw_data
- def get_replsetgetstatus_(self):
+ def get_repl_set_get_status(self):
if not self.do_replica:
return None
raw_data = dict()
try:
raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
+ return raw_data
except PyMongoError:
return None
- else:
- return raw_data
- def get_getreplicationinfo_(self):
+ def get_get_replication_info(self):
if not (self.do_replica and 'local' in self.databases):
return None
@@ -561,10 +562,9 @@ class Service(SimpleService):
"$natural", ASCENDING).limit(1)[0]
raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
"$natural", DESCENDING).limit(1)[0]
+ return raw_data
except PyMongoError:
return None
- else:
- return raw_data
def _get_data(self):
"""
@@ -583,7 +583,7 @@ class Service(SimpleService):
utc_now = datetime.utcnow()
# serverStatus
- for metric, new_name, function in self.metrics_to_collect:
+ for metric, new_name, func in self.metrics_to_collect:
value = serverStatus
for key in metric.split('.'):
try:
@@ -592,7 +592,7 @@ class Service(SimpleService):
break
if not isinstance(value, dict) and key:
- to_netdata[new_name or key] = value if not function else function(value)
+ to_netdata[new_name or key] = value if not func else func(value)
to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal',
to_netdata['mapped'])
@@ -620,13 +620,13 @@ class Service(SimpleService):
if not member.get('self'):
other_hosts.append(member)
# Replica set time diff between current time and time when last entry from the oplog was applied
- if member['optimeDate'] != unix_epoch:
+ if member.get('optimeDate', unix_epoch) != unix_epoch:
member_optimedate = member['name'] + '_optimedate'
to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'],
multiplier=1000))})
# Replica set members state
member_state = member['name'] + '_state'
- for elem in REPLSET_STATES:
+ for elem in REPL_SET_STATES:
state = elem[0]
to_netdata.update({'_'.join([member_state, state]): 0})
to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']})
@@ -668,5 +668,4 @@ class Service(SimpleService):
def delta_calculation(delta, multiplier=1):
if hasattr(delta, 'total_seconds'):
return delta.total_seconds() * multiplier
- else:
- return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
+ return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
diff --git a/python.d/mysql.chart.py b/python.d/mysql.chart.py
index abf6bf71..cdabe971 100644
--- a/python.d/mysql.chart.py
+++ b/python.d/mysql.chart.py
@@ -117,7 +117,7 @@ GLOBAL_STATS = [
'Connection_errors_tcpwrap']
def slave_seconds(value):
- return value if value else -1
+ return value if value != '' else -1
def slave_running(value):
return 1 if value == 'Yes' else -1
@@ -278,10 +278,10 @@ CHARTS = {
'innodb_rows': {
'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
'lines': [
- ['Innodb_rows_inserted', 'read', 'incremental'],
- ['Innodb_rows_read', 'deleted', 'incremental', -1, 1],
- ['Innodb_rows_updated', 'inserted', 'incremental', 1, 1],
- ['Innodb_rows_deleted', 'updated', 'incremental', -1, 1],
+ ['Innodb_rows_inserted', 'inserted', 'incremental'],
+ ['Innodb_rows_read', 'read', 'incremental', 1, 1],
+ ['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
+ ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
]},
'innodb_buffer_pool_pages': {
'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb', 'mysql.innodb_buffer_pool_pages', 'line'],
diff --git a/python.d/ovpn_status_log.chart.py b/python.d/ovpn_status_log.chart.py
index c5fca002..3a7e8200 100644
--- a/python.d/ovpn_status_log.chart.py
+++ b/python.d/ovpn_status_log.chart.py
@@ -2,8 +2,10 @@
# Description: openvpn status log netdata python.d module
# Author: l2isbad
+from re import compile as r_compile
+from collections import defaultdict
from base import SimpleService
-from re import compile, findall, search, subn
+
priority = 60000
retries = 60
update_every = 10
@@ -11,67 +13,101 @@ update_every = 10
ORDER = ['users', 'traffic']
CHARTS = {
'users': {
- 'options': [None, 'OpenVPN active users', 'active users', 'Users', 'openvpn_status.users', 'line'],
+ 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
'lines': [
- ["users", None, "absolute"],
+ ['users', None, 'absolute'],
]},
'traffic': {
- 'options': [None, 'OpenVPN traffic', 'kilobit/s', 'Traffic', 'openvpn_status.traffic', 'area'],
+ 'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
'lines': [
- ["in", None, "incremental", 8, 1000], ["out", None, "incremental", 8, -1000]
+ ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
]},
}
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
self.log_path = self.configuration.get('log_path')
- self.regex_data_inter = compile(r'(?<=Since ).*?(?=.ROUTING)')
- self.regex_data_final = compile(r'\d{1,3}(?:\.\d{1,3}){3}[:0-9,. ]*')
- self.regex_users = compile(r'\d{1,3}(?:\.\d{1,3}){3}:\d+')
- self.regex_traffic = compile(r'(?<=(?:,| ))\d+(?=(?:,| ))')
+ self.regex = dict(tls=r_compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)'),
+ static_key=r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'))
+ self.to_netdata = dict(bytes_in=0, bytes_out=0)
def check(self):
- if not self._get_raw_data():
- self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+ if not (self.log_path and isinstance(self.log_path, str)):
+ self.error('\'log_path\' is not defined')
return False
- else:
- self.info('Plugin was started succesfully')
+
+ data = False
+ for method in (self._get_data_tls, self._get_data_static_key):
+ data = method()
+ if data:
+ self._get_data = method
+ self._data_from_check = data
+ break
+
+ if data:
return True
+ self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+ return False
def _get_raw_data(self):
"""
Open log file
:return: str
"""
+
try:
- with open(self.log_path, 'rt') as log:
- result = log.read()
- except Exception:
+ with open(self.log_path) as log:
+ raw_data = log.readlines() or None
+ except (OSError, IOError):
return None
else:
- return result
+ return raw_data
- def _get_data(self):
+ def _get_data_static_key(self):
"""
Parse openvpn-status log file.
- Current regex version is ok for status-version 1, 2 and 3. Both users and bytes in/out are collecting.
"""
raw_data = self._get_raw_data()
- try:
- data_inter = self.regex_data_inter.search(' '.join(raw_data.splitlines())).group()
- except AttributeError:
- data_inter = ''
+ if not raw_data:
+ return None
+
+ data = defaultdict(lambda: 0)
+
+ for row in raw_data:
+ match = self.regex['static_key'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['direction'] == 'read':
+ data['bytes_in'] += int(match['bytes'])
+ else:
+ data['bytes_out'] += int(match['bytes'])
+
+ return data or None
+
+ def _get_data_tls(self):
+ """
+ Parse openvpn-status log file.
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
- data_final = ' '.join(self.regex_data_final.findall(data_inter))
- users = self.regex_users.subn('', data_final)[1]
- traffic = self.regex_traffic.findall(data_final)
+ data = defaultdict(lambda: 0)
+ for row in raw_data:
+ row = ' '.join(row.split(',')) if ',' in row else ' '.join(row.split())
+ match = self.regex['tls'].search(row)
+ if match:
+ match = match.groupdict()
+ data['users'] += 1
+ data['bytes_in'] += int(match['bytes_in'])
+ data['bytes_out'] += int(match['bytes_out'])
- bytes_in = sum([int(traffic[i]) for i in range(len(traffic)) if (i + 1) % 2 is 1])
- bytes_out = sum([int(traffic[i]) for i in range(len(traffic)) if (i + 1) % 2 is 0])
+ return data or None
- return {'users': users, 'in': bytes_in, 'out': bytes_out}
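For reference, a minimal sketch (fabricated client entry) of what the tls regex above extracts from a status-version 1 client list row once its commas are replaced with spaces, as _get_data_tls does.

import re

regex_tls = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)')

# hypothetical row: common name, real address, bytes received, bytes sent, connected since
row = 'client1,203.0.113.10:49123,839188,5158473,Thu Jul 27 09:00:00 2017'
row = ' '.join(row.split(','))
print(regex_tls.search(row).groupdict())
# {'bytes_in': '839188', 'bytes_out': '5158473'}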
diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py
index d359bb4f..ef710cb8 100644
--- a/python.d/postgres.chart.py
+++ b/python.d/postgres.chart.py
@@ -99,8 +99,8 @@ SELECT
sum(conflicts) AS conflicts,
pg_database_size(datname) AS size
FROM pg_stat_database
-WHERE NOT datname ~* '^template\d+'
-GROUP BY database_name;
+WHERE datname IN %(databases)s
+GROUP BY datname;
""",
BGWRITER="""
SELECT
@@ -146,7 +146,6 @@ SELECT current_setting('is_superuser') = 'on' AS is_superuser;
QUERY_STATS = {
QUERIES['DATABASE']: METRICS['DATABASE'],
QUERIES['BACKENDS']: METRICS['BACKENDS'],
- QUERIES['ARCHIVE']: METRICS['ARCHIVE'],
QUERIES['LOCKS']: METRICS['LOCKS']
}
@@ -242,6 +241,7 @@ class Service(SimpleService):
self.definitions = deepcopy(CHARTS)
self.table_stats = configuration.pop('table_stats', False)
self.index_stats = configuration.pop('index_stats', False)
+ self.database_poll = configuration.pop('database_poll', None)
self.configuration = configuration
self.connection = False
self.is_superuser = False
@@ -281,6 +281,9 @@ class Service(SimpleService):
is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
cursor.close()
+ if (self.database_poll and isinstance(self.database_poll, str)):
+ self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] or self.databases
+
self.locks_zeroed = populate_lock_types(self.databases)
self.add_additional_queries_(is_superuser)
self.create_dynamic_charts_()
@@ -296,6 +299,7 @@ class Service(SimpleService):
QUERY_STATS[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
if is_superuser:
QUERY_STATS[QUERIES['BGWRITER']] = METRICS['BGWRITER']
+ QUERY_STATS[QUERIES['ARCHIVE']] = METRICS['ARCHIVE']
def create_dynamic_charts_(self):
@@ -328,7 +332,7 @@ class Service(SimpleService):
return None
def query_stats_(self, cursor, query, metrics):
- cursor.execute(query)
+ cursor.execute(query, dict(databases=tuple(self.databases)))
for row in cursor:
for metric in metrics:
dimension_id = '_'.join([row['database_name'], metric]) if 'database_name' in row else metric
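A hedged sketch of the parametrization that the WHERE datname IN %(databases)s change above relies on: psycopg2 adapts a Python tuple bound to the parameter into a parenthesised SQL list, so the databases discovered at startup can be passed as a single value. The connection details below are placeholders.

import psycopg2

conn = psycopg2.connect(dbname='postgres', user='netdata')   # hypothetical credentials
cursor = conn.cursor()
query = 'SELECT datname, pg_database_size(datname) FROM pg_stat_database WHERE datname IN %(databases)s;'
cursor.execute(query, dict(databases=('db1', 'db2')))         # tuple is rendered as ('db1', 'db2')
print(cursor.fetchall())
cursor.close()
conn.close()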
diff --git a/python.d/python_modules/base.py b/python.d/python_modules/base.py
index 859300ec..a643cc6a 100644
--- a/python.d/python_modules/base.py
+++ b/python.d/python_modules/base.py
@@ -20,23 +20,20 @@
import time
import os
import socket
-import select
import threading
-import msg
import ssl
from subprocess import Popen, PIPE
from sys import exc_info
-
+from glob import glob
+import re
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
-
try:
import urllib.request as urllib2
except ImportError:
import urllib2
-
try:
import MySQLdb
PYMYSQL = True
@@ -46,6 +43,7 @@ except ImportError:
PYMYSQL = True
except ImportError:
PYMYSQL = False
+import msg
try:
PATH = os.getenv('PATH').split(':')
@@ -175,14 +173,15 @@ class SimpleService(threading.Thread):
# it is important to do this in a loop
# sleep() is interruptable
while now < next:
- self.debug("sleeping for", str(next - now), "secs to reach frequency of", str(step), "secs, now:", str(now), " next:", str(next), " penalty:", str(penalty))
+ self.debug("sleeping for", str(next - now), "secs to reach frequency of",
+ str(step), "secs, now:", str(now), " next:", str(next), " penalty:", str(penalty))
time.sleep(next - now)
now = float(time.time())
# do the job
try:
status = self._run_once()
- except Exception as e:
+ except Exception:
status = False
if status:
@@ -202,10 +201,12 @@ class SimpleService(threading.Thread):
penalty = 600
self.retries_left = self.retries
- self.alert("failed to collect data for " + str(self.retries) + " times - increasing penalty to " + str(penalty) + " sec and trying again")
+ self.alert("failed to collect data for " + str(self.retries) +
+ " times - increasing penalty to " + str(penalty) + " sec and trying again")
else:
- self.error("failed to collect data - " + str(self.retries_left) + " retries left - penalty: " + str(penalty) + " sec")
+ self.error("failed to collect data - " + str(self.retries_left)
+ + " retries left - penalty: " + str(penalty) + " sec")
# --- CHART ---
@@ -460,11 +461,42 @@ class SimpleService(threading.Thread):
return next(('/'.join([p, binary]) for p in PATH
if os.path.isfile('/'.join([p, binary]))
and os.access('/'.join([p, binary]), os.X_OK)))
- else:
- return None
+ return None
except StopIteration:
return None
+ def _add_new_dimension(self, dimension_id, chart_name, dimension=None, algorithm='incremental',
+ multiplier=1, divisor=1, priority=65000):
+ """
+ :param dimension_id:
+ :param chart_name:
+ :param dimension:
+ :param algorithm:
+ :param multiplier:
+ :param divisor:
+ :param priority:
+ :return:
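+        Appends the dimension to the chart definition and re-announces the chart
+        on stdout, roughly (illustrative):
+        CHART <module>_<job>.<chart_name> "" "<title>" <units> "<family>" <context> <type> <priority> <update_every>
+        DIMENSION <dimension_id> <name> <algorithm> <multiplier> <divisor>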
+ """
+ if not all([dimension_id not in self._dimensions,
+ chart_name in self.order,
+ chart_name in self.definitions]):
+ return
+ self._dimensions.append(dimension_id)
+ dimension_list = list(map(str, [dimension_id,
+ dimension if dimension else dimension_id,
+ algorithm,
+ multiplier,
+ divisor]))
+ self.definitions[chart_name]['lines'].append(dimension_list)
+ add_to_name = self.override_name or self.name
+        job_name = ('_'.join([self.__module__, re.sub(r'\s+', '_', add_to_name)])
+ if add_to_name != 'None' else self.__module__)
+ chart = 'CHART {0}.{1} '.format(job_name, chart_name)
+ options = '"" "{0}" {1} "{2}" {3} {4} '.format(*self.definitions[chart_name]['options'][1:6])
+ other = '{0} {1}\n'.format(priority, self.update_every)
+ new_dimension = "DIMENSION {0}\n".format(' '.join(dimension_list))
+ print(chart + options + other + new_dimension)
+
class UrlService(SimpleService):
def __init__(self, configuration=None, name=None):
@@ -473,47 +505,73 @@ class UrlService(SimpleService):
self.user = self.configuration.get('user')
self.password = self.configuration.get('pass')
self.ss_cert = self.configuration.get('ss_cert')
+ self.proxy = self.configuration.get('proxy')
- def __add_openers(self):
- def self_signed_cert(ss_cert):
- if ss_cert:
- try:
- ctx = ssl.create_default_context()
- ctx.check_hostname = False
- ctx.verify_mode = ssl.CERT_NONE
- return urllib2.build_opener(urllib2.HTTPSHandler(context=ctx))
- except AttributeError:
- return None
- else:
- return None
+ def __add_openers(self, user=None, password=None, ss_cert=None, proxy=None, url=None):
+ user = user or self.user
+ password = password or self.password
+ ss_cert = ss_cert or self.ss_cert
+ proxy = proxy or self.proxy
- self.opener = self_signed_cert(self.ss_cert) or urllib2.build_opener()
+ handlers = list()
- # HTTP Basic Auth
- if self.user and self.password:
- url_parse = urlparse(self.url)
+ # HTTP Basic Auth handler
+ if all([user, password, isinstance(user, str), isinstance(password, str)]):
+ url = url or self.url
+ url_parse = urlparse(url)
top_level_url = '://'.join([url_parse.scheme, url_parse.netloc])
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- passman.add_password(None, top_level_url, self.user, self.password)
- self.opener.add_handler(urllib2.HTTPBasicAuthHandler(passman))
+ passman.add_password(None, top_level_url, user, password)
+ handlers.append(urllib2.HTTPBasicAuthHandler(passman))
self.debug("Enabling HTTP basic auth")
- def _get_raw_data(self, custom_url=None):
+ # HTTPS handler
+ # Self-signed certificate ignore
+ if ss_cert:
+ try:
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+ except AttributeError:
+ self.error('HTTPS self-signed certificate ignore not enabled')
+ else:
+ handlers.append(urllib2.HTTPSHandler(context=ctx))
+ self.debug("Enabling HTTP self-signed certificate ignore")
+
+ # PROXY handler
+ if proxy and isinstance(proxy, str) and not ss_cert:
+ handlers.append(urllib2.ProxyHandler(dict(http=proxy)))
+ self.debug("Enabling HTTP proxy handler (%s)" % proxy)
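+            # Example (illustrative): a job option such as proxy: 'http://127.0.0.1:3128'
+            # routes plain-HTTP requests through that proxy; it is skipped when ss_cert is set.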
+
+ opener = urllib2.build_opener(*handlers)
+ return opener
+
+ def _build_opener(self, **kwargs):
+ try:
+ return self.__add_openers(**kwargs)
+ except TypeError as error:
+ self.error('build_opener() error:', str(error))
+ return None
+
+ def _get_raw_data(self, url=None, opener=None):
"""
Get raw data from http request
:return: str
"""
- raw_data = None
- f = None
+ data = None
try:
- f = self.opener.open(custom_url or self.url, timeout=self.update_every * 2)
- raw_data = f.read().decode('utf-8', 'ignore')
+ opener = opener or self.opener
+ data = opener.open(url or self.url, timeout=self.update_every * 2)
+ raw_data = data.read().decode('utf-8', 'ignore')
+ except urllib2.URLError as error:
+ self.error('Url: %s. Error: %s' % (url or self.url, str(error)))
+ return None
except Exception as error:
- self.error('Url: %s. Error: %s' %(custom_url or self.url, str(error)))
+ self.error(str(error))
return None
finally:
- if f is not None: f.close()
-
+ if data is not None:
+ data.close()
return raw_data or None
def check(self):
@@ -525,7 +583,7 @@ class UrlService(SimpleService):
self.error('URL is not defined or type is not <str>')
return False
- self.__add_openers()
+ self.opener = self.__add_openers()
try:
data = self._get_data()
@@ -781,57 +839,69 @@ class SocketService(SimpleService):
class LogService(SimpleService):
def __init__(self, configuration=None, name=None):
- self.log_path = ""
- self._last_position = 0
- # self._log_reader = None
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.log_path = self.configuration.get('path')
+ self.__glob_path = self.log_path
+ self._last_position = 0
self.retries = 100000 # basically always retry
+ self.__re_find = dict(current=0, run=0, maximum=60)
def _get_raw_data(self):
"""
Get log lines since last poll
:return: list
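+        Re-globs for a newer log file (see _find_recent_log_file) after 'maximum'
+        consecutive polls that returned no new data or failed to read the file.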
"""
- lines = []
+ lines = list()
try:
- if os.path.getsize(self.log_path) < self._last_position:
+ if self.__re_find['current'] == self.__re_find['run']:
+ self._find_recent_log_file()
+ size = os.path.getsize(self.log_path)
+ if size == self._last_position:
+ self.__re_find['current'] += 1
+ return list() # return empty list if nothing has changed
+ elif size < self._last_position:
self._last_position = 0 # read from beginning if file has shrunk
- elif os.path.getsize(self.log_path) == self._last_position:
- self.debug("Log file hasn't changed. No new data.")
- return [] # return empty list if nothing has changed
- with open(self.log_path, "r") as fp:
+
+ with open(self.log_path) as fp:
fp.seek(self._last_position)
- for i, line in enumerate(fp):
+ for line in fp:
lines.append(line)
self._last_position = fp.tell()
- except Exception as e:
- self.error(str(e))
+ self.__re_find['current'] = 0
+ except (OSError, IOError) as error:
+ self.__re_find['current'] += 1
+ self.error(str(error))
- if len(lines) != 0:
- return lines
- else:
- self.error("No data collected.")
- return None
+ return lines or None
+
+ def _find_recent_log_file(self):
+ """
+ :return:
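+        Treats the configured path as a glob pattern: a value such as
+        '/var/log/nginx/access.log*' (illustrative) is expanded and the
+        lexicographically greatest match becomes the new log_path.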
+ """
+ self.__re_find['run'] = self.__re_find['maximum']
+ self.__re_find['current'] = 0
+ self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files
+ path_list = glob(self.__glob_path)
+ if path_list:
+ self.log_path = max(path_list)
+ return True
+ return False
def check(self):
"""
Parse basic configuration and check if log file exists
:return: boolean
"""
- if self.name is not None or self.name != str(None):
- self.name = ""
- else:
- self.name = str(self.name)
- try:
- self.log_path = str(self.configuration['path'])
- except (KeyError, TypeError):
- self.info("No path to log specified. Using: '" + self.log_path + "'")
+ if not self.log_path:
+ self.error("No path to log specified")
+ return None
- if os.access(self.log_path, os.R_OK):
+ if all([self._find_recent_log_file(),
+ os.access(self.log_path, os.R_OK),
+ os.path.isfile(self.log_path)]):
return True
- else:
- self.error("Cannot access file: '" + self.log_path + "'")
- return False
+ self.error("Cannot access %s" % self.log_path)
+ return False
def create(self):
# set cursor at last byte of log file
@@ -847,7 +917,7 @@ class ExecutableService(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.command = None
- def _get_raw_data(self):
+ def _get_raw_data(self, stderr=False):
"""
Get raw data from executed command
:return: <list>
@@ -855,10 +925,11 @@ class ExecutableService(SimpleService):
try:
p = Popen(self.command, stdout=PIPE, stderr=PIPE)
except Exception as error:
- self.error("Executing command", self.command, "resulted in error:", str(error))
+ self.error("Executing command", " ".join(self.command), "resulted in error:", str(error))
return None
data = list()
- for line in p.stdout.readlines():
+ std = p.stderr if stderr else p.stdout
+ for line in std.readlines():
data.append(line.decode())
return data or None
diff --git a/python.d/rabbitmq.chart.py b/python.d/rabbitmq.chart.py
new file mode 100644
index 00000000..15a6a80f
--- /dev/null
+++ b/python.d/rabbitmq.chart.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+# Description: rabbitmq netdata python.d module
+# Author: l2isbad
+
+from base import UrlService
+from socket import gethostbyname, gaierror
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+from threading import Thread
+from collections import namedtuple
+from json import loads
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 60
+
+METHODS = namedtuple('METHODS', ['get_data_function', 'url', 'stats'])
+
+NODE_STATS = [('fd_used', None),
+ ('mem_used', None),
+ ('sockets_used', None),
+ ('proc_used', None),
+ ('disk_free', None)
+ ]
+OVERVIEW_STATS = [('object_totals.channels', None),
+ ('object_totals.consumers', None),
+ ('object_totals.connections', None),
+ ('object_totals.queues', None),
+ ('object_totals.exchanges', None),
+ ('queue_totals.messages_ready', None),
+ ('queue_totals.messages_unacknowledged', None),
+ ('message_stats.ack', None),
+ ('message_stats.redeliver', None),
+ ('message_stats.deliver', None),
+ ('message_stats.publish', None)
+ ]
+ORDER = ['queued_messages', 'message_rates', 'global_counts',
+ 'file_descriptors', 'socket_descriptors', 'erlang_processes', 'memory', 'disk_space']
+
+CHARTS = {
+ 'file_descriptors': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'overview',
+ 'rabbitmq.file_descriptors', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute']
+ ]},
+ 'memory': {
+ 'options': [None, 'Memory', 'MB', 'overview',
+ 'rabbitmq.memory', 'line'],
+ 'lines': [
+ ['mem_used', 'used', 'absolute', 1, 1024 << 10]
+ ]},
+ 'disk_space': {
+ 'options': [None, 'Disk Space', 'GB', 'overview',
+ 'rabbitmq.disk_space', 'line'],
+ 'lines': [
+ ['disk_free', 'free', 'absolute', 1, 1024 ** 3]
+ ]},
+ 'socket_descriptors': {
+ 'options': [None, 'Socket Descriptors', 'descriptors', 'overview',
+ 'rabbitmq.sockets', 'line'],
+ 'lines': [
+ ['sockets_used', 'used', 'absolute']
+ ]},
+ 'erlang_processes': {
+ 'options': [None, 'Erlang Processes', 'processes', 'overview',
+ 'rabbitmq.processes', 'line'],
+ 'lines': [
+ ['proc_used', 'used', 'absolute']
+ ]},
+ 'global_counts': {
+ 'options': [None, 'Global Counts', 'counts', 'overview',
+ 'rabbitmq.global_counts', 'line'],
+ 'lines': [
+ ['channels', None, 'absolute'],
+ ['consumers', None, 'absolute'],
+ ['connections', None, 'absolute'],
+ ['queues', None, 'absolute'],
+ ['exchanges', None, 'absolute']
+ ]},
+ 'queued_messages': {
+ 'options': [None, 'Queued Messages', 'messages', 'overview',
+ 'rabbitmq.queued_messages', 'stacked'],
+ 'lines': [
+ ['messages_ready', 'ready', 'absolute'],
+ ['messages_unacknowledged', 'unacknowledged', 'absolute']
+ ]},
+ 'message_rates': {
+ 'options': [None, 'Message Rates', 'messages/s', 'overview',
+ 'rabbitmq.message_rates', 'stacked'],
+ 'lines': [
+ ['ack', None, 'incremental'],
+ ['redeliver', None, 'incremental'],
+ ['deliver', None, 'incremental'],
+ ['publish', None, 'incremental']
+ ]}
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 15672)
+ self.scheme = self.configuration.get('scheme', 'http')
+
+ def check(self):
+        # We can't start if <host> AND <port> are not specified
+        if not (self.host and self.port):
+            self.error('Host or port is not defined in the module configuration file')
+ return False
+
+ # Hostname -> ip address
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(str(error))
+ return False
+
+ # Add handlers (auth, self signed cert accept)
+ url = '%s://%s:%s/api' % (self.scheme, self.host, self.port)
+ self.opener = self._build_opener(url=url)
+ if not self.opener:
+ return False
+ # Add methods
+ api_node = url + '/nodes'
+ api_overview = url + '/overview'
+ self.methods = [METHODS(get_data_function=self._get_overview_stats, url=api_node, stats=NODE_STATS),
+ METHODS(get_data_function=self._get_overview_stats, url=api_overview, stats=OVERVIEW_STATS)]
+
+ result = self._get_data()
+ if not result:
+ self.error('_get_data() returned no data')
+ return False
+ self._data_from_check = result
+ return True
+
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ th = Thread(target=method.get_data_function, args=(queue, method.url, method.stats))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ return result or None
+
+ def _get_overview_stats(self, queue, url, stats):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ data = data[0] if isinstance(data, list) else data
+
+ to_netdata = fetch_data_(raw_data=data, metrics_list=stats)
+ return queue.put(to_netdata)
+
+
+def fetch_data_(raw_data, metrics_list):
+ to_netdata = dict()
+ for metric, new_name in metrics_list:
+ value = raw_data
+ for key in metric.split('.'):
+ try:
+ value = value[key]
+ except (KeyError, TypeError):
+ break
+ if not isinstance(value, dict):
+ to_netdata[new_name or key] = value
+ return to_netdata
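+# Example (illustrative): for the metric 'object_totals.channels' with new_name None and
+# raw_data {'object_totals': {'channels': 3}}, the walk ends at 3 and the result is
+# {'channels': 3} (the last traversed key is used when no new_name is given).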
diff --git a/python.d/redis.chart.py b/python.d/redis.chart.py
index 61f4f6d6..4bc1d41f 100644
--- a/python.d/redis.chart.py
+++ b/python.d/redis.chart.py
@@ -100,13 +100,12 @@ class Service(SocketService):
:return: dict
"""
if self.passwd:
- info_request = self.request
self.request = "AUTH " + self.passwd + "\r\n"
raw = self._get_raw_data().strip()
if raw != "+OK":
self.error("invalid password")
return None
- self.request = info_request
+ self.request = "INFO\r\n"
response = self._get_raw_data()
if response is None:
# error has already been logged
@@ -155,7 +154,7 @@ class Service(SocketService):
:return: boolean
"""
length = len(data)
- supposed = data.split('\n')[0][1:]
+ supposed = data.split('\n')[0][1:-1]
        offset = len(supposed) + 4 # 1 dollar sign, 1 new line character + 1 ending sequence '\r\n'
if not supposed.isdigit():
return True
diff --git a/python.d/samba.chart.py b/python.d/samba.chart.py
new file mode 100644
index 00000000..767c9746
--- /dev/null
+++ b/python.d/samba.chart.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# Description: samba netdata python.d module
+# Author: Christopher Cox <chris_cox@endlessnow.com>
+#
+# The netdata user needs to be able to sudo the smbstatus program
+# without password:
+# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+#
+# This makes calls to smbstatus -P
+#
+# This just looks at a couple of values out of syscall, and some from smb2.
+#
+# The Lesser Ops chart is merely a display of current counter values. They
+# didn't seem to change much to me. However, if you notice something changing
+# a lot there, bring one or more out into its own chart and make it incremental
+# (like find and notify... good examples).
+
+from base import ExecutableService
+import re
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 60
+
+ORDER = ['syscall_rw', 'smb2_rw', 'smb2_create_close', 'smb2_info', 'smb2_find', 'smb2_notify', 'smb2_sm_count']
+
+CHARTS = {
+ 'syscall_rw': {
+ 'lines': [
+ ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
+ ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
+ ],
+ 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area']
+ },
+ 'smb2_rw': {
+ 'lines': [
+ ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
+ ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
+ ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
+ ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
+ ],
+ 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area']
+ },
+ 'smb2_create_close': {
+ 'lines': [
+ ['smb2_create_count', 'create', 'incremental', 1, 1],
+ ['smb2_close_count', 'close', 'incremental', -1, 1]
+ ],
+ 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line']
+ },
+ 'smb2_info': {
+ 'lines': [
+ ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
+ ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
+ ],
+ 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line']
+ },
+ 'smb2_find': {
+ 'lines': [
+ ['smb2_find_count', 'find', 'incremental', 1, 1]
+ ],
+ 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line']
+ },
+ 'smb2_notify': {
+ 'lines': [
+ ['smb2_notify_count', 'notify', 'incremental', 1, 1]
+ ],
+ 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line']
+ },
+ 'smb2_sm_count': {
+ 'lines': [
+ ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
+ ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
+ ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
+ ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
+ ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
+ ['smb2_flush_count', 'flush', 'absolute', 1, 1],
+ ['smb2_lock_count', 'lock', 'absolute', 1, 1],
+ ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
+ ['smb2_break_count', 'break', 'absolute', 1, 1],
+ ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
+ ],
+ 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked']
+ }
+ }
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
+
+ def check(self):
+ sudo_binary, smbstatus_binary = self.find_binary('sudo'), self.find_binary('smbstatus')
+
+ if not (sudo_binary and smbstatus_binary):
+ self.error('Can\'t locate \'sudo\' or \'smbstatus\' binary')
+ return False
+
+ self.command = [sudo_binary, '-v']
+ err = self._get_raw_data(stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+
+ self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
+
+ return ExecutableService.check(self)
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
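+        Example (illustrative): an 'smbstatus -P' line such as
+        'smb2_create_count:                 42' is matched by rgx_smb2
+        and becomes the pair ('smb2_create_count', '42').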
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ parsed = self.rgx_smb2.findall(' '.join(raw_data))
+
+ return dict(parsed) or None
diff --git a/python.d/smartd_log.chart.py b/python.d/smartd_log.chart.py
index e8037237..4039c153 100644
--- a/python.d/smartd_log.chart.py
+++ b/python.d/smartd_log.chart.py
@@ -2,7 +2,7 @@
# Description: smart netdata python.d module
# Author: l2isbad, vorph1
-from re import compile
+from re import compile as r_compile
from os import listdir, access, R_OK
from os.path import isfile, join, getsize, basename, isdir
try:
@@ -101,7 +101,7 @@ NAMED_DISKS = namedtuple('disks', ['name', 'size', 'number'])
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
- self.regex = compile(r'(\d+);(\d+);(\d+)')
+ self.regex = r_compile(r'(\d+);(\d+);(\d+)')
self.log_path = self.configuration.get('log_path', '/var/log/smartd')
self.raw_values = self.configuration.get('raw_values')
self.attr = self.configuration.get('smart_attributes', [])
@@ -197,18 +197,19 @@ class Service(SimpleService):
result.append(['_'.join([name, attrid]), name[:name.index('.')], 'absolute'])
return result
- # Add additional smart attributes to the ORDER. If something goes wrong we don't care.
+ # Use configured attributes, if present. If something goes wrong we don't care.
+ order = ORDER
try:
- ORDER.extend(list(set(self.attr.split()) & SMART_ATTR.keys() - set(ORDER)))
+ order = [attr for attr in self.attr.split() if attr in SMART_ATTR.keys()] or ORDER
except Exception:
pass
- self.order = [''.join(['attrid', i]) for i in ORDER]
+ self.order = [''.join(['attrid', i]) for i in order]
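+        # Example (illustrative): smart_attributes: '1 7 194' in the job configuration limits
+        # the charts to those attribute IDs (provided they exist in SMART_ATTR); an empty or
+        # invalid value falls back to the default ORDER.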
self.definitions = dict()
units = 'raw' if self.raw_values else 'normalized'
for k, v in dict([(k, v) for k, v in SMART_ATTR.items() if k in ORDER]).items():
self.definitions.update({''.join(['attrid', k]): {
- 'options': [None, v, units, v, 'smartd.attrid' + k, 'line'],
+ 'options': [None, v, units, v.lower(), 'smartd.attrid' + k, 'line'],
'lines': create_lines(k)}})
def find_disks_in_log_path(log_path):
diff --git a/python.d/web_log.chart.py b/python.d/web_log.chart.py
index cbc8cd23..564c9f1d 100644
--- a/python.d/web_log.chart.py
+++ b/python.d/web_log.chart.py
@@ -1,37 +1,51 @@
# -*- coding: utf-8 -*-
# Description: web log netdata python.d module
# Author: l2isbad
-
-from base import LogService
import re
import bisect
from os import access, R_OK
from os.path import getsize
-from collections import namedtuple
+from collections import namedtuple, defaultdict
from copy import deepcopy
+try:
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilterfalse as filterfalse
+from base import LogService
+import msg
+
priority = 60000
retries = 60
-ORDER = ['response_statuses', 'response_codes', 'bandwidth', 'response_time', 'requests_per_url', 'http_method',
- 'http_version', 'requests_per_ipproto', 'clients', 'clients_all']
-CHARTS = {
+ORDER_APACHE_CACHE = ['apache_cache']
+
+ORDER_WEB = ['response_statuses', 'response_codes', 'bandwidth', 'response_time', 'response_time_upstream',
+ 'requests_per_url', 'requests_per_user_defined', 'http_method', 'http_version',
+ 'requests_per_ipproto', 'clients', 'clients_all']
+
+ORDER_SQUID = ['squid_response_statuses', 'squid_response_codes', 'squid_detailed_response_codes',
+ 'squid_method', 'squid_mime_type', 'squid_hier_code', 'squid_transport_methods',
+ 'squid_transport_errors', 'squid_code', 'squid_handling_opts', 'squid_object_types',
+ 'squid_cache_events', 'squid_bytes', 'squid_duration', 'squid_clients', 'squid_clients_all']
+
+CHARTS_WEB = {
'response_codes': {
'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
'lines': [
- ['2xx', '2xx', 'incremental'],
- ['5xx', '5xx', 'incremental'],
- ['3xx', '3xx', 'incremental'],
- ['4xx', '4xx', 'incremental'],
- ['1xx', '1xx', 'incremental'],
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
['0xx', 'other', 'incremental'],
- ['unmatched', 'unmatched', 'incremental']
+ ['unmatched', None, 'incremental']
]},
'bandwidth': {
- 'options': [None, 'Bandwidth', 'KB/s', 'bandwidth', 'web_log.bandwidth', 'area'],
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
'lines': [
- ['resp_length', 'received', 'incremental', 1, 1024],
- ['bytes_sent', 'sent', 'incremental', -1, 1024]
+ ['resp_length', 'received', 'incremental', 8, 1000],
+ ['bytes_sent', 'sent', 'incremental', -8, 1000]
]},
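+        # Note (illustrative): multiplier 8 and divisor 1000 render the byte counters as
+        # kilobits/s (value * 8 / 1000); the negative multiplier draws 'sent' below zero.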
'response_time': {
'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
@@ -40,6 +54,14 @@ CHARTS = {
['resp_time_max', 'max', 'incremental', 1, 1000],
['resp_time_avg', 'avg', 'incremental', 1, 1000]
]},
+ 'response_time_upstream': {
+ 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
+ 'web_log.response_time_upstream', 'area'],
+ 'lines': [
+ ['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
+ ]},
'clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
'lines': [
@@ -77,36 +99,160 @@ CHARTS = {
['redirects', 'redirect', 'incremental', 1, 1],
['bad_requests', 'bad', 'incremental', 1, 1],
['other_requests', 'other', 'incremental', 1, 1]
+ ]},
+ 'requests_per_url': {
+ 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url',
+ 'stacked'],
+ 'lines': [
+ ['url_pattern_other', 'other', 'incremental', 1, 1]
+ ]},
+ 'requests_per_user_defined': {
+ 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
+ 'web_log.requests_per_user_defined', 'stacked'],
+ 'lines': [
+ ['user_pattern_other', 'other', 'incremental', 1, 1]
]}
}
-NAMED_URL_PATTERN = namedtuple('URL_PATTERN', ['description', 'pattern'])
+CHARTS_APACHE_CACHE = {
+ 'apache_cache': {
+ 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
+ 'stacked'],
+ 'lines': [
+            ['hit', 'cache', 'percentage-of-absolute-row'],
+            ['miss', None, 'percentage-of-absolute-row'],
+            ['other', None, 'percentage-of-absolute-row']
+ ]}
+}
+
+CHARTS_SQUID = {
+ 'squid_duration': {
+ 'options': [None, 'Elapsed Time The Transaction Busied The Cache',
+ 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'],
+ 'lines': [
+ ['duration_min', 'min', 'incremental', 1, 1000],
+ ['duration_max', 'max', 'incremental', 1, 1000],
+ ['duration_avg', 'avg', 'incremental', 1, 1000]
+ ]},
+ 'squid_bytes': {
+ 'options': [None, 'Amount Of Data Delivered To The Clients',
+ 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
+ 'lines': [
+ ['bytes', 'sent', 'incremental', 8, 1000]
+ ]},
+ 'squid_response_statuses': {
+ 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
+ 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]},
+ 'squid_response_codes': {
+ 'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
+ 'web_log.squid_response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['0xx', None, 'incremental'],
+ ['other', None, 'incremental'],
+ ['unmatched', None, 'incremental']
+ ]},
+ 'squid_code': {
+ 'options': [None, 'Responses Per Cache Result Of The Request',
+ 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_detailed_response_codes': {
+ 'options': [None, 'Detailed Response Codes',
+ 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_hier_code': {
+ 'options': [None, 'Responses Per Hierarchy Code',
+ 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_method': {
+ 'options': [None, 'Requests Per Method',
+ 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_mime_type': {
+ 'options': [None, 'Requests Per MIME Type',
+ 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients', 'stacked'],
+ 'lines': [
+ ['unique_ipv4', 'ipv4', 'incremental'],
+ ['unique_ipv6', 'ipv6', 'incremental']
+ ]},
+ 'squid_clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute'],
+ ['unique_tot_ipv6', 'ipv6', 'absolute']
+ ]},
+ 'squid_transport_methods': {
+ 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_methods', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_transport_errors': {
+ 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_errors', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_handling_opts': {
+ 'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
+ 'web_log.squid_handling_opts', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_object_types': {
+ 'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
+ 'web_log.squid_object_types', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_cache_events': {
+ 'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
+ 'web_log.squid_cache_events', 'stacked'],
+ 'lines': [
+ ]}
+}
+
+NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
+SQUID_CODES = dict(TCP='squid_transport_methods', UDP='squid_transport_methods', NONE='squid_transport_methods',
+ CLIENT='squid_handling_opts', IMS='squid_handling_opts', ASYNC='squid_handling_opts',
+ SWAPFAIL='squid_handling_opts', REFRESH='squid_handling_opts', SHARED='squid_handling_opts',
+ REPLY='squid_handling_opts', NEGATIVE='squid_object_types', STALE='squid_object_types',
+ OFFLINE='squid_object_types', INVALID='squid_object_types', FAIL='squid_object_types',
+ MODIFIED='squid_object_types', UNMODIFIED='squid_object_types', REDIRECT='squid_object_types',
+ HIT='squid_cache_events', MEM='squid_cache_events', MISS='squid_cache_events',
+ DENIED='squid_cache_events', NOFETCH='squid_cache_events', TUNNEL='squid_cache_events',
+ ABORTED='squid_transport_errors', TIMEOUT='squid_transport_errors')
+
class Service(LogService):
def __init__(self, configuration=None, name=None):
"""
:param configuration:
:param name:
- # self._get_data = None # will be assigned in 'check' method.
- # self.order = None # will be assigned in 'create_*_method' method.
- # self.definitions = None # will be assigned in 'create_*_method' method.
"""
LogService.__init__(self, configuration=configuration, name=name)
- # Variables from module configuration file
- self.log_type = self.configuration.get('type', 'web_access')
+ self.log_type = self.configuration.get('type', 'web')
self.log_path = self.configuration.get('path')
- self.url_pattern = self.configuration.get('categories') # dict
- self.custom_log_format = self.configuration.get('custom_log_format') # dict
- # Instance variables
- self.regex = None # will be assigned in 'find_regex' or 'find_regex_custom' method
- self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0,
- 'resp_time_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0,
- '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0,
- 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
- 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0}
def check(self):
"""
@@ -117,11 +263,18 @@ class Service(LogService):
3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
4. other checks depends on log "type"
"""
+
+ log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
+
+ if self.log_type not in log_types:
+ self.error('bad log type (%s). Supported types: %s' % (self.log_type, log_types.keys()))
+ return False
+
if not self.log_path:
self.error('log path is not specified')
return False
- if not access(self.log_path, R_OK):
+ if not (self._find_recent_log_file() and access(self.log_path, R_OK)):
            self.error('%s is not readable or does not exist' % self.log_path)
return False
@@ -129,357 +282,237 @@ class Service(LogService):
self.error('%s is empty' % self.log_path)
return False
+ self.configuration['update_every'] = self.update_every
+ self.configuration['name'] = self.name
+ self.configuration['override_name'] = self.override_name
+ self.configuration['_dimensions'] = self._dimensions
+ self.configuration['path'] = self.log_path
+
+ cls = log_types[self.log_type]
+ self.Job = cls(configuration=self.configuration)
+ if self.Job.check():
+ self.order = self.Job.order
+ self.definitions = self.Job.definitions
+ self.info('Current log file: %s' % self.log_path)
+ return True
+ return False
+
+ def _get_data(self):
+ return self.Job.get_data(self._get_raw_data())
+
+
+class Mixin:
+ def filter_data(self, raw_data):
+ """
+ :param raw_data: list
+ :return:
+ """
+ if not self.pre_filter:
+ return raw_data
+ filtered = raw_data
+ for elem in self.pre_filter:
+ if elem.description == 'filter_include':
+ filtered = filter(elem.func, filtered)
+ elif elem.description == 'filter_exclude':
+ filtered = filterfalse(elem.func, filtered)
+ return filtered
+
+ def add_new_dimension(self, dimension_id, chart_key, dimension=None,
+ algorithm='incremental', multiplier=1, divisor=1):
+ """
+ :param dimension:
+ :param chart_key:
+ :param dimension_id:
+ :param algorithm:
+ :param multiplier:
+ :param divisor:
+ :return:
+ """
+
+ self.data[dimension_id] = 0
+ # SET method check if dim in _dimensions
+ self.conf['_dimensions'].append(dimension_id)
+ # UPDATE method do SET only if dim in definitions
+ dimension_list = list(map(str, [dimension_id,
+ dimension if dimension else dimension_id,
+ algorithm,
+ multiplier,
+ divisor]))
+ self.definitions[chart_key]['lines'].append(dimension_list)
+ job_name = find_job_name(self.conf['override_name'], self.conf['name'])
+ opts = self.definitions[chart_key]['options']
+ chart = 'CHART %s.%s "" "%s" %s "%s" %s %s 60000 %s\n' % (job_name, chart_key,
+ opts[1], opts[2], opts[3],
+ opts[4], opts[5], self.conf['update_every'])
+ print(chart + "DIMENSION %s\n" % ' '.join(dimension_list))
+
+ def get_last_line(self):
+ """
+ Reads last line from the log file
+ :return: str:
+ """
# Read last line (or first if there is only one line)
- with open(self.log_path, 'rb') as logs:
+ with open(self.conf['path'], 'rb') as logs:
logs.seek(-2, 2)
while logs.read(1) != b'\n':
logs.seek(-2, 1)
if logs.tell() == 0:
break
last_line = logs.readline()
-
try:
- last_line = last_line.decode()
+ return last_line.decode()
except UnicodeDecodeError:
try:
- last_line = last_line.decode(encoding='utf-8')
+ return last_line.decode(encoding='utf-8')
except (TypeError, UnicodeDecodeError) as error:
- self.error(str(error))
+ msg.error('web_log', str(error))
return False
- if self.log_type == 'web_access':
- self.unique_all_time = list() # sorted list of unique IPs
- self.detailed_response_codes = self.configuration.get('detailed_response_codes', True)
- self.detailed_response_aggregate = self.configuration.get('detailed_response_aggregate', True)
- self.all_time = self.configuration.get('all_time', True)
+ @staticmethod
+ def error(*params):
+ msg.error('web_log', ' '.join(map(str, params)))
- # Custom_log_format or predefined log format.
- if self.custom_log_format:
- match_dict, error = self.find_regex_custom(last_line)
- else:
- match_dict, error = self.find_regex(last_line)
+ @staticmethod
+ def info(*params):
+ msg.info('web_log', ' '.join(map(str, params)))
- # "match_dict" is None if there are any problems
- if match_dict is None:
- self.error(str(error))
- return False
- # self.url_pattern check
- if self.url_pattern:
- self.url_pattern = check_req_per_url_pattern('rpu', self.url_pattern)
+class Web(Mixin):
+ def __init__(self, configuration):
+ self.conf = configuration
+ self.pre_filter = check_patterns('filter', self.conf.get('filter'))
+ self.storage = dict()
+ self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0,
+ 'resp_time_avg': 0, 'resp_time_upstream_min': 0, 'resp_time_upstream_max': 0,
+ 'resp_time_upstream_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0,
+ '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0,
+ 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
+ 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0}
- self.create_access_charts(match_dict) # Create charts
- self._get_data = self._get_access_data # _get_data assignment
- else:
- self.error('Not implemented')
+ def check(self):
+ last_line = self.get_last_line()
+ if not last_line:
return False
-
- # Double check
- if not self.regex:
- self.error('That can not happen, but it happened. "regex" is None')
-
- self.info('Collected data: %s' % list(match_dict.keys()))
- return True
-
- def find_regex_custom(self, last_line):
- """
- :param last_line: str: literally last line from log file
- :return: tuple where:
- [0]: dict or None: match_dict or None
- [1]: str: error description
-
- We are here only if "custom_log_format" is in logs. We need to make sure:
- 1. "custom_log_format" is a dict
- 2. "pattern" in "custom_log_format" and pattern is <str> instance
- 3. if "time_multiplier" is in "custom_log_format" it must be <int> instance
-
- If all parameters is ok we need to make sure:
- 1. Pattern search is success
- 2. Pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
-
- If pattern search is success we need to make sure:
- 1. All mandatory keys ['address', 'code', 'bytes_sent', 'method', 'url'] are in "match_dict"
-
- If this is True we need to make sure:
- 1. All mandatory key values from "match_dict" have the correct format
- ("code" is integer, "method" is uppercase word, etc)
-
- If non mandatory keys in "match_dict" we need to make sure:
- 1. All non mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
- ("resp_length" is integer or "-", "resp_time" is integer or float)
-
- """
- if not hasattr(self.custom_log_format, 'keys'):
- return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
-
- pattern = self.custom_log_format.get('pattern')
- if not (pattern and isinstance(pattern, str)):
- return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
-
- resp_time_func = self.custom_log_format.get('time_multiplier') or 0
-
- if not isinstance(resp_time_func, int):
- return find_regex_return(msg='Custom log: "time_multiplier" is not an integer')
-
- try:
- regex = re.compile(pattern)
- except re.error as error:
- return find_regex_return(msg='Pattern compile error: %s' % str(error))
-
- match = regex.search(last_line)
- if match:
- match_dict = match.groupdict() or None
+ # Custom_log_format or predefined log format.
+ if self.conf.get('custom_log_format'):
+ match_dict, error = self.find_regex_custom(last_line)
else:
- return find_regex_return(msg='Custom log: pattern search FAILED')
+ match_dict, error = self.find_regex(last_line)
+ # "match_dict" is None if there are any problems
if match_dict is None:
- find_regex_return(msg='Custom log: search OK but contains no named subgroups'
- ' (you need to use ?P<subgroup_name>)')
- else:
- mandatory_dict = {'address': r'[\da-f.:]+',
- 'code': r'[1-9]\d{2}',
- 'method': r'[A-Z]+',
- 'bytes_sent': r'\d+|-'}
- optional_dict = {'resp_length': r'\d+',
- 'resp_time': r'[\d.]+',
- 'http_version': r'\d\.\d'}
-
- mandatory_values = set(mandatory_dict) - set(match_dict)
- if mandatory_values:
- return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
- % list(mandatory_values))
- else:
- for key in mandatory_dict:
- if not re.search(mandatory_dict[key], match_dict[key]):
- return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
- % (key, match_dict[key]))
-
- optional_values = set(optional_dict) & set(match_dict)
- for key in optional_values:
- if not re.search(optional_dict[key], match_dict[key]):
- return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
- % (key, match_dict[key]))
-
- dot_in_time = '.' in match_dict.get('resp_time', '')
- if dot_in_time:
- self.resp_time_func = lambda time: time * (resp_time_func or 1000000)
- else:
- self.resp_time_func = lambda time: time * (resp_time_func or 1)
-
- self.regex = regex
- return find_regex_return(match_dict=match_dict)
-
- def find_regex(self, last_line):
- """
- :param last_line: str: literally last line from log file
- :return: tuple where:
- [0]: dict or None: match_dict or None
- [1]: str: error description
- We need to find appropriate pattern for current log file
- All logic is do a regex search through the string for all predefined patterns
- until we find something or fail.
- """
- # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
- # 5. Bytes sent 6. Response length 7. Response process time
- acs_default = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)')
-
- acs_apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+) ')
-
- acs_apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)'
- r' .*?'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+)'
- r'(?: |$)')
-
- acs_nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+) ')
-
- acs_nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' .*?'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+)')
-
- def func_usec(time):
- return time
-
- def func_sec(time):
- return time * 1000000
-
- r_regex = [acs_apache_ext_insert, acs_apache_ext_append, acs_nginx_ext_insert,
- acs_nginx_ext_append, acs_default]
- r_function = [func_usec, func_usec, func_sec, func_sec, func_usec]
- regex_function = zip(r_regex, r_function)
-
- match_dict = dict()
- for regex, function in regex_function:
- match = regex.search(last_line)
- if match:
- self.regex = regex
- self.resp_time_func = function
- match_dict = match.groupdict()
- break
+ self.error(str(error))
+ return False
+ self.storage['unique_all_time'] = list()
+ self.storage['url_pattern'] = check_patterns('url_pattern', self.conf.get('categories'))
+ self.storage['user_pattern'] = check_patterns('user_pattern', self.conf.get('user_defined'))
- return find_regex_return(match_dict=match_dict or None,
- msg='Unknown log format. You need to use "custom_log_format" feature.')
+ self.create_web_charts(match_dict) # Create charts
+ self.info('Collected data: %s' % list(match_dict.keys()))
+ return True
- def create_access_charts(self, match_dict):
+ def create_web_charts(self, match_dict):
"""
:param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
:return:
- Create additional charts depending on the 'match_dict' keys and configuration file options
- 1. 'time_response' chart is removed if there is no 'resp_time' in match_dict.
- 2. Other stuff is just remove/add chart depending on yes/no in conf
+ Create/remove additional charts depending on the 'match_dict' keys and configuration file options
"""
+ self.order = ORDER_WEB[:]
+ self.definitions = deepcopy(CHARTS_WEB)
- def find_job_name(override_name, name):
- """
- :param override_name: str: 'name' var from configuration file
- :param name: str: 'job_name' from configuration file
- :return: str: new job name
- We need this for dynamic charts. Actually same logic as in python.d.plugin.
- """
- add_to_name = override_name or name
- if add_to_name:
- return '_'.join(['web_log', re.sub('\s+', '_', add_to_name)])
- else:
- return 'web_log'
-
- self.order = ORDER[:]
- self.definitions = deepcopy(CHARTS)
-
- job_name = find_job_name(self.override_name, self.name)
-
- self.http_method_chart = 'CHART %s.http_method' \
- ' "" "Requests Per HTTP Method" requests/s "http methods"' \
- ' web_log.http_method stacked 11 %s\n' \
- 'DIMENSION GET GET incremental\n' % (job_name, self.update_every)
- self.http_version_chart = 'CHART %s.http_version' \
- ' "" "Requests Per HTTP Version" requests/s "http versions"' \
- ' web_log.http_version stacked 12 %s\n' % (job_name, self.update_every)
-
- # Remove 'request_time' chart from ORDER if resp_time not in match_dict
if 'resp_time' not in match_dict:
self.order.remove('response_time')
- # Remove 'clients_all' chart from ORDER if specified in the configuration
- if not self.all_time:
+ if 'resp_time_upstream' not in match_dict:
+ self.order.remove('response_time_upstream')
+
+ if not self.conf.get('all_time', True):
self.order.remove('clients_all')
+
# Add 'detailed_response_codes' chart if specified in the configuration
- if self.detailed_response_codes:
- self.detailed_chart = list()
- for prio, add_to_dim in enumerate(DET_RESP_AGGR):
- self.detailed_chart.append('CHART %s.detailed_response_codes%s ""'
- ' "Detailed Response Codes %s" requests/s responses'
- ' web_log.detailed_response_codes%s stacked %s %s\n'
- % (job_name, add_to_dim, add_to_dim[1:], add_to_dim,
- str(prio), self.update_every))
-
- codes = DET_RESP_AGGR[:1] if self.detailed_response_aggregate else DET_RESP_AGGR[1:]
+ if self.conf.get('detailed_response_codes', True):
+ codes = DET_RESP_AGGR[:1] if self.conf.get('detailed_response_aggregate', True) else DET_RESP_AGGR[1:]
for code in codes:
self.order.append('detailed_response_codes%s' % code)
- self.definitions['detailed_response_codes%s' % code] = {'options':
- [None,
- 'Detailed Response Codes %s' % code[1:],
- 'requests/s',
- 'responses',
- 'web_log.detailed_response_codes%s' % code,
- 'stacked'],
- 'lines': []}
+ self.definitions['detailed_response_codes%s' % code] \
+ = {'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
+ 'web_log.detailed_response_codes%s' % code, 'stacked'],
+ 'lines': []}
# Add 'requests_per_url' chart if specified in the configuration
- if self.url_pattern:
- self.definitions['requests_per_url'] = {'options': [None, 'Requests Per Url', 'requests/s',
- 'urls', 'web_log.requests_per_url', 'stacked'],
- 'lines': [['rpu_other', 'other', 'incremental']]}
- for elem in self.url_pattern:
- self.definitions['requests_per_url']['lines'].append([elem.description, elem.description[4:],
+ if self.storage['url_pattern']:
+ for elem in self.storage['url_pattern']:
+ self.definitions['requests_per_url']['lines'].append([elem.description,
+ elem.description[12:],
'incremental'])
- self.data.update({elem.description: 0})
- self.data.update({'rpu_other': 0})
+ self.data[elem.description] = 0
+ self.data['url_pattern_other'] = 0
else:
self.order.remove('requests_per_url')
- def add_new_dimension(self, dimension, line_list, chart_string, key):
- """
- :param dimension: str: response status code. Ex.: '202', '499'
- :param line_list: list: Ex.: ['202', '202', 'incremental']
- :param chart_string: Current string we need to pass to netdata to rebuild the chart
- :param key: str: CHARTS dict key (chart name). Ex.: 'response_time'
- :return: str: new chart string = previous + new dimensions
- """
- self.data.update({dimension: 0})
- # SET method check if dim in _dimensions
- self._dimensions.append(dimension)
- # UPDATE method do SET only if dim in definitions
- self.definitions[key]['lines'].append(line_list)
- chart = chart_string
- chart += "%s %s\n" % ('DIMENSION', ' '.join(line_list))
- print(chart)
- return chart
+ # Add 'requests_per_user_defined' chart if specified in the configuration
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ for elem in self.storage['user_pattern']:
+ self.definitions['requests_per_user_defined']['lines'].append([elem.description,
+ elem.description[13:],
+ 'incremental'])
+ self.data[elem.description] = 0
+ self.data['user_pattern_other'] = 0
+ else:
+ self.order.remove('requests_per_user_defined')
- def _get_access_data(self):
+ def get_data(self, raw_data=None):
"""
- Parse new log lines
+ Parses new log lines
:return: dict OR None
None if _get_raw_data method fails.
In all other cases - dict.
"""
- raw = self._get_raw_data()
- if raw is None:
- return None
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = self.filter_data(raw_data=raw_data)
+
+ unique_current = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
- request_time, unique_current = list(), list()
- request_counter = {'count': 0, 'sum': 0}
ip_address_counter = {'unique_cur_ip': 0}
- for line in raw:
- match = self.regex.search(line)
+ for line in filtered_data:
+ match = self.storage['regex'].search(line)
if match:
match_dict = match.groupdict()
try:
- code = ''.join([match_dict['code'][0], 'xx'])
+ code = match_dict['code'][0] + 'xx'
self.data[code] += 1
except KeyError:
self.data['0xx'] += 1
# detailed response code
- if self.detailed_response_codes:
- self._get_data_detailed_response_codes(match_dict['code'])
+ if self.conf.get('detailed_response_codes', True):
+ self.get_data_per_response_codes_detailed(code=match_dict['code'])
# response statuses
- self._get_data_statuses(match_dict['code'])
+ self.get_data_per_statuses(code=match_dict['code'])
# requests per url
- if self.url_pattern:
- self._get_data_per_url(match_dict['url'])
+ if self.storage['url_pattern']:
+ self.get_data_per_pattern(row=match_dict['url'],
+ other='url_pattern_other',
+ pattern=self.storage['url_pattern'])
+ # requests per user defined pattern
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ self.get_data_per_pattern(row=match_dict['user_defined'],
+ other='user_pattern_other',
+ pattern=self.storage['user_pattern'])
# requests per http method
- self._get_data_http_method(match_dict['method'])
+ if match_dict['method'] not in self.data:
+ self.add_new_dimension(dimension_id=match_dict['method'],
+ chart_key='http_method')
+ self.data[match_dict['method']] += 1
# requests per http version
if 'http_version' in match_dict:
- self._get_data_http_version(match_dict['http_version'])
+ dim_id = match_dict['http_version'].replace('.', '_')
+ if dim_id not in self.data:
+ self.add_new_dimension(dimension_id=dim_id,
+ chart_key='http_version',
+ dimension=match_dict['http_version'])
+ self.data[dim_id] += 1
# bandwidth sent
bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
self.data['bytes_sent'] += int(bytes_sent)
@@ -487,92 +520,245 @@ class Service(LogService):
if 'resp_length' in match_dict:
self.data['resp_length'] += int(match_dict['resp_length'])
if 'resp_time' in match_dict:
- resp_time = self.resp_time_func(float(match_dict['resp_time']))
- bisect.insort_left(request_time, resp_time)
- request_counter['count'] += 1
- request_counter['sum'] += resp_time
+ get_timings(timings=timings['resp_time'],
+ time=self.storage['func_resp_time'](float(match_dict['resp_time'])))
+ if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
+ get_timings(timings=timings['resp_time_upstream'],
+ time=self.storage['func_resp_time'](float(match_dict['resp_time_upstream'])))
# requests per ip proto
proto = 'ipv4' if '.' in match_dict['address'] else 'ipv6'
self.data['req_' + proto] += 1
# unique clients ips
- if address_not_in_pool(self.unique_all_time, match_dict['address'],
- self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match_dict['address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
self.data['unique_tot_' + proto] += 1
- if address_not_in_pool(unique_current, match_dict['address'], ip_address_counter['unique_cur_ip']):
+ if match_dict['address'] not in unique_current:
self.data['unique_cur_' + proto] += 1
- ip_address_counter['unique_cur_ip'] += 1
+ unique_current.add(match_dict['address'])
else:
self.data['unmatched'] += 1
# timings
- if request_time:
- self.data['resp_time_min'] += int(request_time[0])
- self.data['resp_time_avg'] += int(round(float(request_counter['sum']) / request_counter['count']))
- self.data['resp_time_max'] += int(request_time[-1])
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
return self.data
- def _get_data_detailed_response_codes(self, code):
+ def find_regex(self, last_line):
"""
- :param code: str: CODE from parsed line. Ex.: '202, '499'
- :return:
- Calls add_new_dimension method If the value is found for the first time
+ :param last_line: str: literally last line from log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+        We need to find an appropriate pattern for the current log file.
+        All the logic does is run a regex search through the string with every
+        predefined pattern until we find a match or fail.
"""
- if code not in self.data:
- if self.detailed_response_aggregate:
- chart_string_copy = self.detailed_chart[0]
- self.detailed_chart[0] = self.add_new_dimension(code, [code, code, 'incremental'],
- chart_string_copy, 'detailed_response_codes')
- else:
- code_index = int(code[0]) if int(code[0]) < 6 else 6
- chart_string_copy = self.detailed_chart[code_index]
- chart_name = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
- self.detailed_chart[code_index] = self.add_new_dimension(code, [code, code, 'incremental'],
- chart_string_copy, chart_name)
- self.data[code] += 1
+ # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
+ # 5. Bytes sent 6. Response length 7. Response process time
+ default = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)')
+
+ apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+) ')
+
+ apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+)'
+ r'(?: |$)')
+
+ nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+) ')
+
+ nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)'
+ r' (?P<resp_time_upstream>[\d.-]+) ')
+
+ nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)')
+
+ def func_usec(time):
+ return time
+
+ def func_sec(time):
+ return time * 1000000
+
+ r_regex = [apache_ext_insert, apache_ext_append,
+ nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
+ default]
+ r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
+ regex_function = zip(r_regex, r_function)
+
+ match_dict = dict()
+ for regex, func in regex_function:
+ match = regex.search(last_line)
+ if match:
+ self.storage['regex'] = regex
+ self.storage['func_resp_time'] = func
+ match_dict = match.groupdict()
+ break
+
+ return find_regex_return(match_dict=match_dict or None,
+ msg='Unknown log format. You need to use the "custom_log_format" feature.')
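To see what the auto-detection above ends up with, here is a minimal sketch: the first predefined pattern that matches the last log line is stored in self.storage['regex'], together with the paired multiplier function that converts response times to microseconds. The sample access-log line below is made up; the pattern is the nginx_ext_insert regex from the code.

import re

# same pattern as nginx_ext_insert above (resp_time is in seconds)
nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
                              r' -.*?"(?P<method>[A-Z]+)'
                              r' (?P<url>[^ ]+)'
                              r' [A-Z]+/(?P<http_version>\d\.\d)"'
                              r' (?P<code>[1-9]\d{2})'
                              r' (?P<bytes_sent>\d+)'
                              r' (?P<resp_length>\d+)'
                              r' (?P<resp_time>\d+\.\d+) ')

# hypothetical line with $request_length and $request_time appended
line = '203.0.113.7 - - [10/Jul/2017:10:00:00 +0000] "GET /index.html HTTP/1.1" 200 512 615 0.004 '

match = nginx_ext_insert.search(line)
if match:
    fields = match.groupdict()                            # address, method, url, code, ...
    resp_time_us = float(fields['resp_time']) * 1000000   # what the stored func_sec does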
- def _get_data_http_method(self, method):
+ def find_regex_custom(self, last_line):
"""
- :param method: str: METHOD from parsed line. Ex.: 'GET', 'POST'
- :return:
- Calls add_new_dimension method If the value is found for the first time
+ :param last_line: str: literally last line from log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+
+ We are here only if "custom_log_format" is present in the job configuration. We need to make sure:
+ 1. "custom_log_format" is a dict
+ 2. "pattern" is in "custom_log_format" and is a <str> instance
+ 3. if "time_multiplier" is in "custom_log_format" it is an <int> instance
+
+ If all parameters are ok we need to make sure:
+ 1. The pattern search succeeds
+ 2. The pattern contains named subgroups (?P<subgroup_name>) (= "match_dict")
+
+ If the pattern search succeeds we need to make sure:
+ 1. All mandatory keys ['address', 'code', 'bytes_sent', 'method', 'url'] are in "match_dict"
+
+ If this is True we need to make sure:
+ 1. All mandatory key values from "match_dict" have the correct format
+ ("code" is an integer, "method" is an uppercase word, etc)
+
+ If non-mandatory keys are in "match_dict" we need to make sure:
+ 1. All non-mandatory key values from "match_dict" ['resp_length', 'resp_time'] have the correct format
+ ("resp_length" is an integer, "resp_time" is an integer or a float)
+
"""
- if method not in self.data:
- chart_string_copy = self.http_method_chart
- self.http_method_chart = self.add_new_dimension(method, [method, method, 'incremental'],
- chart_string_copy, 'http_method')
- self.data[method] += 1
+ if not hasattr(self.conf.get('custom_log_format'), 'keys'):
+ return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
+
+ pattern = self.conf.get('custom_log_format', dict()).get('pattern')
+ if not (pattern and isinstance(pattern, str)):
+ return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
+
+ resp_time_func = self.conf.get('custom_log_format', dict()).get('time_multiplier') or 0
+
+ if not isinstance(resp_time_func, int):
+ return find_regex_return(msg='Custom log: "time_multiplier" is not an integer')
+
+ try:
+ regex = re.compile(pattern)
+ except re.error as error:
+ return find_regex_return(msg='Pattern compile error: %s' % str(error))
+ match = regex.search(last_line)
+ if not match:
+ return find_regex_return(msg='Custom log: pattern search FAILED')
- def _get_data_http_version(self, http_version):
+ match_dict = match.groupdict() or None
+ if match_dict is None:
+ return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
+ ' (you need to use ?P<subgroup_name>)')
+ mandatory_dict = {'address': r'[\da-f.:]+',
+ 'code': r'[1-9]\d{2}',
+ 'method': r'[A-Z]+',
+ 'bytes_sent': r'\d+|-'}
+ optional_dict = {'resp_length': r'\d+',
+ 'resp_time': r'[\d.]+',
+ 'resp_time_upstream': r'[\d.-]+',
+ 'http_version': r'\d\.\d'}
+
+ mandatory_values = set(mandatory_dict) - set(match_dict)
+ if mandatory_values:
+ return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
+ % list(mandatory_values))
+ for key in mandatory_dict:
+ if not re.search(mandatory_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ optional_values = set(optional_dict) & set(match_dict)
+ for key in optional_values:
+ if not re.search(optional_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ dot_in_time = '.' in match_dict.get('resp_time', '')
+ if dot_in_time:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
+ else:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
+
+ self.storage['regex'] = regex
+ return find_regex_return(match_dict=match_dict)
+
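As a rough illustration of what the validation above accepts, this is the kind of value that self.conf.get('custom_log_format') is expected to return; the pattern itself is only an example, not a recommended format.

custom_log_format = {
    # must contain named groups; 'address', 'code', 'method' and 'bytes_sent'
    # are checked as mandatory, 'resp_length' / 'resp_time' as optional
    'pattern': r'(?P<address>[\da-f.:]+)'
               r' -.*?"(?P<method>[A-Z]+)'
               r' (?P<url>[^ ]+)'
               r' [A-Z]+/(?P<http_version>\d\.\d)"'
               r' (?P<code>[1-9]\d{2})'
               r' (?P<bytes_sent>\d+|-)'
               r' (?P<resp_length>\d+)'
               r' (?P<resp_time>\d+)',
    # no dot in resp_time, so it is multiplied by time_multiplier
    # (here it is assumed to already be in microseconds)
    'time_multiplier': 1,
}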
+ def get_data_per_response_codes_detailed(self, code):
"""
- :param http_version: str: METHOD from parsed line. Ex.: '1.1', '1.0'
+ :param code: str: CODE from parsed line. Ex.: '202', '499'
:return:
Calls add_new_dimension method If the value is found for the first time
"""
- http_version_dim_id = http_version.replace('.', '_')
- if http_version_dim_id not in self.data:
- chart_string_copy = self.http_version_chart
- self.http_version_chart = self.add_new_dimension(http_version_dim_id,
- [http_version_dim_id, http_version, 'incremental'],
- chart_string_copy, 'http_version')
- self.data[http_version_dim_id] += 1
-
- def _get_data_per_url(self, url):
+ if code not in self.data:
+ if self.conf.get('detailed_response_aggregate', True):
+ self.add_new_dimension(dimension_id=code,
+ chart_key='detailed_response_codes')
+ else:
+ code_index = int(code[0]) if int(code[0]) < 6 else 6
+ chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
+ self.add_new_dimension(dimension_id=code,
+ chart_key=chart_key)
+ self.data[code] += 1
+
+ def get_data_per_pattern(self, row, other, pattern):
"""
- :param url: str: URL from parsed line
+ :param row: str: string from the parsed line to match against the patterns (e.g. the URL)
+ :param other: str: dimension key to increment when no pattern matches
+ :param pattern: list of named tuples: (description, func)
:return:
Scan through string looking for the first location where patterns produce a match for all user
defined patterns
"""
match = None
- for elem in self.url_pattern:
- if elem.pattern.search(url):
+ for elem in pattern:
+ if elem.func(row):
self.data[elem.description] += 1
match = True
break
if not match:
- self.data['rpu_other'] += 1
+ self.data[other] += 1
- def _get_data_statuses(self, code):
+ def get_data_per_statuses(self, code):
"""
:param code: str: response status code. Ex.: '202', '499'
:return:
@@ -590,23 +776,209 @@ class Service(LogService):
self.data['other_requests'] += 1
+class ApacheCache:
+ def __init__(self, configuration):
+ self.conf = configuration
+ self.order = ORDER_APACHE_CACHE
+ self.definitions = CHARTS_APACHE_CACHE
+
+ @staticmethod
+ def check():
+ return True
+
+ @staticmethod
+ def get_data(raw_data=None):
+ data = dict(hit=0, miss=0, other=0)
+ if not raw_data:
+ return None if raw_data is None else data
+
+ for line in raw_data:
+ if 'cache hit' in line:
+ data['hit'] += 1
+ elif 'cache miss' in line:
+ data['miss'] += 1
+ else:
+ data['other'] += 1
+ return data
+
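A tiny usage sketch of the counter above; the cache-log lines are invented, only the 'cache hit' / 'cache miss' substrings matter.

lines = [
    'cache hit for URL http://example.com/index.html',   # hypothetical log lines
    'cache miss for URL http://example.com/app.js',
    'some unrelated line',
]
print(ApacheCache.get_data(lines))   # {'hit': 1, 'miss': 1, 'other': 1}
print(ApacheCache.get_data(None))    # None, i.e. no new data this interval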
+
+class Squid(Mixin):
+ def __init__(self, configuration):
+ self.conf = configuration
+ self.order = ORDER_SQUID
+ self.definitions = CHARTS_SQUID
+ self.pre_filter = check_patterns('filter', self.conf.get('filter'))
+ self.storage = dict()
+ self.data = {'duration_max': 0, 'duration_avg': 0, 'duration_min': 0, 'bytes': 0,
+ '0xx': 0, '1xx': 0, '2xx': 0, '3xx': 0, '4xx': 0, '5xx': 0,
+ 'other': 0, 'unmatched': 0, 'unique_ipv4': 0, 'unique_ipv6': 0,
+ 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
+ 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0
+ }
+
+ def check(self):
+ last_line = self.get_last_line()
+ if not last_line:
+ return False
+ self.storage['unique_all_time'] = list()
+ self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
+ r' (?P<client_address>[\da-f.:]+)'
+ r' (?P<squid_code>[A-Z_]+)/'
+ r'(?P<http_code>[0-9]+)'
+ r' (?P<bytes>[0-9]+)'
+ r' (?P<method>[A-Z_]+)'
+ r' (?P<url>[^ ]+)'
+ r' (?P<user>[^ ]+)'
+ r' (?P<hier_code>[A-Z_]+)/[\da-f.:-]+'
+ r' (?P<mime_type>[^\n]+)')
+
+ match = self.storage['regex'].search(last_line)
+ if not match:
+ self.error('Regex does not match (%s)' % self.storage['regex'].pattern)
+ return False
+ self.storage['dynamic'] = {
+ 'http_code':
+ {'chart': 'squid_detailed_response_codes',
+ 'func_dim_id': None,
+ 'func_dim': None},
+ 'hier_code': {
+ 'chart': 'squid_hier_code',
+ 'func_dim_id': None,
+ 'func_dim': lambda v: v.replace('HIER_', '')},
+ 'method': {
+ 'chart': 'squid_method',
+ 'func_dim_id': None,
+ 'func_dim': None},
+ 'mime_type': {
+ 'chart': 'squid_mime_type',
+ 'func_dim_id': lambda v: v.split('/')[0],
+ 'func_dim': None}}
+ return True
+
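The 'dynamic' table above drives per-field chart dimensions; a short sketch of how get_data() below applies it (the parsed values are hypothetical):

dynamic = {
    'hier_code': {'chart': 'squid_hier_code',
                  'func_dim_id': None,
                  'func_dim': lambda v: v.replace('HIER_', '')},
    'mime_type': {'chart': 'squid_mime_type',
                  'func_dim_id': lambda v: v.split('/')[0],
                  'func_dim': None},
}
parsed = {'hier_code': 'HIER_DIRECT', 'mime_type': 'text/html'}

for key, rules in dynamic.items():
    dim_id = rules['func_dim_id'](parsed[key]) if rules['func_dim_id'] else parsed[key]
    dim_name = rules['func_dim'](parsed[key]) if rules['func_dim'] else dim_id
    print(rules['chart'], dim_id, dim_name)
# squid_hier_code HIER_DIRECT DIRECT
# squid_mime_type text text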
+ def get_data(self, raw_data=None):
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = self.filter_data(raw_data=raw_data)
+
+ unique_ip = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+ for row in filtered_data:
+ match = self.storage['regex'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['duration'] != '0':
+ get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
+ try:
+ self.data[match['http_code'][0] + 'xx'] += 1
+ except KeyError:
+ self.data['other'] += 1
+
+ self.get_data_per_statuses(match['http_code'])
+
+ self.get_data_per_squid_code(match['squid_code'])
+
+ self.data['bytes'] += int(match['bytes'])
+
+ proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
+ # unique clients ips
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match['client_address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+
+ if match['client_address'] not in unique_ip:
+ self.data['unique_' + proto] += 1
+ unique_ip.add(match['client_address'])
+
+ for key, values in self.storage['dynamic'].items():
+ if match[key] == '-':
+ continue
+ dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
+ if dimension_id not in self.data:
+ dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
+ self.add_new_dimension(dimension_id=dimension_id,
+ chart_key=values['chart'],
+ dimension=dimension)
+ self.data[dimension_id] += 1
+ else:
+ self.data['unmatched'] += 1
+
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
+ return self.data
+
+ def get_data_per_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1' or code == '000':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5' or code_class == '6':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+ def get_data_per_squid_code(self, code):
+ """
+ :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
+ :return:
+ """
+ if code not in self.data:
+ self.add_new_dimension(dimension_id=code, chart_key='squid_code')
+ self.data[code] += 1
+ if '_' not in code:
+ return
+ for tag in code.split('_'):
+ try:
+ chart_key = SQUID_CODES[tag]
+ except KeyError:
+ continue
+ if tag not in self.data:
+ self.add_new_dimension(dimension_id=tag, chart_key=chart_key)
+ self.data[tag] += 1
+
+
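get_data_per_squid_code() also fans every result code out into per-tag dimensions via the module-level SQUID_CODES mapping (defined earlier in the file, not shown here); a sketch with a purely hypothetical subset of that mapping:

SQUID_CODES = {'TCP': 'squid_transport_methods', 'MISS': 'squid_cache_events'}  # hypothetical subset

def tags_for(code):
    # yield (tag, chart_key) pairs for a squid result code such as 'TCP_MISS_ABORTED'
    for tag in code.split('_'):
        if tag in SQUID_CODES:
            yield tag, SQUID_CODES[tag]

print(list(tags_for('TCP_MISS_ABORTED')))
# [('TCP', 'squid_transport_methods'), ('MISS', 'squid_cache_events')]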
+def get_timings(timings, time):
+ """
+ :param timings: dict: accumulator with 'minimum', 'maximum', 'summary' and 'count' keys
+ :param time: float: response time value to add to the accumulator
+ :return: None, the accumulator is updated in place
+ """
+ if timings['minimum'] is None:
+ timings['minimum'] = time
+ if time > timings['maximum']:
+ timings['maximum'] = time
+ elif time < timings['minimum']:
+ timings['minimum'] = time
+ timings['summary'] += time
+ timings['count'] += 1
+
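A tiny worked example of the accumulator above (the response times are made up); get_data() then reports minimum, summary divided by count, and maximum per interval.

from collections import defaultdict

timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
for value in (120.0, 80.0, 300.0):
    get_timings(timings=timings['resp_time'], time=value)

t = timings['resp_time']
print(t['minimum'], t['summary'] / t['count'], t['maximum'])   # 80.0 166.66... 300.0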
+
def address_not_in_pool(pool, address, pool_size):
"""
:param pool: list of ip addresses
:param address: ip address
:param pool_size: current pool size
- :return: True if address not in pool. False if address in pool.
+ :return: True if address not in pool. False otherwise.
"""
index = bisect.bisect_left(pool, address)
if index < pool_size:
if pool[index] == address:
return False
- else:
- bisect.insort_left(pool, address)
- return True
- else:
bisect.insort_left(pool, address)
return True
+ bisect.insort_left(pool, address)
+ return True
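The helper keeps the pool sorted so the membership check stays logarithmic via bisect; a small sketch with made-up addresses, using a local counter in place of the unique_tot_* totals.

pool, counted = [], 0
for ip in ('10.0.0.2', '10.0.0.1', '10.0.0.2'):
    if address_not_in_pool(pool=pool, address=ip, pool_size=counted):
        counted += 1
# pool == ['10.0.0.1', '10.0.0.2'], counted == 2 (the duplicate was not recounted)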
def find_regex_return(match_dict=None, msg='Generic error message'):
@@ -618,36 +990,53 @@ def find_regex_return(match_dict=None, msg='Generic error message'):
return match_dict, msg
-def check_req_per_url_pattern(string, url_pattern):
+def check_patterns(string, dimension_regex_dict):
"""
:param string: str:
- :param url_pattern: dict: ex. {'dim1': 'pattern1>', 'dim2': '<pattern2>'}
+ :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
:return: list of named tuples or None:
We need to make sure all patterns are valid regular expressions
"""
- if not hasattr(url_pattern, 'keys'):
+ if not hasattr(dimension_regex_dict, 'keys'):
return None
result = list()
- def is_valid_pattern(pattern):
+ def valid_pattern(pattern):
"""
:param pattern: str
:return: re.compile(pattern) or None
"""
if not isinstance(pattern, str):
return False
- else:
- try:
- compile_pattern = re.compile(pattern)
- except re.error:
- return False
- else:
- return compile_pattern
+ try:
+ return re.compile(pattern)
+ except re.error:
+ return False
- for dimension, regex in url_pattern.items():
- valid_pattern = is_valid_pattern(regex)
- if isinstance(dimension, str) and valid_pattern:
- result.append(NAMED_URL_PATTERN(description='_'.join([string, dimension]), pattern=valid_pattern))
+ def func_search(pattern):
+ def closure(v):
+ return pattern.search(v)
+
+ return closure
+ for dimension, regex in dimension_regex_dict.items():
+ valid = valid_pattern(regex)
+ if isinstance(dimension, str) and valid:
+ func = func_search(valid)
+ result.append(NAMED_PATTERN(description='_'.join([string, dimension]),
+ func=func))
return result or None
+
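A sketch of how check_patterns() and get_data_per_pattern() fit together: each valid user-supplied regex becomes a NAMED_PATTERN holding a search closure, and the first closure that matches decides which dimension is incremented (the dimension names and URL are made up).

patterns = check_patterns('url_pattern', {'static': r'\.(?:js|css|png)$',
                                          'api': r'^/api/'})
# -> [NAMED_PATTERN(description='url_pattern_static', func=<closure>),
#     NAMED_PATTERN(description='url_pattern_api', func=<closure>)]

for elem in patterns:
    if elem.func('/api/v1/data'):
        print(elem.description)   # url_pattern_api
        break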
+
+def find_job_name(override_name, name):
+ """
+ :param override_name: str: 'name' var from configuration file
+ :param name: str: 'job_name' from configuration file
+ :return: str: new job name
+ We need this for dynamic charts. Actually the same logic as in python.d.plugin.
+ """
+ add_to_name = override_name or name
+ if add_to_name:
+ return '_'.join(['web_log', re.sub(r'\s+', '_', add_to_name)])
+ return 'web_log'
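For example (values are illustrative):

find_job_name('', 'nginx log')            # -> 'web_log_nginx_log'
find_job_name('apache cache', 'apache')   # -> 'web_log_apache_cache'
find_job_name(None, None)                 # -> 'web_log'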