| field | value |
|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org>, 2018-11-07 12:19:29 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org>, 2018-11-07 12:20:17 +0000 |
| commit | a64a253794ac64cb40befee54db53bde17dd0d49 (patch) |
| tree | c1024acc5f6e508814b944d99f112259bb28b1be /collectors/python.d.plugin/rethinkdbs |
| parent | New upstream version 1.10.0+dfsg (diff) |
| download | netdata-a64a253794ac64cb40befee54db53bde17dd0d49.tar.xz, netdata-a64a253794ac64cb40befee54db53bde17dd0d49.zip |
New upstream version 1.11.0+dfsg (upstream/1.11.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/python.d.plugin/rethinkdbs')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | collectors/python.d.plugin/rethinkdbs/Makefile.inc | 13 |
| -rw-r--r-- | collectors/python.d.plugin/rethinkdbs/README.md | 34 |
| -rw-r--r-- | collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 235 |
| -rw-r--r-- | collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf | 78 |
4 files changed, 360 insertions, 0 deletions
diff --git a/collectors/python.d.plugin/rethinkdbs/Makefile.inc b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
new file mode 100644
index 000000000..dec604464
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += rethinkdbs/rethinkdbs.chart.py
+dist_pythonconfig_DATA += rethinkdbs/rethinkdbs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += rethinkdbs/README.md rethinkdbs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
new file mode 100644
index 000000000..5d357fa49
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -0,0 +1,34 @@
+# rethinkdbs
+
+Module monitor rethinkdb health metrics.
+
+Following charts are drawn:
+
+1. **Connected Servers**
+ * connected
+ * missing
+
+2. **Active Clients**
+ * active
+
+3. **Queries** per second
+ * queries
+
+4. **Documents** per second
+ * documents
+
+### configuration
+
+```yaml
+
+localhost:
+  name     : 'local'
+  host     : '127.0.0.1'
+  port     : 28015
+  user     : "user"
+  password : "pass"
+```
+
+When no configuration file is found, module tries to connect to `127.0.0.1:28015`.
+
+---
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
new file mode 100644
index 000000000..127e9ad4b
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+# Description: rethinkdb netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+    import rethinkdb as rdb
+    HAS_RETHINKDB = True
+except ImportError:
+    HAS_RETHINKDB = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+    'cluster_connected_servers',
+    'cluster_clients_active',
+    'cluster_queries',
+    'cluster_documents',
+]
+
+
+def cluster_charts():
+    return {
+        'cluster_connected_servers': {
+            'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers',
+                        'stacked'],
+            'lines': [
+                ['cluster_servers_connected', 'connected'],
+                ['cluster_servers_missing', 'missing'],
+            ]
+        },
+        'cluster_clients_active': {
+            'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active',
+                        'line'],
+            'lines': [
+                ['cluster_clients_active', 'active'],
+            ]
+        },
+        'cluster_queries': {
+            'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'],
+            'lines': [
+                ['cluster_queries_per_sec', 'queries'],
+            ]
+        },
+        'cluster_documents': {
+            'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'],
+            'lines': [
+                ['cluster_read_docs_per_sec', 'reads'],
+                ['cluster_written_docs_per_sec', 'writes'],
+            ]
+        },
+    }
+
+
+def server_charts(n):
+    o = [
+        '{0}_client_connections'.format(n),
+        '{0}_clients_active'.format(n),
+        '{0}_queries'.format(n),
+        '{0}_documents'.format(n),
+    ]
+    f = 'server {0}'.format(n)
+
+    c = {
+        o[0]: {
+            'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'],
+            'lines': [
+                ['{0}_client_connections'.format(n), 'connections'],
+            ]
+        },
+        o[1]: {
+            'options': [None, 'Active Clients', 'clients', f,
+                        'rethinkdb.clients_active', 'line'],
+            'lines': [
+                ['{0}_clients_active'.format(n), 'active'],
+            ]
+        },
+        o[2]: {
+            'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'],
+            'lines': [
+                ['{0}_queries_total'.format(n), 'queries', 'incremental'],
+            ]
+        },
+        o[3]: {
+            'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'],
+            'lines': [
+                ['{0}_read_docs_total'.format(n), 'reads', 'incremental'],
+                ['{0}_written_docs_total'.format(n), 'writes', 'incremental'],
+            ]
+        },
+    }
+
+    return o, c
+
+
+class Cluster:
+    def __init__(self, raw):
+        self.raw = raw
+
+    def data(self):
+        qe = self.raw['query_engine']
+
+        return {
+            'cluster_clients_active': qe['clients_active'],
+            'cluster_queries_per_sec': qe['queries_per_sec'],
+            'cluster_read_docs_per_sec': qe['read_docs_per_sec'],
+            'cluster_written_docs_per_sec': qe['written_docs_per_sec'],
+            'cluster_servers_connected': 0,
+            'cluster_servers_missing': 0,
+        }
+
+
+class Server:
+    def __init__(self, raw):
+        self.name = raw['server']
+        self.raw = raw
+
+    def error(self):
+        return self.raw.get('error')
+
+    def data(self):
+        qe = self.raw['query_engine']
+
+        d = {
+            'client_connections': qe['client_connections'],
+            'clients_active': qe['clients_active'],
+            'queries_total': qe['queries_total'],
+            'read_docs_total': qe['read_docs_total'],
+            'written_docs_total': qe['written_docs_total'],
+        }
+
+        return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d)
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = list(ORDER)
+        self.definitions = cluster_charts()
+
+        self.host = self.configuration.get('host', '127.0.0.1')
+        self.port = self.configuration.get('port', 28015)
+        self.user = self.configuration.get('user', 'admin')
+        self.password = self.configuration.get('password')
+        self.timeout = self.configuration.get('timeout', 2)
+
+        self.conn = None
+        self.alive = True
+
+    def check(self):
+        if not HAS_RETHINKDB:
+            self.error('"rethinkdb" module is needed to use rethinkdbs.py')
+            return False
+
+        if not self.connect():
+            return None
+
+        stats = self.get_stats()
+
+        if not stats:
+            return None
+
+        for v in stats[1:]:
+            if get_id(v) == 'server':
+                o, c = server_charts(v['server'])
+                self.order.extend(o)
+                self.definitions.update(c)
+
+        return True
+
+    def get_data(self):
+        if not self.is_alive():
+            return None
+
+        stats = self.get_stats()
+
+        if not stats:
+            return None
+
+        data = dict()
+
+        # cluster
+        data.update(Cluster(stats[0]).data())
+
+        # servers
+        for v in stats[1:]:
+            if get_id(v) != 'server':
+                continue
+
+            s = Server(v)
+
+            if s.error():
+                data['cluster_servers_missing'] += 1
+            else:
+                data['cluster_servers_connected'] += 1
+                data.update(s.data())
+
+        return data
+
+    def get_stats(self):
+        try:
+            return list(rdb.db('rethinkdb').table('stats').run(self.conn).items)
+        except rdb.errors.ReqlError:
+            self.alive = False
+            return None
+
+    def connect(self):
+        try:
+            self.conn = rdb.connect(
+                host=self.host,
+                port=self.port,
+                user=self.user,
+                password=self.password,
+                timeout=self.timeout,
+            )
+            self.alive = True
+            return True
+        except rdb.errors.ReqlError as error:
+            self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error))
+            return False
+
+    def reconnect(self):
+        # The connection is already closed after rdb.errors.ReqlError,
+        # so we do not need to call conn.close()
+        if self.connect():
+            return True
+        return False
+
+    def is_alive(self):
+        if not self.alive:
+            return self.reconnect()
+        return True
+
+
+def get_id(v):
+    return v['id'][0]
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
new file mode 100644
index 000000000..73544fc2e
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
@@ -0,0 +1,78 @@
+# netdata python.d.plugin configuration for rethinkdb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# Additionally to the above, rethinkdb also supports the following:
+#
+#     host: IP or HOSTNAME    # default is 'localhost'
+#     port: PORT              # default is 28015
+#     user: USERNAME          # default is 'admin'
+#     password: PASSWORD      # not set by default
+#     timeout: TIMEOUT        # default is 2
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+  name: 'local'
+  host: 'localhost'
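
Everything the module charts comes from RethinkDB's own `stats` system table: the first row is the cluster aggregate, and each `server`-typed row carries per-server counters, which is why `check()` builds an extra chart family for every server it discovers. Below is a minimal standalone sketch of the same query, not part of the commit; it assumes the legacy module-level driver API that the collector itself imports (`import rethinkdb as rdb`) and a local server with default credentials.

```python
# Sketch only: read the same 'rethinkdb.stats' system table the collector uses.
# Assumes the pre-2.4 driver's module-level API and a local server;
# host/port are placeholder assumptions matching the module defaults.
import rethinkdb as rdb

conn = rdb.connect(host='127.0.0.1', port=28015)           # default 'admin' user, empty password
rows = list(rdb.db('rethinkdb').table('stats').run(conn))  # first row is the cluster aggregate

for row in rows:
    kind = row['id'][0]  # 'cluster', 'server', 'table', 'table_server', ...
    if kind == 'cluster':
        qe = row['query_engine']
        print('cluster: {0} queries/s, {1} active clients'.format(
            qe['queries_per_sec'], qe['clients_active']))
    elif kind == 'server':
        # unreachable servers carry an 'error' field; the collector counts
        # them as 'missing' instead of reading their query_engine counters
        status = 'missing' if row.get('error') else 'connected'
        print('server {0}: {1}'.format(row['server'], status))

conn.close()
```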
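The shipped configuration only defines the `local` auto-detection job above. Monitoring a remote or password-protected cluster means adding a job with the parameters documented in the JOBS section; the snippet below is a hypothetical example (host name and credentials are placeholders), typically placed in `python.d/rethinkdbs.conf` under netdata's user configuration directory.

```yaml
# Hypothetical job for a remote, authenticated RethinkDB server.
# Keys match the parameters documented in the shipped rethinkdbs.conf;
# host and credentials are placeholders.
production:
  name: 'production'
  host: 'rethinkdb.example.com'
  port: 28015
  user: 'admin'
  password: 'secret'
  timeout: 2
  update_every: 5   # optional override of the global collection frequency
```

A common way to verify such a job without restarting netdata is to run the plugin by hand in debug mode, e.g. `sudo -u netdata /usr/libexec/netdata/plugins.d/python.d.plugin rethinkdbs debug trace`; the plugin path varies between packages, so treat the command as an illustration rather than a fixed recipe.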